From b853e74ad9146ad97ce46bb9ddaa40c062961d3a Mon Sep 17 00:00:00 2001 From: Matthias Teich Date: Fri, 5 Jul 2024 09:57:44 +0200 Subject: [PATCH] Add IPAM --- .github/workflows/build.yml | 56 +++ .github/workflows/codespell.yml | 15 - .github/workflows/conformance-e2e.yaml | 31 -- .github/workflows/e2e.yaml | 64 --- .github/workflows/image.yaml | 57 --- .github/workflows/license.conf | 9 - .github/workflows/license.yml | 14 - .github/workflows/lint.yml | 40 -- .github/workflows/release.yml | 38 -- .github/workflows/test.yml | 13 - .github/workflows/verify.yaml | 19 - api/v1alpha1/ionoscloudmachine_types.go | 22 +- api/v1alpha1/ionoscloudmachine_types_test.go | 51 +++ api/v1alpha1/ipam_types.go | 40 ++ api/v1alpha1/suite_test.go | 2 + api/v1alpha1/zz_generated.deepcopy.go | 35 +- cmd/main.go | 3 + ...r.x-k8s.io_ionoscloudmachinetemplates.yaml | 129 ++++++ config/rbac/role.yaml | 19 + .../ionoscloudmachine_controller.go | 11 +- internal/service/cloud/server.go | 46 +- internal/service/cloud/suite_test.go | 2 + internal/service/ipam/ipam.go | 307 +++++++++++++ internal/service/ipam/ipam_test.go | 405 ++++++++++++++++++ 24 files changed, 1109 insertions(+), 319 deletions(-) create mode 100644 .github/workflows/build.yml delete mode 100644 .github/workflows/codespell.yml delete mode 100644 .github/workflows/conformance-e2e.yaml delete mode 100644 .github/workflows/e2e.yaml delete mode 100644 .github/workflows/image.yaml delete mode 100644 .github/workflows/license.conf delete mode 100644 .github/workflows/license.yml delete mode 100644 .github/workflows/lint.yml delete mode 100644 .github/workflows/release.yml delete mode 100644 .github/workflows/test.yml delete mode 100644 .github/workflows/verify.yaml create mode 100644 api/v1alpha1/ipam_types.go create mode 100644 internal/service/ipam/ipam.go create mode 100644 internal/service/ipam/ipam_test.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..9b43d5ba --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,56 @@ +name: main + +on: + push: + branches: ["main"] + pull_request: + branches: ["main"] + release: + types: + - published + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Run lint + run: | + make lint + - name: Run tests + run: | + make test + build: + runs-on: ubuntu-latest + steps: + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=ghcr.io/gdatasoftwareag/cluster-api-ionoscloud-controller + VERSION=edge + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + elif [[ $GITHUB_REF == refs/pull/* ]]; then + VERSION=pr-${{ github.event.number }} + fi + TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${VERSION}-${GITHUB_SHA::8}" + echo ::set-output name=version::${VERSION} + echo ::set-output name=tags::${TAGS} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + - name: Login to Github Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Push to GitHub Packages + uses: docker/build-push-action@v4 + with: + push: ${{ github.event_name == 'release' }} + tags: ${{ steps.prep.outputs.tags }} + labels: | + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} diff --git a/.github/workflows/codespell.yml 
b/.github/workflows/codespell.yml deleted file mode 100644 index d179d982..00000000 --- a/.github/workflows/codespell.yml +++ /dev/null @@ -1,15 +0,0 @@ -# Helps catch spelling errors -name: Codespell -on: [ pull_request ] - -jobs: - codespell: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: codespell-project/actions-codespell@94259cd8be02ad2903ba34a22d9c13de21a74461 # v2.0 - with: - skip: .git,_artifacts,*.sum - ignore_words_file: .codespellignore - check_filenames: true - check_hidden: true diff --git a/.github/workflows/conformance-e2e.yaml b/.github/workflows/conformance-e2e.yaml deleted file mode 100644 index 8299c6b6..00000000 --- a/.github/workflows/conformance-e2e.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: Conformance end-to-end tests -on: - schedule: - - cron: "0 5 * * 1" - workflow_dispatch: {} -jobs: - conformance-e2e: - runs-on: ubuntu-latest - environment: e2e - env: - IONOS_TOKEN: ${{ secrets.IONOS_TOKEN }} - IONOSCLOUD_MACHINE_IMAGE_ID: ${{ vars.IONOSCLOUD_MACHINE_IMAGE_ID }} - CONTROL_PLANE_ENDPOINT_LOCATION: ${{ vars.CONTROL_PLANE_ENDPOINT_LOCATION }} - steps: - - uses: actions/checkout@v4 - - - name: Setup Go environment - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - - - name: Run e2e tests - run: make GINKGO_LABEL="Conformance" test-e2e - - - name : Upload artifacts - uses: actions/upload-artifact@v4 - if: success() || failure() - with: - name: logs - path: _artifacts - retention-days: 7 diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml deleted file mode 100644 index 00ce57c5..00000000 --- a/.github/workflows/e2e.yaml +++ /dev/null @@ -1,64 +0,0 @@ -name: End-to-end tests -on: - pull_request_target: - types: ["opened", "synchronize", "reopened", "labeled", "unlabeled" ] - branches: [ "main" ] - paths: - - "**.go" - - "**.ya?ml" - - "**.sh" - - "Dockerfile" - - "Makefile" - push: - branches: - - main - paths: - - "**.go" - - "**.ya?ml" - - "**.sh" - - "Dockerfile" - - "Makefile" -jobs: - e2e: - if: ${{ ! 
contains( github.event.pull_request.labels.*.name, 'skip e2e') }} - runs-on: ubuntu-latest - environment: e2e - env: - IONOS_TOKEN: ${{ secrets.IONOS_TOKEN }} - IONOSCLOUD_MACHINE_IMAGE_ID: ${{ vars.IONOSCLOUD_MACHINE_IMAGE_ID }} - CONTROL_PLANE_ENDPOINT_LOCATION: ${{ vars.CONTROL_PLANE_ENDPOINT_LOCATION }} - steps: - - name: Check out branch ${{ github.ref }} - if: ${{ github.event_name == 'push' }} - uses: actions/checkout@v4 - - - name: Check out PR ${{ github.event.pull_request.number }} - if: ${{ github.event_name == 'pull_request_target' }} - uses: actions/checkout@v4 - with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} - - - name: Setup Go environment - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - - - name: Run e2e tests - id: tests - run: make test-e2e - - - name: Upload artifacts - uses: actions/upload-artifact@v4 - if: success() || failure() - with: - name: logs - path: _artifacts - retention-days: 7 - - - name: Remove cancelled run leftovers - if: cancelled() - env: - DATACENTER_ID: ${{ steps.tests.outputs.DATACENTER_ID }} - IP_BLOCK_ID: ${{ steps.tests.outputs.IP_BLOCK_ID }} - run: make remove-cancelled-e2e-leftovers diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml deleted file mode 100644 index 763f69f7..00000000 --- a/.github/workflows/image.yaml +++ /dev/null @@ -1,57 +0,0 @@ -name: Container Image - -on: - push: - branches: - - main - - release-* - # Sequence of patterns matched against refs/tags - tags: - - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 - - pull_request: - branches: - - main - - release-* - -permissions: - contents: write - packages: write - -jobs: - image: - name: build image - runs-on: "ubuntu-latest" - steps: - - uses: actions/checkout@v4 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ghcr.io/ionos-cloud/cluster-api-provider-ionoscloud - - - name: Login to GitHub Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push container image - uses: docker/build-push-action@v5 - with: - context: . - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - - name: Scan image - if: github.event_name == 'pull_request' - uses: anchore/scan-action@v3 - id: scan - with: - image: ${{ steps.meta.outputs.tags }} - add-cpes-if-none: true - output-format: table diff --git a/.github/workflows/license.conf b/.github/workflows/license.conf deleted file mode 100644 index 56698ef6..00000000 --- a/.github/workflows/license.conf +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "include": [ "**/*.go" ], - "license": "hack/boilerplate.go.txt" - }, - { - "include": [ "**" ] - } -] diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml deleted file mode 100644 index e92098b4..00000000 --- a/.github/workflows/license.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Checks if all go files have the required license header -name: License -on: [ pull_request ] - -jobs: - license: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: viperproject/check-license-header@v2 - with: - path: . 
- config: .github/workflows/license.conf - strict: false diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index 9a773796..00000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Lint -on: [ pull_request ] - -permissions: - # Required: allow read access to the content for analysis. - contents: read - -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Run lint - run: "make lint" - - yamllint: - name: yamllint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ibiqlik/action-yamllint@v3 - with: - format: github - - actionlint: - name: actionlint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Download actionlint - id: get_actionlint - run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) - shell: bash - - name: Check workflow files - run: ${{ steps.get_actionlint.outputs.executable }} -color - shell: bash diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index e25ae172..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: release - -on: - push: - # Sequence of patterns matched against refs/tags - tags: - - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 - -permissions: - contents: write # Allow to create a release. - -jobs: - build: - name: create draft release - runs-on: ubuntu-latest - steps: - - name: Set env - run: echo "RELEASE_TAG=${GITHUB_REF##refs/tags/}" >> "$GITHUB_ENV" - - name: checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: generate release artifacts - run: | - make release-manifests RELEASE_VERSION=${{ env.RELEASE_TAG }} - - name: generate release templates - run: | - make release-templates - - name: Release - uses: softprops/action-gh-release@v2 - with: - draft: true - files: out/* - generate_release_notes: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index a9ac3341..00000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Test -on: [ pull_request ] - -jobs: - go_test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Run tests - run: "make test" diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml deleted file mode 100644 index cd445245..00000000 --- a/.github/workflows/verify.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Verify -on: - push: - branches: - - main - pull_request: - -jobs: - verify: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Setup Go environment - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Run verifications - run: make verify diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go index 1658306a..a7041f3f 100644 --- a/api/v1alpha1/ionoscloudmachine_types.go +++ b/api/v1alpha1/ionoscloudmachine_types.go @@ -155,6 +155,9 @@ type IonosCloudMachineSpec struct { //+optional AdditionalNetworks Networks `json:"additionalNetworks,omitempty"` + // IPAMConfig allows to obtain IP Addresses from existing IP pools instead of using DHCP. 
+ IPAMConfig `json:",inline"` + // FailoverIP can be set to enable failover for VMs in the same MachineDeployment. // It can be either set to an already reserved IPv4 address, or it can be set to "AUTO" // which will automatically reserve an IPv4 address for the Failover Group. @@ -175,6 +178,8 @@ type IonosCloudMachineSpec struct { // Networks contains a list of additional LAN IDs // that should be attached to the VM. +// +listType=map +// +listMapKey=networkID type Networks []Network // Network contains the config for additional LANs. @@ -183,6 +188,9 @@ type Network struct { // This LAN will be excluded from the deletion process. //+kubebuilder:validation:Minimum=1 NetworkID int32 `json:"networkID"` + + // IPAMConfig allows to obtain IP Addresses from existing IP pools instead of using DHCP. + IPAMConfig `json:",inline"` } // Volume is the physical storage on the VM. @@ -228,7 +236,7 @@ type IonosCloudMachineStatus struct { Ready bool `json:"ready"` // MachineNetworkInfo contains information about the network configuration of the VM. - // This information is only available after the VM has been provisioned. + //+optional MachineNetworkInfo *MachineNetworkInfo `json:"machineNetworkInfo,omitempty"` // FailureReason will be set in the event that there is a terminal problem @@ -280,6 +288,8 @@ type IonosCloudMachineStatus struct { } // MachineNetworkInfo contains information about the network configuration of the VM. +// Before the provisioning MachineNetworkInfo may contain IP addresses to be used for provisioning. +// After provisioning this information is available completely. type MachineNetworkInfo struct { // NICInfo holds information about the NICs, which are attached to the VM. //+optional @@ -289,10 +299,16 @@ type MachineNetworkInfo struct { // NICInfo provides information about the NIC of the VM. type NICInfo struct { // IPv4Addresses contains the IPv4 addresses of the NIC. - IPv4Addresses []string `json:"ipv4Addresses"` + // By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + // creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6. + //+optional + IPv4Addresses []string `json:"ipv4Addresses,omitempty"` // IPv6Addresses contains the IPv6 addresses of the NIC. - IPv6Addresses []string `json:"ipv6Addresses"` + // By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + // creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4. + //+optional + IPv6Addresses []string `json:"ipv6Addresses,omitempty"` // NetworkID is the ID of the LAN to which the NIC is connected. 
NetworkID int32 `json:"networkID"` diff --git a/api/v1alpha1/ionoscloudmachine_types_test.go b/api/v1alpha1/ionoscloudmachine_types_test.go index db38c274..57350ef4 100644 --- a/api/v1alpha1/ionoscloudmachine_types_test.go +++ b/api/v1alpha1/ionoscloudmachine_types_test.go @@ -64,6 +64,20 @@ func defaultMachine() *IonosCloudMachine { } } +func setInvalidPoolRef(m *IonosCloudMachine, poolType string, kind, apiGroup, name string) { + ref := &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(apiGroup), + Kind: kind, + Name: name, + } + switch poolType { + case "IPv6": + m.Spec.AdditionalNetworks[0].IPv6PoolRef = ref + case "IPv4": + m.Spec.AdditionalNetworks[0].IPv4PoolRef = ref + } +} + var _ = Describe("IonosCloudMachine Tests", func() { AfterEach(func() { m := &IonosCloudMachine{ @@ -337,6 +351,43 @@ var _ = Describe("IonosCloudMachine Tests", func() { m.Spec.AdditionalNetworks[0].NetworkID = -1 Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) }) + DescribeTable("should allow IPv4PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv4PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv4-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("should allow IPv6PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv6PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv6-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("must not allow invalid pool references", + func(poolType, kind, apiGroup, name string) { + m := defaultMachine() + setInvalidPoolRef(m, poolType, kind, apiGroup, name) + Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) + }, + Entry("invalid IPv6PoolRef with invalid kind", "IPv6", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv6-pool"), + Entry("invalid IPv6PoolRef with invalid apiGroup", "IPv6", "InClusterIPPool", "SomeWrongAPIGroup", "ipv6-pool"), + Entry("invalid IPv6PoolRef with empty name", "IPv6", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + Entry("invalid IPv4PoolRef with invalid kind", "IPv4", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv4-pool"), + Entry("invalid IPv4PoolRef with invalid apiGroup", "IPv4", "InClusterIPPool", "SomeWrongAPIGroup", "ipv4-pool"), + Entry("invalid IPv4PoolRef with empty name", "IPv4", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + ) }) }) Context("FailoverIP", func() { diff --git a/api/v1alpha1/ipam_types.go b/api/v1alpha1/ipam_types.go new file mode 100644 index 00000000..20f2cf0a --- /dev/null +++ b/api/v1alpha1/ipam_types.go @@ -0,0 +1,40 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// IPAMConfig contains the config for ip address management. +type IPAMConfig struct { + // IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + // The nic will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv4PoolRef.name is required" + // +optional + IPv4PoolRef *corev1.TypedLocalObjectReference `json:"ipv4PoolRef,omitempty"` + + // IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + // The nic will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv6PoolRef.name is required" + // +optional + IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"` +} diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go index 96f12c78..f90453bf 100644 --- a/api/v1alpha1/suite_test.go +++ b/api/v1alpha1/suite_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -53,6 +54,7 @@ var _ = BeforeSuite(func() { scheme := runtime.NewScheme() Expect(AddToScheme(scheme)).To(Succeed()) + Expect(ipamv1.AddToScheme(scheme)).To(Succeed()) cfg, err := testEnv.Start() Expect(err).ToNot(HaveOccurred()) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 75adbd9c..2819a00a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -27,6 +27,31 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.IPv4PoolRef != nil { + in, out := &in.IPv4PoolRef, &out.IPv4PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.IPv6PoolRef != nil { + in, out := &in.IPv6PoolRef, &out.IPv6PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. 
+func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -327,8 +352,11 @@ func (in *IonosCloudMachineSpec) DeepCopyInto(out *IonosCloudMachineSpec) { if in.AdditionalNetworks != nil { in, out := &in.AdditionalNetworks, &out.AdditionalNetworks *out = make(Networks, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) if in.FailoverIP != nil { in, out := &in.FailoverIP, &out.FailoverIP *out = new(string) @@ -529,6 +557,7 @@ func (in *NICInfo) DeepCopy() *NICInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Network) DeepCopyInto(out *Network) { *out = *in + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. @@ -546,7 +575,9 @@ func (in Networks) DeepCopyInto(out *Networks) { { in := &in *out = make(Networks, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/cmd/main.go b/cmd/main.go index 3f1dcf51..ebbe8ab3 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -28,6 +28,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -53,6 +54,8 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) + utilruntime.Must(ipamv1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index bcadd90a..c7bd65ab 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -80,6 +80,74 @@ spec: description: Network contains the config for additional LANs. properties: + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' networkID: description: |- NetworkID represents an ID an existing LAN in the data center. @@ -91,6 +159,9 @@ spec: - networkID type: object type: array + x-kubernetes-list-map-keys: + - networkID + x-kubernetes-list-type: map availabilityZone: default: AUTO description: AvailabilityZone is the availability zone in @@ -177,6 +248,64 @@ spec: - message: failoverIP must be either 'AUTO' or a valid IPv4 address rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The nic will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The nic will use an available IP address from the referenced pool. 
+ properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' memoryMB: default: 3072 description: |- diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9e48d673..4173f90c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -106,3 +106,22 @@ rules: - get - patch - update +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddressclaims + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddresses + verbs: + - get + - list + - watch diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index 9707491a..861a2ed7 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -36,6 +36,7 @@ import ( infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/ipam" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) @@ -62,6 +63,8 @@ func NewIonosCloudMachineReconciler(mgr ctrl.Manager) *IonosCloudMachineReconcil //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ionoscloudmachines/finalizers,verbs=update //+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update //+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch @@ -138,11 +141,11 @@ func (r *IonosCloudMachineReconciler) Reconcile( return r.reconcileDelete(ctx, machineScope, cloudService) } - return r.reconcileNormal(ctx, cloudService, machineScope) + return r.reconcileNormal(ctx, machineScope, cloudService) } func (r *IonosCloudMachineReconciler) reconcileNormal( - ctx context.Context, cloudService *cloud.Service, machineScope *scope.Machine, + ctx context.Context, machineScope *scope.Machine, cloudService *cloud.Service, ) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.V(4).Info("Reconciling IonosCloudMachine") @@ -178,8 +181,10 @@ func (r *IonosCloudMachineReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } + ipamHelper := ipam.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ 
{"ReconcileLAN", cloudService.ReconcileLAN}, + {"ReconcileIPAddressClaims", ipamHelper.ReconcileIPAddresses}, {"ReconcileServer", cloudService.ReconcileServer}, {"ReconcileIPFailover", cloudService.ReconcileIPFailover}, {"FinalizeMachineProvisioning", cloudService.FinalizeMachineProvisioning}, @@ -218,6 +223,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( return ctrl.Result{RequeueAfter: reducedReconcileDuration}, nil } + ipamHelper := ipam.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ // NOTE(avorima): NICs, which are configured in an IP failover configuration, cannot be deleted // by a request to delete the server. Therefore, during deletion, we need to remove the NIC from @@ -226,6 +232,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( {"ReconcileServerDeletion", cloudService.ReconcileServerDeletion}, {"ReconcileLANDeletion", cloudService.ReconcileLANDeletion}, {"ReconcileFailoverIPBlockDeletion", cloudService.ReconcileFailoverIPBlockDeletion}, + {"ReconcileIPAddressClaimsDeletion", ipamHelper.ReconcileIPAddresses}, } for _, step := range reconcileSequence { diff --git a/internal/service/cloud/server.go b/internal/service/cloud/server.go index 6b789045..c38adb7b 100644 --- a/internal/service/cloud/server.go +++ b/internal/service/cloud/server.go @@ -404,27 +404,49 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara Items: &[]sdk.Volume{bootVolume}, } - // As we want to retrieve a public IP from the DHCP, we need to + primaryNIC := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: ¶ms.lanID, + Name: ptr.To(s.nicName(ms.IonosMachine)), + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[0] + primaryNIC.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + primaryNIC.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + primaryNIC.Properties.Dhcp = ptr.To(true) + + // In case we want to retrieve a public IP from the DHCP, we need to // create a NIC with empty IP addresses and patch the NIC afterward. + // To simplify the code we also follow this approach when using IP pools. serverNICs := sdk.Nics{ Items: &[]sdk.Nic{ - { - Properties: &sdk.NicProperties{ - Dhcp: ptr.To(true), - Lan: ¶ms.lanID, - Name: ptr.To(s.nicName(ms.IonosMachine)), - }, - }, + primaryNIC, }, } // Attach server to additional LANs if any. 
items := *serverNICs.Items - for _, nic := range ms.IonosMachine.Spec.AdditionalNetworks { - items = append(items, sdk.Nic{Properties: &sdk.NicProperties{ - Lan: &nic.NetworkID, - }}) + for i, nw := range ms.IonosMachine.Spec.AdditionalNetworks { + nic := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: &nw.NetworkID, + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[i+1] + nic.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + nic.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + nic.Properties.Dhcp = ptr.To(true) + + items = append(items, nic) } serverNICs.Items = &items diff --git a/internal/service/cloud/suite_test.go b/internal/service/cloud/suite_test.go index 14c68c79..7a777133 100644 --- a/internal/service/cloud/suite_test.go +++ b/internal/service/cloud/suite_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -172,6 +173,7 @@ func (s *ServiceTestSuite) SetupTest() { scheme := runtime.NewScheme() s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") s.NoError(clientgoscheme.AddToScheme(scheme)) diff --git a/internal/service/ipam/ipam.go b/internal/service/ipam/ipam.go new file mode 100644 index 00000000..9659a662 --- /dev/null +++ b/internal/service/ipam/ipam.go @@ -0,0 +1,307 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ipam offers services for IPAM management. +package ipam + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +const ( + // PrimaryNICFormat is the format used for IPAddressClaims for the primary nic. + PrimaryNICFormat = "nic-%s" + + // AdditionalNICFormat is the format used for IPAddressClaims for additional nics. + AdditionalNICFormat = "nic-%s-%d" + + // IPV4Format is the IP v4 format. + IPV4Format = "ipv4" + + // IPV6Format is the IP v6 format. + IPV6Format = "ipv6" +) + +// Helper offers IP address management services for IONOS Cloud machine reconciliation. +type Helper struct { + logger logr.Logger + client client.Client +} + +// NewHelper creates new Helper. 
+func NewHelper(c client.Client, log logr.Logger) *Helper { + h := new(Helper) + h.client = c + h.logger = log + + return h +} + +// ReconcileIPAddresses prevents successful reconciliation of a IonosCloudMachine +// until an IPAMConfig Provider updates each IPAddressClaim associated to the +// IonosCloudMachine with a reference to an IPAddress. The IPAddress is stored in the status. +// This function is a no-op if the IonosCloudMachine has no associated IPAddressClaims. +func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.Machine) (requeue bool, err error) { + log := h.logger.WithName("reconcileIPAddresses") + log.V(4).Info("reconciling IPAddresses.") + + networkInfos := &[]infrav1.NICInfo{} + + // primary NIC. + requeue, err = h.handlePrimaryNIC(ctx, machineScope, networkInfos) + if err != nil { + return true, errors.Join(err, errors.New("unable to handle primary nic")) + } + + if machineScope.IonosMachine.Spec.AdditionalNetworks != nil { + waitForAdditionalIP, err := h.handleAdditionalNICs(ctx, machineScope, networkInfos) + if err != nil { + return true, errors.Join(err, errors.New("unable to handle additional nics")) + } + requeue = requeue || waitForAdditionalIP + } + + // update the status + log.V(4).Info("updating IonosMachine.status.machineNetworkInfo.") + machineScope.IonosMachine.Status.MachineNetworkInfo = &infrav1.MachineNetworkInfo{NICInfo: *networkInfos} + + return requeue, nil +} + +func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineScope *scope.Machine) (err error) { + log := h.logger.WithName("reconcileIPAddressClaimsDeletion") + log.V(4).Info("removing finalizers from IPAddressClaims.") + + formats := []string{IPV4Format, IPV6Format} + nicNames := []string{fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name)} + + for _, network := range machineScope.IonosMachine.Spec.AdditionalNetworks { + nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, network.NetworkID) + nicNames = append(nicNames, nicName) + } + + for _, format := range formats { + for _, nicName := range nicNames { + key := client.ObjectKey{ + Namespace: machineScope.IonosMachine.Namespace, + Name: fmt.Sprintf("%s-%s", nicName, format), + } + + claim, err := h.GetIPAddressClaim(ctx, key) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + return err + } + + if updated := controllerutil.RemoveFinalizer(claim, infrav1.MachineFinalizer); updated { + if err = h.client.Update(ctx, claim); err != nil { + return err + } + } + } + } + + return nil +} + +func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { + nic := infrav1.NICInfo{Primary: true} + ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig + nicName := fmt.Sprintf(PrimaryNICFormat, machineScope.IonosMachine.Name) + + // default NIC ipv4. + if ipamConfig.IPv4PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, ipamConfig.IPv4PoolRef) + if err != nil { + return false, err + } + if ip == "" { + waitForIP = true + } else { + nic.IPv4Addresses = []string{ip} + } + } + + // default NIC ipv6. 
+ if ipamConfig.IPv6PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, ipamConfig.IPv6PoolRef) + if err != nil { + return false, err + } + if ip == "" { + waitForIP = true + } else { + nic.IPv6Addresses = []string{ip} + } + } + + *nics = append(*nics, nic) + + return waitForIP, nil +} + +func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { + for _, net := range machineScope.IonosMachine.Spec.AdditionalNetworks { + nic := infrav1.NICInfo{Primary: false} + nicName := fmt.Sprintf(AdditionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID) + if net.IPv4PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV4Format, net.IPv4PoolRef) + if err != nil { + return false, errors.Join(err, fmt.Errorf("unable to handle IPv4Address for nic %s", nicName)) + } + if ip == "" { + waitForIP = true + } else { + nic.IPv4Addresses = []string{ip} + } + } + + if net.IPv6PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, IPV6Format, net.IPv6PoolRef) + if err != nil { + return false, errors.Join(err, fmt.Errorf("unable to handle IPv6Address for nic %s", nicName)) + } + if ip == "" { + waitForIP = true + } else { + nic.IPv6Addresses = []string{ip} + } + } + + *nics = append(*nics, nic) + } + + return waitForIP, nil +} + +// handleIPAddressForNIC checks for an IPAddressClaim. If there is one it extracts the ip from the corresponding IPAddress object, otherwise it creates the IPAddressClaim and returns early. +func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope.Machine, nic, suffix string, poolRef *corev1.TypedLocalObjectReference) (ip string, err error) { + log := h.logger.WithName("handleIPAddressForNIC") + + key := client.ObjectKey{ + Namespace: machineScope.IonosMachine.Namespace, + Name: fmt.Sprintf("%s-%s", nic, suffix), + } + + claim, err := h.GetIPAddressClaim(ctx, key) + if err != nil { + if !apierrors.IsNotFound(err) { + return "", err + } + log.V(4).Info("IPAddressClaim not found, creating it.", "nic", nic) + err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, poolRef) + if err != nil { + return "", errors.Join(err, fmt.Errorf("unable to create IPAddressClaim for machine %s", machineScope.IonosMachine.Name)) + } + // we just created the claim, so we can return early and wait for the creation of the IPAddress. + return "", nil + } + + // we found a claim, lets see if there is an IPAddress + ipAddrName := claim.Status.AddressRef.Name + if ipAddrName == "" { + log.V(4).Info("No IPAddress found yet.", "nic", nic) + return "", nil + } + + ipAddrKey := types.NamespacedName{ + Namespace: machineScope.IonosMachine.Namespace, + Name: ipAddrName, + } + ipAddr, err := h.GetIPAddress(ctx, ipAddrKey) + if err != nil { + return "", errors.Join(err, fmt.Errorf("unable to get IPAddress specified in claim %s", claim.Name)) + } + + ip = ipAddr.Spec.Address + + log.V(4).Info("IPAddress found, ", "ip", ip, "nic", nic) + + return ip, nil +} + +// CreateIPAddressClaim creates an IPAddressClaim for a given object. 
+func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, name string, poolRef *corev1.TypedLocalObjectReference) error { + claimRef := types.NamespacedName{ + Namespace: owner.GetNamespace(), + Name: name, + } + + ipAddrClaim := &ipamv1.IPAddressClaim{} + var err error + if err = h.client.Get(ctx, claimRef, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) { + return err + } + + if !apierrors.IsNotFound(err) { + // IPAddressClaim already exists + return nil + } + + desired := &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimRef.Name, + Namespace: claimRef.Namespace, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: *poolRef, + }, + } + _, err = controllerutil.CreateOrUpdate(ctx, h.client, desired, func() error { + controllerutil.AddFinalizer(desired, infrav1.MachineFinalizer) + return controllerutil.SetControllerReference(owner, desired, h.client.Scheme()) + }) + + return err +} + +// GetIPAddress attempts to retrieve the IPAddress. +func (h *Helper) GetIPAddress(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddress, error) { + out := &ipamv1.IPAddress{} + err := h.client.Get(ctx, key, out) + if err != nil { + return nil, err + } + + return out, nil +} + +// GetIPAddressClaim attempts to retrieve the IPAddressClaim. +func (h *Helper) GetIPAddressClaim(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddressClaim, error) { + out := &ipamv1.IPAddressClaim{} + err := h.client.Get(ctx, key, out) + if err != nil { + return nil, err + } + + return out, nil +} diff --git a/internal/service/ipam/ipam_test.go b/internal/service/ipam/ipam_test.go new file mode 100644 index 00000000..9464ea9f --- /dev/null +++ b/internal/service/ipam/ipam_test.go @@ -0,0 +1,405 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ipam + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/ionoscloud/clienttest" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/ptr" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +type IpamTestSuite struct { + *require.Assertions + suite.Suite + k8sClient client.Client + ctx context.Context + machineScope *scope.Machine + clusterScope *scope.Cluster + log logr.Logger + service *cloud.Service + ipamHelper *Helper + capiCluster *clusterv1.Cluster + capiMachine *clusterv1.Machine + infraCluster *infrav1.IonosCloudCluster + infraMachine *infrav1.IonosCloudMachine + ionosClient *clienttest.MockClient +} + +func (s *IpamTestSuite) SetupSuite() { + s.log = logr.Discard() + s.ctx = context.Background() + s.Assertions = s.Require() +} + +func (s *IpamTestSuite) SetupTest() { + var err error + s.ionosClient = clienttest.NewMockClient(s.T()) + + s.capiCluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-cluster", + UID: "uid", + }, + Spec: clusterv1.ClusterSpec{}, + } + s.infraCluster = &infrav1.IonosCloudCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: s.capiCluster.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: infrav1.IonosCloudClusterSpec{ + Location: "de/txl", + }, + Status: infrav1.IonosCloudClusterStatus{}, + } + s.capiMachine = &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: s.capiCluster.Name, + Version: ptr.To("v1.26.12"), + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + }, + } + s.infraMachine = &infrav1.IonosCloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + clusterv1.MachineDeploymentNameLabel: "test-md", + }, + }, + Spec: infrav1.IonosCloudMachineSpec{ + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + DatacenterID: "ccf27092-34e8-499e-a2f5-2bdee9d34a12", + NumCores: 2, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + MemoryMB: 4096, + CPUFamily: ptr.To("AMD_OPTERON"), + Disk: &infrav1.Volume{ + Name: "test-machine-hdd", + DiskType: infrav1.VolumeDiskTypeHDD, + SizeGB: 20, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + Image: &infrav1.ImageSpec{ + ID: "3e3e3e3e-3e3e-3e3e-3e3e-3e3e3e3e3e3e", + }, + }, + Type: infrav1.ServerTypeEnterprise, + }, + Status: infrav1.IonosCloudMachineStatus{}, + } + + scheme := runtime.NewScheme() + 
s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") + s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") + s.NoError(clientgoscheme.AddToScheme(scheme)) + + initObjects := []client.Object{s.infraMachine, s.infraCluster, s.capiCluster, s.capiMachine} + s.k8sClient = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(initObjects...). + WithStatusSubresource(initObjects...). + Build() + + s.ipamHelper = NewHelper(s.k8sClient, s.log) + s.clusterScope, err = scope.NewCluster(scope.ClusterParams{ + Client: s.k8sClient, + Cluster: s.capiCluster, + IonosCluster: s.infraCluster, + Locker: locker.New(), + }) + s.NoError(err, "failed to create cluster scope") + + s.machineScope, err = scope.NewMachine(scope.MachineParams{ + Client: s.k8sClient, + Machine: s.capiMachine, + ClusterScope: s.clusterScope, + IonosMachine: s.infraMachine, + Locker: locker.New(), + }) + s.NoError(err, "failed to create machine scope") + + s.service, err = cloud.NewService(s.ionosClient, s.log) + s.NoError(err, "failed to create service") +} + +func TestIpamTestSuite(t *testing.T) { + suite.Run(t, new(IpamTestSuite)) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesDontCreateClaim() { + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + + // No PoolRefs provided, so the Reconcile must not create a claim. + list := &ipamv1.IPAddressClaimList{} + err = s.k8sClient.List(s.ctx, list) + s.Empty(list.Items) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4GetIPFromClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + claim := defaultPrimaryIPv4Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv4-10-0-0-2" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv4Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("10.0.0.2", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv4Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultPrimaryIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv6Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv4(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultAdditionalIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[1].IPv6Addresses[0]) +} + +func defaultInClusterIPv4PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv4-pool", + } +} + +func defaultInClusterIPv6PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv6-pool", + } +} + +func defaultIPv4Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4-10-0-0-2", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "10.0.0.2", + Prefix: 16, + }, + } +} + +func defaultIPv6Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6-2001-db8--", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "2001:db8::", + Prefix: 42, + }, + } +} + +func defaultPrimaryIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv6", + Namespace: "default", + }, + } +} + +func defaultAdditionalNetworksIpv6(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv6PoolRef: poolRef, + }, + }} +} + +func defaultAdditionalNetworksIpv4(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv4PoolRef: poolRef, + }, + }} +} + +func defaultPrimaryIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6", + Namespace: "default", + }, + } +} + +func localRef(obj client.Object) *corev1.LocalObjectReference { + return &corev1.LocalObjectReference{ + Name: obj.GetName(), + } +}
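
Usage sketch (not part of the patch): the new ipv4PoolRef/ipv6PoolRef fields are meant to be consumed together with a Cluster API IPAM provider. The manifest below illustrates how an IonosCloudMachineTemplate could point its primary NIC and one additional LAN at pools managed by the in-cluster IPAM provider. Only the ipv4PoolRef/ipv6PoolRef/additionalNetworks field names come from the CRD changes above; the pool names, address range, gateway and the InClusterIPPool apiVersion are assumptions that depend on the installed IPAM provider, and unrelated machine fields are omitted.

apiVersion: ipam.cluster.x-k8s.io/v1alpha2   # version depends on the installed in-cluster IPAM provider
kind: InClusterIPPool
metadata:
  name: incluster-ipv4-pool
spec:
  addresses:
    - 10.0.0.100-10.0.0.200                  # example range, adjust to the LAN
  prefix: 24
  gateway: 10.0.0.1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: IonosCloudMachineTemplate
metadata:
  name: worker-template
spec:
  template:
    spec:
      # numCores, memoryMB, disk, etc. omitted for brevity
      # primary NIC: take the IPv4 address from the pool instead of DHCP
      ipv4PoolRef:
        apiGroup: ipam.cluster.x-k8s.io
        kind: InClusterIPPool
        name: incluster-ipv4-pool
      additionalNetworks:
        - networkID: 2
          # additional NICs may reference IPv4 and/or IPv6 pools independently
          ipv6PoolRef:
            apiGroup: ipam.cluster.x-k8s.io
            kind: GlobalInClusterIPPool
            name: global-ipv6-pool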
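
For reference, the claim/address flow implemented in internal/service/ipam/ipam.go works as follows: ReconcileIPAddresses creates one IPAddressClaim per NIC and pool (named via PrimaryNICFormat "nic-%s" or AdditionalNICFormat "nic-%s-%d" plus an "-ipv4"/"-ipv6" suffix), adds the machine finalizer and an owner reference, and requeues until the IPAM provider fills status.addressRef; the resolved address is then stored in status.machineNetworkInfo and passed to the NIC properties when the server entities are built in server.go. Below is a hedged sketch of the intermediate objects for a hypothetical machine named worker-0, mirroring the fixtures in ipam_test.go; the IPAddress name is chosen by the IPAM provider and may differ.

apiVersion: ipam.cluster.x-k8s.io/v1beta1
kind: IPAddressClaim
metadata:
  name: nic-worker-0-ipv4                    # PrimaryNICFormat + "-ipv4" suffix, created by the controller
  namespace: default
  # the controller also adds the IonosCloudMachine finalizer and an owner reference
spec:
  poolRef:
    apiGroup: ipam.cluster.x-k8s.io
    kind: InClusterIPPool
    name: incluster-ipv4-pool
status:
  addressRef:
    name: nic-worker-0-ipv4-10-0-0-100       # set by the IPAM provider once an address is allocated
---
apiVersion: ipam.cluster.x-k8s.io/v1beta1
kind: IPAddress
metadata:
  name: nic-worker-0-ipv4-10-0-0-100
  namespace: default
spec:
  claimRef:
    name: nic-worker-0-ipv4
  poolRef:
    apiGroup: ipam.cluster.x-k8s.io
    kind: InClusterIPPool
    name: incluster-ipv4-pool
  address: 10.0.0.100
  prefix: 24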