From 48d61bcc335f0fa60bff265c0b163c9b66539e2e Mon Sep 17 00:00:00 2001 From: Joshua Reese Date: Tue, 19 Nov 2024 02:05:50 +0000 Subject: [PATCH 1/3] Bootstrapped controller boilerplate, implemented very basic logic for network bindings, subnet claims, and subnets. Still much work to do around testing, and implementing controllers for other types when needed. The test suite is expected to pass from this point forward, though there are some Pending tests. --- cmd/main.go | 46 +++++ internal/.gitkeep | 0 internal/controller/network_controller.go | 49 +++++ .../controller/network_controller_test.go | 70 +++++++ .../controller/networkbinding_controller.go | 185 ++++++++++++++++++ .../networkbinding_controller_test.go | 159 +++++++++++++++ .../controller/networkcontext_controller.go | 49 +++++ .../networkcontext_controller_test.go | 70 +++++++ .../controller/networkpolicy_controller.go | 49 +++++ .../networkpolicy_controller_test.go | 70 +++++++ internal/controller/subnet_controller.go | 133 +++++++++++++ internal/controller/subnet_controller_test.go | 70 +++++++ internal/controller/subnetclaim_controller.go | 152 ++++++++++++++ .../controller/subnetclaim_controller_test.go | 70 +++++++ internal/controller/suite_test.go | 106 ++++++++++ 15 files changed, 1278 insertions(+) delete mode 100644 internal/.gitkeep create mode 100644 internal/controller/network_controller.go create mode 100644 internal/controller/network_controller_test.go create mode 100644 internal/controller/networkbinding_controller.go create mode 100644 internal/controller/networkbinding_controller_test.go create mode 100644 internal/controller/networkcontext_controller.go create mode 100644 internal/controller/networkcontext_controller_test.go create mode 100644 internal/controller/networkpolicy_controller.go create mode 100644 internal/controller/networkpolicy_controller_test.go create mode 100644 internal/controller/subnet_controller.go create mode 100644 internal/controller/subnet_controller_test.go create mode 100644 internal/controller/subnetclaim_controller.go create mode 100644 internal/controller/subnetclaim_controller_test.go create mode 100644 internal/controller/suite_test.go diff --git a/cmd/main.go b/cmd/main.go index adb4f84..9ce2816 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -18,6 +18,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" + "go.datum.net/network-services-operator/internal/controller" // +kubebuilder:scaffold:imports ) @@ -29,6 +32,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(networkingv1alpha.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -122,6 +126,48 @@ func main() { os.Exit(1) } + if err = (&controller.NetworkReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Network") + os.Exit(1) + } + if err = (&controller.NetworkBindingReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NetworkBinding") + os.Exit(1) + } + if err = (&controller.NetworkContextReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create 
controller", "controller", "NetworkContext") + os.Exit(1) + } + if err = (&controller.NetworkPolicyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "NetworkPolicy") + os.Exit(1) + } + if err = (&controller.SubnetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Subnet") + os.Exit(1) + } + if err = (&controller.SubnetClaimReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "SubnetClaim") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/internal/.gitkeep b/internal/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/internal/controller/network_controller.go b/internal/controller/network_controller.go new file mode 100644 index 0000000..cbc2168 --- /dev/null +++ b/internal/controller/network_controller.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// NetworkReconciler reconciles a Network object +type NetworkReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networks,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networks/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networks/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the Network object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile +func (r *NetworkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NetworkReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&networkingv1alpha.Network{}). + Named("network"). + Complete(r) +} diff --git a/internal/controller/network_controller_test.go b/internal/controller/network_controller_test.go new file mode 100644 index 0000000..125f917 --- /dev/null +++ b/internal/controller/network_controller_test.go @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +var _ = Describe("Network Controller", Pending, func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + network := &networkingv1alpha.Network{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Network") + err := k8sClient.Get(ctx, typeNamespacedName, network) + if err != nil && errors.IsNotFound(err) { + resource := &networkingv1alpha.Network{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &networkingv1alpha.Network{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Network") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &NetworkReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) diff --git a/internal/controller/networkbinding_controller.go b/internal/controller/networkbinding_controller.go new file mode 100644 index 0000000..6488cec --- /dev/null +++ b/internal/controller/networkbinding_controller.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "hash/fnv" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// NetworkBindingReconciler reconciles a NetworkBinding object +type NetworkBindingReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkbindings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkbindings/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkbindings/finalizers,verbs=update + +func (r *NetworkBindingReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, err error) { + logger := log.FromContext(ctx) + + // Each valid network binding should result in a NetworkAttachment being + // created for each unique `topology` that's found. + + var binding networkingv1alpha.NetworkBinding + if err := r.Client.Get(ctx, req.NamespacedName, &binding); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + if !binding.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + logger.Info("reconciling network binding") + defer logger.Info("reconcile complete") + + readyCondition := metav1.Condition{ + Type: networkingv1alpha.NetworkBindingReady, + Status: metav1.ConditionFalse, + Reason: "Unknown", + ObservedGeneration: binding.Generation, + Message: "Unknown state", + } + + defer func() { + if err != nil { + // Don't update the status if errors are encountered + return + } + statusChanged := apimeta.SetStatusCondition(&binding.Status.Conditions, readyCondition) + + if statusChanged { + err = r.Client.Status().Update(ctx, &binding) + } + }() + + networkNamespace := binding.Spec.Network.Namespace + + if len(networkNamespace) == 0 { + // Fall back to binding's namespace if NetworkRef does not specify one. + networkNamespace = binding.Namespace + } + + var network networkingv1alpha.Network + networkObjectKey := client.ObjectKey{ + Namespace: networkNamespace, + Name: binding.Spec.Network.Name, + } + if err := r.Client.Get(ctx, networkObjectKey, &network); err != nil { + readyCondition.Reason = "NetworkNotFound" + readyCondition.Message = "The network referenced in the binding was not found." 
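+		// NOTE: returning a non-nil error causes the deferred status update
+		// above to be skipped, so this condition is only persisted once a
+		// later reconcile succeeds.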
+ return ctrl.Result{}, fmt.Errorf("failed fetching network for binding: %w", err) + } + + networkContextName, err := networkContextNameForBinding(&binding) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to determine network context name: %w", err) + } + + var networkContext networkingv1alpha.NetworkContext + networkContextObjectKey := client.ObjectKey{ + Namespace: networkNamespace, + Name: networkContextName, + } + if err := r.Client.Get(ctx, networkContextObjectKey, &networkContext); client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, fmt.Errorf("failed fetching network context: %w", err) + } + + if networkContext.CreationTimestamp.IsZero() { + networkContext = networkingv1alpha.NetworkContext{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: networkNamespace, + Name: networkContextName, + }, + Spec: networkingv1alpha.NetworkContextSpec{ + Network: networkingv1alpha.LocalNetworkRef{ + Name: binding.Spec.Network.Name, + }, + Topology: binding.Spec.Topology, + }, + } + + if err := controllerutil.SetControllerReference(&network, &networkContext, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller on network context: %w", err) + } + + if err := r.Client.Create(ctx, &networkContext); err != nil { + return ctrl.Result{}, fmt.Errorf("failed creating network context: %w", err) + } + } + + if !apimeta.IsStatusConditionTrue(networkContext.Status.Conditions, networkingv1alpha.NetworkContextReady) { + logger.Info("network context is not ready") + readyCondition.Reason = "NetworkContextNotReady" + readyCondition.Message = "Network context is not ready." + + // Choosing to requeue here instead of establishing a watch on contexts, as + // once the context is created an ready, future bindings will immediately + // become ready. + return ctrl.Result{Requeue: true}, nil + } + + binding.Status.NetworkContextRef = &networkingv1alpha.NetworkContextRef{ + Namespace: networkContext.Namespace, + Name: networkContext.Name, + } + + readyCondition.Status = metav1.ConditionTrue + readyCondition.Reason = "NetworkContextReady" + readyCondition.Message = "Network context is ready." + + if err := r.Client.Status().Update(ctx, &binding); err != nil { + return ctrl.Result{}, fmt.Errorf("failed updating binding status: %w", err) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NetworkBindingReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&networkingv1alpha.NetworkBinding{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(object client.Object) bool { + o := object.(*networkingv1alpha.NetworkBinding) + return o.Status.NetworkContextRef == nil + }), + )). 
+ Complete(r) +} + +func networkContextNameForBinding(binding *networkingv1alpha.NetworkBinding) (string, error) { + if binding.CreationTimestamp.IsZero() { + return "", fmt.Errorf("binding has not been created") + } + topologyBytes, err := json.Marshal(binding.Spec.Topology) + if err != nil { + return "", fmt.Errorf("failed marshaling topology to json: %w", err) + } + + f := fnv.New32a() + f.Write(topologyBytes) + topologyHash := rand.SafeEncodeString(fmt.Sprint(f.Sum32())) + + return fmt.Sprintf("%s-%s", binding.Spec.Network.Name, topologyHash), nil +} diff --git a/internal/controller/networkbinding_controller_test.go b/internal/controller/networkbinding_controller_test.go new file mode 100644 index 0000000..72629a5 --- /dev/null +++ b/internal/controller/networkbinding_controller_test.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +var _ = Describe("NetworkBinding Controller", func() { + Context("When reconciling a new resource", Ordered, func() { + const networkName = "test-binding-network" + const bindingName = "test-binding" + + ctx := context.Background() + + networkNamespacedName := types.NamespacedName{ + Name: networkName, + Namespace: "default", + } + network := &networkingv1alpha.Network{} + + bindingNamespacedName := types.NamespacedName{ + Name: bindingName, + Namespace: "default", + } + binding := &networkingv1alpha.NetworkBinding{} + + BeforeEach(func() { + By("creating a Network") + err := k8sClient.Get(ctx, networkNamespacedName, network) + if err != nil && errors.IsNotFound(err) { + network = &networkingv1alpha.Network{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: networkName, + }, + Spec: networkingv1alpha.NetworkSpec{ + IPAM: networkingv1alpha.NetworkIPAM{ + Mode: networkingv1alpha.NetworkIPAMModeAuto, + }, + }, + } + Expect(k8sClient.Create(ctx, network)).To(Succeed()) + } + Expect(client.IgnoreNotFound(err)).To(BeNil()) + + By("creating a NetworkBinding") + err = k8sClient.Get(ctx, bindingNamespacedName, binding) + if err != nil && errors.IsNotFound(err) { + resource := &networkingv1alpha.NetworkBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, + Namespace: "default", + }, + Spec: networkingv1alpha.NetworkBindingSpec{ + Network: networkingv1alpha.NetworkRef{ + Name: network.Name, + }, + Topology: map[string]string{ + "topo-key": "value", + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + Expect(client.IgnoreNotFound(err)).To(BeNil()) + + }) + + AfterEach(func() { + binding := &networkingv1alpha.NetworkBinding{} + Expect(k8sClient.Get(ctx, bindingNamespacedName, binding)).To(Succeed()) + + networkContextName, err := networkContextNameForBinding(binding) + Expect(err).To(BeNil()) + Expect(k8sClient.Delete(ctx, binding)).To(Succeed()) + + networkContext := &networkingv1alpha.NetworkContext{} + networkContextNamespacedName := types.NamespacedName{ + Name: networkContextName, + Namespace: "default", + } + Expect(k8sClient.Get(ctx, networkContextNamespacedName, networkContext)).To(Succeed()) + Expect(k8sClient.Delete(ctx, networkContext)) + + network := &networkingv1alpha.Network{} + 
Expect(k8sClient.Get(ctx, networkNamespacedName, network)).To(Succeed()) + Expect(k8sClient.Delete(ctx, network)).To(Succeed()) + }) + + It("should successfully create a NetworkContext", func() { + err := k8sClient.Get(ctx, bindingNamespacedName, binding) + Expect(err).To(BeNil()) + + bindingReady := apimeta.IsStatusConditionTrue(binding.Status.Conditions, networkingv1alpha.NetworkBindingReady) + Expect(bindingReady).To(BeFalse()) + + networkContextName, err := networkContextNameForBinding(binding) + Expect(err).To(BeNil()) + + var networkContext networkingv1alpha.NetworkContext + networkContextObjectKey := client.ObjectKey{ + Namespace: binding.Namespace, + Name: networkContextName, + } + + Eventually(ctx, func() error { + return k8sClient.Get(ctx, networkContextObjectKey, &networkContext) + }).Should(BeNil()) + }) + + It("should become Ready once the referenced NetworkContext is Ready", func() { + networkContextName, err := networkContextNameForBinding(binding) + Expect(err).To(BeNil()) + + var networkContext networkingv1alpha.NetworkContext + networkContextObjectKey := client.ObjectKey{ + Namespace: binding.Namespace, + Name: networkContextName, + } + + Eventually(ctx, func() error { + return k8sClient.Get(ctx, networkContextObjectKey, &networkContext) + }).Should(BeNil()) + + // We set the status manually here, as external controllers are responsible + // for updating Context readiness right now. + // + // TODO(jreese) - Consider having a `Programmed` condition that external + // controllers use, and have a NSO controller update the `Ready` condition? + + apimeta.SetStatusCondition(&networkContext.Status.Conditions, metav1.Condition{ + Type: networkingv1alpha.NetworkContextReady, + Status: metav1.ConditionTrue, + Reason: "Test", + Message: "test condition", + }) + + Expect(k8sClient.Status().Update(ctx, &networkContext)).To(Succeed()) + + Eventually(func() bool { + err := k8sClient.Get(ctx, bindingNamespacedName, binding) + Expect(err).To(BeNil()) + + return apimeta.IsStatusConditionTrue(binding.Status.Conditions, networkingv1alpha.NetworkBindingReady) + }).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/networkcontext_controller.go b/internal/controller/networkcontext_controller.go new file mode 100644 index 0000000..344a7e6 --- /dev/null +++ b/internal/controller/networkcontext_controller.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// NetworkContextReconciler reconciles a NetworkContext object +type NetworkContextReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkcontexts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkcontexts/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkcontexts/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the NetworkContext object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile +func (r *NetworkContextReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NetworkContextReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&networkingv1alpha.NetworkContext{}). + Named("networkcontext"). + Complete(r) +} diff --git a/internal/controller/networkcontext_controller_test.go b/internal/controller/networkcontext_controller_test.go new file mode 100644 index 0000000..2148d56 --- /dev/null +++ b/internal/controller/networkcontext_controller_test.go @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +var _ = Describe("NetworkContext Controller", Pending, func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + networkcontext := &networkingv1alpha.NetworkContext{} + + BeforeEach(func() { + By("creating the custom resource for the Kind NetworkContext") + err := k8sClient.Get(ctx, typeNamespacedName, networkcontext) + if err != nil && errors.IsNotFound(err) { + resource := &networkingv1alpha.NetworkContext{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &networkingv1alpha.NetworkContext{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance NetworkContext") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &NetworkContextReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) diff --git a/internal/controller/networkpolicy_controller.go b/internal/controller/networkpolicy_controller.go new file mode 100644 index 0000000..1ed0b72 --- /dev/null +++ b/internal/controller/networkpolicy_controller.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// NetworkPolicyReconciler reconciles a NetworkPolicy object +type NetworkPolicyReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkpolicies/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=networkpolicies/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the NetworkPolicy object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile +func (r *NetworkPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NetworkPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&networkingv1alpha.NetworkPolicy{}). + Named("networkpolicy"). + Complete(r) +} diff --git a/internal/controller/networkpolicy_controller_test.go b/internal/controller/networkpolicy_controller_test.go new file mode 100644 index 0000000..b3449fd --- /dev/null +++ b/internal/controller/networkpolicy_controller_test.go @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +var _ = Describe("NetworkPolicy Controller", Pending, func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + networkpolicy := &networkingv1alpha.NetworkPolicy{} + + BeforeEach(func() { + By("creating the custom resource for the Kind NetworkPolicy") + err := k8sClient.Get(ctx, typeNamespacedName, networkpolicy) + if err != nil && errors.IsNotFound(err) { + resource := &networkingv1alpha.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. 
+ } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &networkingv1alpha.NetworkPolicy{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance NetworkPolicy") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &NetworkPolicyReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/subnet_controller.go b/internal/controller/subnet_controller.go new file mode 100644 index 0000000..e9a4bb5 --- /dev/null +++ b/internal/controller/subnet_controller.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "fmt" + + "google.golang.org/protobuf/proto" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// SubnetReconciler reconciles a Subnet object +type SubnetReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnets/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the Subnet object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile
+func (r *SubnetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+
+	var subnet networkingv1alpha.Subnet
+	if err := r.Client.Get(ctx, req.NamespacedName, &subnet); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	if !subnet.DeletionTimestamp.IsZero() {
+		return ctrl.Result{}, nil
+	}
+
+	logger.Info("reconciling subnet")
+	defer logger.Info("reconcile complete")
+
+	// TODO(jreese) finalizer work
+
+	var networkContext networkingv1alpha.NetworkContext
+	networkContextObjectKey := client.ObjectKey{
+		Namespace: subnet.Namespace,
+		Name:      subnet.Spec.NetworkContext.Name,
+	}
+	if err := r.Client.Get(ctx, networkContextObjectKey, &networkContext); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed fetching network context: %w", err)
+	}
+
+	// TODO(jreese) get topology key from well known package
+	cityCode, ok := networkContext.Spec.Topology["topology.datum.net/city-code"]
+	if !ok {
+		return ctrl.Result{}, fmt.Errorf("unable to find topology key: topology.datum.net/city-code")
+	}
+
+	// TODO(jreese) move to proper higher level subnet allocation logic, this is
+	// for the rough POC! Pay attention to the subnet class, etc.
+	//
+	// GCP allocates a /20 per region. Distribution seems to be that as new
+	// regions come online, a /20 is allocated, but there appears to be at
+	// least a /15 between each region's /20. For example:
+	//
+	//	europe-west9      10.200.0.0/20
+	//	us-east5          10.202.0.0/20
+	//	europe-southwest1 10.204.0.0/20
+	//	us-south1         10.206.0.0/20
+	//	me-west1          10.208.0.0/20
+	//
+	// There are a few scenarios where this isn't the case.
+
+	var startAddress string
+	switch cityCode {
+	case "DFW":
+		startAddress = "10.128.0.0"
+	case "DLS":
+		startAddress = "10.130.0.0"
+	case "LHR":
+		startAddress = "10.132.0.0"
+	default:
+		// Guard against unmapped city codes; otherwise the subnet would be
+		// marked Ready with an empty start address.
+		return ctrl.Result{}, fmt.Errorf("no prefix mapping for city code: %s", cityCode)
+	}
+
+	subnet.Status.StartAddress = proto.String(startAddress)
+	subnet.Status.PrefixLength = proto.Int32(20)
+
+	apimeta.SetStatusCondition(&subnet.Status.Conditions, metav1.Condition{
+		Type:               "Ready",
+		Status:             metav1.ConditionTrue,
+		Reason:             "PrefixAllocated",
+		ObservedGeneration: subnet.Generation,
+		Message:            "Subnet has been allocated a prefix",
+	})
+
+	if err := r.Client.Status().Update(ctx, &subnet); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed updating subnet status: %w", err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *SubnetReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&networkingv1alpha.Subnet{}, builder.WithPredicates(
+			predicate.NewPredicateFuncs(func(object client.Object) bool {
+				// Don't bother processing subnets that have been allocated and
+				// are not being deleted
+				o := object.(*networkingv1alpha.Subnet)
+				return o.Status.StartAddress == nil || !o.DeletionTimestamp.IsZero()
+			}),
+		)).
+		Named("subnet").
+		Complete(r)
+}
diff --git a/internal/controller/subnet_controller_test.go b/internal/controller/subnet_controller_test.go
new file mode 100644
index 0000000..e404eff
--- /dev/null
+++ b/internal/controller/subnet_controller_test.go
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package controller
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +var _ = Describe("Subnet Controller", Pending, func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + subnet := &networkingv1alpha.Subnet{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Subnet") + err := k8sClient.Get(ctx, typeNamespacedName, subnet) + if err != nil && errors.IsNotFound(err) { + resource := &networkingv1alpha.Subnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &networkingv1alpha.Subnet{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Subnet") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &SubnetReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/subnetclaim_controller.go b/internal/controller/subnetclaim_controller.go new file mode 100644 index 0000000..bad7f80 --- /dev/null +++ b/internal/controller/subnetclaim_controller.go @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package controller + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" +) + +// SubnetClaimReconciler reconciles a SubnetClaim object +type SubnetClaimReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnetclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnetclaims/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=networking.datumapis.com,resources=subnetclaims/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the SubnetClaim object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile
+func (r *SubnetClaimReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+
+	var claim networkingv1alpha.SubnetClaim
+	if err := r.Client.Get(ctx, req.NamespacedName, &claim); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	if !claim.DeletionTimestamp.IsZero() {
+		return ctrl.Result{}, nil
+	}
+
+	logger.Info("reconciling subnet claim")
+	defer logger.Info("reconcile complete")
+
+	// TODO(jreese) move to a network context level subnet allocator, instead of
+	// the 1:1 SubnetClaim:Subnet that's here right now.
+
+	var subnet networkingv1alpha.Subnet
+	if err := r.Client.Get(ctx, client.ObjectKeyFromObject(&claim), &subnet); client.IgnoreNotFound(err) != nil {
+		return ctrl.Result{}, fmt.Errorf("failed fetching subnet: %w", err)
+	}
+
+	if subnet.CreationTimestamp.IsZero() {
+		var networkContext networkingv1alpha.NetworkContext
+		networkContextObjectKey := client.ObjectKey{
+			Namespace: claim.Namespace,
+			Name:      claim.Spec.NetworkContext.Name,
+		}
+		if err := r.Client.Get(ctx, networkContextObjectKey, &networkContext); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed fetching network context: %w", err)
+		}
+
+		subnet = networkingv1alpha.Subnet{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: claim.Namespace,
+				Name:      claim.Name,
+			},
+			Spec: networkingv1alpha.SubnetSpec{
+				SubnetClass:    claim.Spec.SubnetClass,
+				NetworkContext: claim.Spec.NetworkContext,
+				Topology:       claim.Spec.Topology,
+			},
+		}
+
+		if err := controllerutil.SetControllerReference(&networkContext, &subnet, r.Scheme); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to set controller on subnet: %w", err)
+		}
+
+		if err := r.Client.Create(ctx, &subnet); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed creating subnet: %w", err)
+		}
+
+		apimeta.SetStatusCondition(&claim.Status.Conditions, metav1.Condition{
+			Type:               "Ready",
+			Status:             metav1.ConditionFalse,
+			Reason:             "SubnetNotReady",
+			ObservedGeneration: claim.Generation,
+			Message:            "Subnet is not ready",
+		})
+
+		if err := r.Client.Status().Update(ctx, &claim); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed updating claim status: %w", err)
+		}
+
+		return ctrl.Result{}, nil
+	}
+
+	if !apimeta.IsStatusConditionTrue(subnet.Status.Conditions, "Ready") {
+		logger.Info("subnet is not ready")
+		return ctrl.Result{}, nil
+	}
+
+	claim.Status.SubnetRef = &networkingv1alpha.LocalSubnetReference{
+		Name: subnet.Name,
+	}
+
+	apimeta.SetStatusCondition(&claim.Status.Conditions, metav1.Condition{
+		Type:               "Ready",
+		Status:             metav1.ConditionTrue,
+		Reason:             "SubnetReady",
+		ObservedGeneration: claim.Generation,
+		Message:            "Subnet ready",
+	})
+
+	if err := r.Client.Status().Update(ctx, &claim); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed updating claim status: %w", err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *SubnetClaimReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&networkingv1alpha.SubnetClaim{}, builder.WithPredicates(
+			predicate.NewPredicateFuncs(func(object client.Object) bool {
+				// Don't bother processing claims that have already been bound to a subnet
+				o := object.(*networkingv1alpha.SubnetClaim)
+				return o.Status.SubnetRef == nil
+			}),
+		)).
+		// TODO(jreese) change when we don't have claims 1:1 with subnets
+		Watches(&networkingv1alpha.Subnet{}, &handler.EnqueueRequestForObject{}).
+		Named("subnetclaim").
+		Complete(r)
+}
diff --git a/internal/controller/subnetclaim_controller_test.go b/internal/controller/subnetclaim_controller_test.go
new file mode 100644
index 0000000..390b8d3
--- /dev/null
+++ b/internal/controller/subnetclaim_controller_test.go
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package controller
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha"
+)
+
+var _ = Describe("SubnetClaim Controller", Pending, func() {
+	Context("When reconciling a resource", func() {
+		const resourceName = "test-resource"
+
+		ctx := context.Background()
+
+		typeNamespacedName := types.NamespacedName{
+			Name:      resourceName,
+			Namespace: "default", // TODO(user): Modify as needed
+		}
+		subnetclaim := &networkingv1alpha.SubnetClaim{}
+
+		BeforeEach(func() {
+			By("creating the custom resource for the Kind SubnetClaim")
+			err := k8sClient.Get(ctx, typeNamespacedName, subnetclaim)
+			if err != nil && errors.IsNotFound(err) {
+				resource := &networkingv1alpha.SubnetClaim{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      resourceName,
+						Namespace: "default",
+					},
+					// TODO(user): Specify other spec details if needed.
+				}
+				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+			}
+		})
+
+		AfterEach(func() {
+			// TODO(user): Cleanup logic after each test, like removing the resource instance.
+			resource := &networkingv1alpha.SubnetClaim{}
+			err := k8sClient.Get(ctx, typeNamespacedName, resource)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Cleanup the specific resource instance SubnetClaim")
+			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+		})
+		It("should successfully reconcile the resource", func() {
+			By("Reconciling the created resource")
+			controllerReconciler := &SubnetClaimReconciler{
+				Client: k8sClient,
+				Scheme: k8sClient.Scheme(),
+			}
+
+			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+				NamespacedName: typeNamespacedName,
+			})
+			Expect(err).NotTo(HaveOccurred())
+			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
+			// Example: If you expect a certain status condition after reconciliation, verify it here.
+		})
+	})
+})
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
new file mode 100644
index 0000000..d24c0f4
--- /dev/null
+++ b/internal/controller/suite_test.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + + networkingv1alpha "go.datum.net/network-services-operator/api/v1alpha" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager manager.Manager +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + ControlPlane: envtest.ControlPlane{ + APIServer: &envtest.APIServer{}, + }, + } + + testEnv.ControlPlane.APIServer.Configure().Set("advertise-address", "127.0.0.1") + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = networkingv1alpha.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&NetworkBindingReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager) + Expect(err).To(BeNil()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) From 7bf659a3ae53a3ed764ad926ba160b4343675a10 Mon Sep 17 00:00:00 2001 From: Joshua Reese Date: Tue, 19 Nov 2024 02:14:34 +0000 Subject: [PATCH 2/3] Addressed linter violations. 
--- .../networkbinding_controller_test.go | 20 +++++++++---------- internal/controller/suite_test.go | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/controller/networkbinding_controller_test.go b/internal/controller/networkbinding_controller_test.go index 72629a5..5012fe1 100644 --- a/internal/controller/networkbinding_controller_test.go +++ b/internal/controller/networkbinding_controller_test.go @@ -52,7 +52,7 @@ var _ = Describe("NetworkBinding Controller", func() { } Expect(k8sClient.Create(ctx, network)).To(Succeed()) } - Expect(client.IgnoreNotFound(err)).To(BeNil()) + Expect(client.IgnoreNotFound(err)).To(Succeed()) By("creating a NetworkBinding") err = k8sClient.Get(ctx, bindingNamespacedName, binding) @@ -73,7 +73,7 @@ var _ = Describe("NetworkBinding Controller", func() { } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } - Expect(client.IgnoreNotFound(err)).To(BeNil()) + Expect(client.IgnoreNotFound(err)).To(Succeed()) }) @@ -82,7 +82,7 @@ var _ = Describe("NetworkBinding Controller", func() { Expect(k8sClient.Get(ctx, bindingNamespacedName, binding)).To(Succeed()) networkContextName, err := networkContextNameForBinding(binding) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(k8sClient.Delete(ctx, binding)).To(Succeed()) networkContext := &networkingv1alpha.NetworkContext{} @@ -91,7 +91,7 @@ var _ = Describe("NetworkBinding Controller", func() { Namespace: "default", } Expect(k8sClient.Get(ctx, networkContextNamespacedName, networkContext)).To(Succeed()) - Expect(k8sClient.Delete(ctx, networkContext)) + Expect(k8sClient.Delete(ctx, networkContext)).To(Succeed()) network := &networkingv1alpha.Network{} Expect(k8sClient.Get(ctx, networkNamespacedName, network)).To(Succeed()) @@ -100,13 +100,13 @@ var _ = Describe("NetworkBinding Controller", func() { It("should successfully create a NetworkContext", func() { err := k8sClient.Get(ctx, bindingNamespacedName, binding) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) bindingReady := apimeta.IsStatusConditionTrue(binding.Status.Conditions, networkingv1alpha.NetworkBindingReady) Expect(bindingReady).To(BeFalse()) networkContextName, err := networkContextNameForBinding(binding) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) var networkContext networkingv1alpha.NetworkContext networkContextObjectKey := client.ObjectKey{ @@ -116,12 +116,12 @@ var _ = Describe("NetworkBinding Controller", func() { Eventually(ctx, func() error { return k8sClient.Get(ctx, networkContextObjectKey, &networkContext) - }).Should(BeNil()) + }).Should(Succeed()) }) It("should become Ready once the referenced NetworkContext is Ready", func() { networkContextName, err := networkContextNameForBinding(binding) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) var networkContext networkingv1alpha.NetworkContext networkContextObjectKey := client.ObjectKey{ @@ -131,7 +131,7 @@ var _ = Describe("NetworkBinding Controller", func() { Eventually(ctx, func() error { return k8sClient.Get(ctx, networkContextObjectKey, &networkContext) - }).Should(BeNil()) + }).Should(Succeed()) // We set the status manually here, as external controllers are responsible // for updating Context readiness right now. 
@@ -150,7 +150,7 @@ var _ = Describe("NetworkBinding Controller", func() { Eventually(func() bool { err := k8sClient.Get(ctx, bindingNamespacedName, binding) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) return apimeta.IsStatusConditionTrue(binding.Status.Conditions, networkingv1alpha.NetworkBindingReady) }).Should(BeTrue()) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index d24c0f4..467510a 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -88,7 +88,7 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) go func() { defer GinkgoRecover() From a447d061c176deae00367c768810f783955b49ef Mon Sep 17 00:00:00 2001 From: Joshua Reese Date: Tue, 19 Nov 2024 02:29:08 +0000 Subject: [PATCH 3/3] Remove unnecessary duplicate status update. --- internal/controller/networkbinding_controller.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/controller/networkbinding_controller.go b/internal/controller/networkbinding_controller.go index 6488cec..c12223e 100644 --- a/internal/controller/networkbinding_controller.go +++ b/internal/controller/networkbinding_controller.go @@ -149,9 +149,7 @@ func (r *NetworkBindingReconciler) Reconcile(ctx context.Context, req ctrl.Reque readyCondition.Reason = "NetworkContextReady" readyCondition.Message = "Network context is ready." - if err := r.Client.Status().Update(ctx, &binding); err != nil { - return ctrl.Result{}, fmt.Errorf("failed updating binding status: %w", err) - } + // Update is handled in the defer function above. return ctrl.Result{}, nil }
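
Note on the NetworkContext naming scheme introduced in patch 1: names are
derived by hashing the binding's topology with FNV-1a and appending the
encoded hash to the network name, so each distinct topology maps to exactly
one deterministic context name (json.Marshal emits map keys in sorted order,
which keeps the hash stable across reconciles). A minimal standalone sketch
of the scheme, using the same hashing and encoding helpers as the patch; the
contextName wrapper below is illustrative only:

	package main

	import (
		"encoding/json"
		"fmt"
		"hash/fnv"

		"k8s.io/apimachinery/pkg/util/rand"
	)

	// contextName mirrors networkContextNameForBinding from
	// networkbinding_controller.go, minus the CreationTimestamp guard.
	func contextName(networkName string, topology map[string]string) (string, error) {
		topologyBytes, err := json.Marshal(topology)
		if err != nil {
			return "", fmt.Errorf("failed marshaling topology to json: %w", err)
		}
		f := fnv.New32a()
		f.Write(topologyBytes)
		return fmt.Sprintf("%s-%s", networkName, rand.SafeEncodeString(fmt.Sprint(f.Sum32()))), nil
	}

	func main() {
		// Same network name and topology as networkbinding_controller_test.go.
		name, _ := contextName("test-binding-network", map[string]string{"topo-key": "value"})
		fmt.Println(name) // deterministic: identical topology always yields the same name
	}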