From d7c4ba2bb99927c2a915d2891f8538b08cefe228 Mon Sep 17 00:00:00 2001
From: Andrea Mazzotti
Date: Wed, 15 May 2024 11:46:53 +0200
Subject: [PATCH] Check k3s-serving secret to determine controlPlane.Status.Initialized

Signed-off-by: Andrea Mazzotti
---
 .../kthreescontrolplane_controller.go |  5 +-
 pkg/k3s/workload_cluster.go           | 26 ++++++
 pkg/k3s/workload_cluster_test.go      | 81 +++++++++++++++++++
 3 files changed, 111 insertions(+), 1 deletion(-)
 create mode 100644 pkg/k3s/workload_cluster_test.go

diff --git a/controlplane/controllers/kthreescontrolplane_controller.go b/controlplane/controllers/kthreescontrolplane_controller.go
index 07506b92..d74ddccf 100644
--- a/controlplane/controllers/kthreescontrolplane_controller.go
+++ b/controlplane/controllers/kthreescontrolplane_controller.go
@@ -397,9 +397,12 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	kcp.Status.ReadyReplicas = status.ReadyNodes
 	kcp.Status.UnavailableReplicas = replicas - status.ReadyNodes
 
+	if status.HasK3sServingSecret {
+		kcp.Status.Initialized = true
+	}
+
 	if kcp.Status.ReadyReplicas > 0 {
 		kcp.Status.Ready = true
-		kcp.Status.Initialized = true
 		conditions.MarkTrue(kcp, controlplanev1.AvailableCondition)
 	}
 
diff --git a/pkg/k3s/workload_cluster.go b/pkg/k3s/workload_cluster.go
index 804e938d..9ee162e3 100644
--- a/pkg/k3s/workload_cluster.go
+++ b/pkg/k3s/workload_cluster.go
@@ -25,6 +25,7 @@ import (
 	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2"
 	"github.com/k3s-io/cluster-api-k3s/pkg/etcd"
@@ -34,6 +35,7 @@ import (
 const (
 	kubeProxyKey              = "kube-proxy"
 	labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
+	k3sServingSecretKey       = "k3s-serving"
 )
 
 var (
@@ -74,6 +76,8 @@ type ClusterStatus struct {
 	Nodes int32
 	// ReadyNodes are the count of nodes that are reporting ready
 	ReadyNodes int32
+	// HasK3sServingSecret will be true if the k3s-serving secret has been uploaded, false otherwise.
+	HasK3sServingSecret bool
 }
 
 func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
@@ -105,6 +109,28 @@ func (w *Workload) ClusterStatus(ctx context.Context) (ClusterStatus, error) {
 		}
 	}
 
+	// Get the 'k3s-serving' secret in the 'kube-system' namespace.
+	//
+	// The resource we fetch has no particular importance,
+	// this is just to verify that the Control Plane has been initialized,
+	// by fetching any resource that has been uploaded.
+	// Since the `k3s-serving` secret contains the cluster certificate,
+	// this secret is guaranteed to exist in any k3s deployment,
+	// therefore it can be reliably used for this test.
+	key := ctrlclient.ObjectKey{
+		Name:      k3sServingSecretKey,
+		Namespace: metav1.NamespaceSystem,
+	}
+
+	err = w.Client.Get(ctx, key, &corev1.Secret{})
+	// In case of error we do assume the control plane is not initialized yet.
+	if err != nil {
+		logger := log.FromContext(ctx)
+		logger.Info("Control Plane does not seem to be initialized yet.", "reason", err.Error())
+	}
+
+	status.HasK3sServingSecret = err == nil
+
 	return status, nil
 }
 
diff --git a/pkg/k3s/workload_cluster_test.go b/pkg/k3s/workload_cluster_test.go
new file mode 100644
index 00000000..5e3a5dd0
--- /dev/null
+++ b/pkg/k3s/workload_cluster_test.go
@@ -0,0 +1,81 @@
+package k3s
+
+import (
+	"context"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestClusterStatus(t *testing.T) {
+	node1 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node1",
+			Labels: map[string]string{
+				labelNodeRoleControlPlane: "true",
+			},
+		},
+		Status: corev1.NodeStatus{
+			Conditions: []corev1.NodeCondition{{
+				Type:   corev1.NodeReady,
+				Status: corev1.ConditionTrue,
+			}},
+		},
+	}
+	node2 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node2",
+			Labels: map[string]string{
+				labelNodeRoleControlPlane: "true",
+			},
+		},
+		Status: corev1.NodeStatus{
+			Conditions: []corev1.NodeCondition{{
+				Type:   corev1.NodeReady,
+				Status: corev1.ConditionFalse,
+			}},
+		},
+	}
+	servingSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      k3sServingSecretKey,
+			Namespace: metav1.NamespaceSystem,
+		},
+	}
+	tests := []struct {
+		name            string
+		objs            []client.Object
+		expectErr       bool
+		expectHasSecret bool
+	}{
+		{
+			name:            "returns cluster status",
+			objs:            []client.Object{node1, node2},
+			expectHasSecret: false,
+		},
+		{
+			name:            "returns cluster status with k3s-serving secret",
+			objs:            []client.Object{node1, node2, servingSecret},
+			expectHasSecret: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
+			w := &Workload{
+				Client: fakeClient,
+			}
+			status, err := w.ClusterStatus(context.TODO())
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(status.Nodes).To(BeEquivalentTo(2))
+			g.Expect(status.ReadyNodes).To(BeEquivalentTo(1))
+			g.Expect(status.HasK3sServingSecret).To(Equal(tt.expectHasSecret))
+		})
+	}
+}