diff --git a/controllers/toolchainclustercache/healthchecker.go b/controllers/toolchaincluster/healthchecker.go similarity index 64% rename from controllers/toolchainclustercache/healthchecker.go rename to controllers/toolchaincluster/healthchecker.go index bd1dab3c..01d2ead9 100644 --- a/controllers/toolchainclustercache/healthchecker.go +++ b/controllers/toolchaincluster/healthchecker.go @@ -1,26 +1,18 @@ -package toolchainclustercache +package toolchaincluster import ( "context" - "fmt" "strings" - "time" toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" - "github.com/codeready-toolchain/toolchain-common/pkg/cluster" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" kubeclientset "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" ) -var logger = logf.Log.WithName("toolchaincluster_healthcheck") - const ( healthzOk = "/healthz responded with ok" healthzNotOk = "/healthz responded without ok" @@ -28,13 +20,6 @@ const ( clusterReachableMsg = "cluster is reachable" ) -func StartHealthChecks(ctx context.Context, mgr manager.Manager, namespace string, period time.Duration) { - logger.Info("starting health checks", "period", period) - go wait.Until(func() { - updateClusterStatuses(ctx, namespace, mgr.GetClient()) - }, period, ctx.Done()) -} - type HealthChecker struct { localClusterClient client.Client remoteClusterClient client.Client @@ -42,51 +27,6 @@ type HealthChecker struct { logger logr.Logger } -// updateClusterStatuses checks cluster health and updates status of all ToolchainClusters -func updateClusterStatuses(ctx context.Context, namespace string, cl client.Client) { - clusters := &toolchainv1alpha1.ToolchainClusterList{} - err := cl.List(ctx, clusters, client.InNamespace(namespace)) - if err != nil { - 
logger.Error(err, "unable to list existing ToolchainClusters") - return - } - if len(clusters.Items) == 0 { - logger.Info("no ToolchainCluster found") - } - - for _, obj := range clusters.Items { - clusterObj := obj.DeepCopy() - clusterLogger := logger.WithValues("cluster-name", clusterObj.Name) - - cachedCluster, ok := cluster.GetCachedToolchainCluster(clusterObj.Name) - if !ok { - clusterLogger.Error(fmt.Errorf("cluster %s not found in cache", clusterObj.Name), "failed to retrieve stored data for cluster") - clusterObj.Status.Conditions = []toolchainv1alpha1.ToolchainClusterCondition{clusterOfflineCondition()} - if err := cl.Status().Update(ctx, clusterObj); err != nil { - clusterLogger.Error(err, "failed to update the status of ToolchainCluster") - } - continue - } - - clientSet, err := kubeclientset.NewForConfig(cachedCluster.RestConfig) - if err != nil { - clusterLogger.Error(err, "cannot create ClientSet for a ToolchainCluster") - continue - } - - healthChecker := &HealthChecker{ - localClusterClient: cl, - remoteClusterClient: cachedCluster.Client, - remoteClusterClientset: clientSet, - logger: clusterLogger, - } - // clusterLogger.Info("getting the current state of ToolchainCluster") - if err := healthChecker.updateIndividualClusterStatus(ctx, clusterObj); err != nil { - clusterLogger.Error(err, "unable to update cluster status of ToolchainCluster") - } - } -} - func (hc *HealthChecker) updateIndividualClusterStatus(ctx context.Context, toolchainCluster *toolchainv1alpha1.ToolchainCluster) error { currentClusterStatus := hc.getClusterHealthStatus(ctx) diff --git a/controllers/toolchaincluster/healthchecker_test.go b/controllers/toolchaincluster/healthchecker_test.go new file mode 100644 index 00000000..773bb4b3 --- /dev/null +++ b/controllers/toolchaincluster/healthchecker_test.go @@ -0,0 +1,177 @@ +package toolchaincluster + +import ( + "context" + "testing" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + 
"github.com/codeready-toolchain/toolchain-common/pkg/cluster" + "github.com/codeready-toolchain/toolchain-common/pkg/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/h2non/gock.v1" + corev1 "k8s.io/api/core/v1" + kubeclientset "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var logger = logf.Log.WithName("toolchaincluster_healthcheck") + +func TestClusterHealthChecks(t *testing.T) { + + // given + defer gock.Off() + tcNs := "test-namespace" + gock.New("http://cluster.com"). + Get("healthz"). + Persist(). + Reply(200). + BodyString("ok") + gock.New("http://unstable.com"). + Get("healthz"). + Persist(). + Reply(200). + BodyString("unstable") + gock.New("http://not-found.com"). + Get("healthz"). + Persist(). + Reply(404) + + tests := map[string]struct { + tctype string + apiendpoint string + clusterconditions []toolchainv1alpha1.ToolchainClusterCondition + status toolchainv1alpha1.ToolchainClusterStatus + }{ + //ToolchainCluster.status doesn't contain any conditions + "UnstableNoCondition": { + tctype: "unstable", + apiendpoint: "http://unstable.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{unhealthy(), notOffline()}, + status: toolchainv1alpha1.ToolchainClusterStatus{}, + }, + "StableNoCondition": { + tctype: "stable", + apiendpoint: "http://cluster.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{healthy()}, + status: toolchainv1alpha1.ToolchainClusterStatus{}, + }, + "NotFoundNoCondition": { + tctype: "not-found", + apiendpoint: "http://not-found.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{offline()}, + status: toolchainv1alpha1.ToolchainClusterStatus{}, + }, + //ToolchainCluster.status already contains conditions + "UnstableContainsCondition": { + tctype: "unstable", + apiendpoint: "http://unstable.com", + clusterconditions: 
[]toolchainv1alpha1.ToolchainClusterCondition{unhealthy(), notOffline()}, + status: withStatus(healthy()), + }, + "StableContainsCondition": { + tctype: "stable", + apiendpoint: "http://cluster.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{healthy()}, + status: withStatus(offline()), + }, + "NotFoundContainsCondition": { + tctype: "not-found", + apiendpoint: "http://not-found.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{offline()}, + status: withStatus(healthy()), + }, + //if the connection cannot be established at beginning, then it should be offline + "OfflineConnectionNotEstablished": { + tctype: "failing", + apiendpoint: "http://failing.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{offline()}, + status: toolchainv1alpha1.ToolchainClusterStatus{}, + }, + //if no zones nor region is retrieved, then keep the current + "NoZoneKeepCurrent": { + tctype: "stable", + apiendpoint: "http://cluster.com", + clusterconditions: []toolchainv1alpha1.ToolchainClusterCondition{healthy()}, + status: withStatus(offline()), + }, + } + for k, tc := range tests { + t.Run(k, func(t *testing.T) { + tctype, sec := newToolchainCluster(tc.tctype, tcNs, tc.apiendpoint, tc.status) + cl := test.NewFakeClient(t, tctype, sec) + reset := setupCachedClusters(t, cl, tctype) + defer reset() + cachedtc, found := cluster.GetCachedToolchainCluster(tctype.Name) + require.True(t, found) + cacheclient, err := kubeclientset.NewForConfig(cachedtc.RestConfig) + require.NoError(t, err) + healthChecker := &HealthChecker{ + localClusterClient: cl, + remoteClusterClient: cachedtc.Client, + remoteClusterClientset: cacheclient, + logger: logger, + } + // when + err = healthChecker.updateIndividualClusterStatus(context.TODO(), tctype) + + //then + require.NoError(t, err) + assertClusterStatus(t, cl, tc.tctype, tc.clusterconditions...) 
+ }) + } +} + +func withStatus(conditions ...toolchainv1alpha1.ToolchainClusterCondition) toolchainv1alpha1.ToolchainClusterStatus { + return toolchainv1alpha1.ToolchainClusterStatus{ + Conditions: conditions, + } +} +func assertClusterStatus(t *testing.T, cl client.Client, clusterName string, clusterConds ...toolchainv1alpha1.ToolchainClusterCondition) { + tc := &toolchainv1alpha1.ToolchainCluster{} + err := cl.Get(context.TODO(), test.NamespacedName("test-namespace", clusterName), tc) + require.NoError(t, err) + assert.Len(t, tc.Status.Conditions, len(clusterConds)) +ExpConditions: + for _, expCond := range clusterConds { + for _, cond := range tc.Status.Conditions { + if expCond.Type == cond.Type { + assert.Equal(t, expCond.Status, cond.Status) + assert.Equal(t, expCond.Reason, cond.Reason) + assert.Equal(t, expCond.Message, cond.Message) + continue ExpConditions + } + } + assert.Failf(t, "condition not found", "the list of conditions %v doesn't contain the expected condition %v", tc.Status.Conditions, expCond) + } +} +func healthy() toolchainv1alpha1.ToolchainClusterCondition { + return toolchainv1alpha1.ToolchainClusterCondition{ + Type: toolchainv1alpha1.ToolchainClusterReady, + Status: corev1.ConditionTrue, + Reason: "ClusterReady", + Message: "/healthz responded with ok", + } +} +func unhealthy() toolchainv1alpha1.ToolchainClusterCondition { + return toolchainv1alpha1.ToolchainClusterCondition{Type: toolchainv1alpha1.ToolchainClusterReady, + Status: corev1.ConditionFalse, + Reason: "ClusterNotReady", + Message: "/healthz responded without ok", + } +} +func offline() toolchainv1alpha1.ToolchainClusterCondition { + return toolchainv1alpha1.ToolchainClusterCondition{Type: toolchainv1alpha1.ToolchainClusterOffline, + Status: corev1.ConditionTrue, + Reason: "ClusterNotReachable", + Message: "cluster is not reachable", + } +} +func notOffline() toolchainv1alpha1.ToolchainClusterCondition { + return toolchainv1alpha1.ToolchainClusterCondition{Type: 
toolchainv1alpha1.ToolchainClusterOffline, + Status: corev1.ConditionFalse, + Reason: "ClusterReachable", + Message: "cluster is reachable", + } +} diff --git a/controllers/toolchaincluster/toolchaincluster_controller.go b/controllers/toolchaincluster/toolchaincluster_controller.go new file mode 100644 index 00000000..fb564cc1 --- /dev/null +++ b/controllers/toolchaincluster/toolchaincluster_controller.go @@ -0,0 +1,82 @@ +package toolchaincluster + +import ( + "context" + "fmt" + "time" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-common/pkg/cluster" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + kubeclientset "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// Reconciler reconciles a ToolchainCluster object +type Reconciler struct { + Client client.Client + Scheme *runtime.Scheme + RequeAfter time.Duration +} + +// SetupWithManager sets up the controller with the Manager. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&toolchainv1alpha1.ToolchainCluster{}). + Complete(r) +} + +// Reconcile reads that state of the cluster for a ToolchainCluster object and makes changes based on the state read +// and what is in the ToolchainCluster.Spec. It updates the status of the individual cluster +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
+func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + reqLogger := log.FromContext(ctx) + reqLogger.Info("Reconciling ToolchainCluster") + + // Fetch the ToolchainCluster instance + toolchainCluster := &toolchainv1alpha1.ToolchainCluster{} + err := r.Client.Get(ctx, request.NamespacedName, toolchainCluster) + if err != nil { + if errors.IsNotFound(err) { + // Stop monitoring the toolchain cluster as it is deleted + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + cachedCluster, ok := cluster.GetCachedToolchainCluster(toolchainCluster.Name) + if !ok { + err := fmt.Errorf("cluster %s not found in cache", toolchainCluster.Name) + toolchainCluster.Status.Conditions = []toolchainv1alpha1.ToolchainClusterCondition{clusterOfflineCondition()} + if err := r.Client.Status().Update(ctx, toolchainCluster); err != nil { + reqLogger.Error(err, "failed to update the status of ToolchainCluster") + } + return reconcile.Result{}, err + } + + clientSet, err := kubeclientset.NewForConfig(cachedCluster.RestConfig) + if err != nil { + reqLogger.Error(err, "cannot create ClientSet for the ToolchainCluster") + return reconcile.Result{}, err + } + healthChecker := &HealthChecker{ + localClusterClient: r.Client, + remoteClusterClient: cachedCluster.Client, + remoteClusterClientset: clientSet, + logger: reqLogger, + } + //update the status of the individual cluster. 
+ if err := healthChecker.updateIndividualClusterStatus(ctx, toolchainCluster); err != nil { + reqLogger.Error(err, "unable to update cluster status of ToolchainCluster") + return reconcile.Result{}, err + } + + return reconcile.Result{RequeueAfter: r.RequeAfter}, nil +} diff --git a/controllers/toolchaincluster/toolchaincluster_controller_test.go b/controllers/toolchaincluster/toolchaincluster_controller_test.go new file mode 100644 index 00000000..22727789 --- /dev/null +++ b/controllers/toolchaincluster/toolchaincluster_controller_test.go @@ -0,0 +1,164 @@ +package toolchaincluster + +import ( + "context" + "fmt" + "testing" + "time" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-common/pkg/cluster" + "github.com/codeready-toolchain/toolchain-common/pkg/test" + "github.com/stretchr/testify/require" + "gopkg.in/h2non/gock.v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var requeAfter = 10 * time.Second + +func TestClusterControllerChecks(t *testing.T) { + // given + + defer gock.Off() + tcNs := "test-namespace" + gock.New("http://cluster.com"). + Get("healthz"). + Persist(). + Reply(200). + BodyString("ok") + gock.New("http://unstable.com"). + Get("healthz"). + Persist(). + Reply(200). + BodyString("unstable") + gock.New("http://not-found.com"). + Get("healthz"). + Persist(). 
+ Reply(404) + + t.Run("ToolchainCluster not found", func(t *testing.T) { + // given + NotFound, sec := newToolchainCluster("notfound", tcNs, "http://not-found.com", toolchainv1alpha1.ToolchainClusterStatus{}) + + cl := test.NewFakeClient(t, sec) + reset := setupCachedClusters(t, cl, NotFound) + defer reset() + controller, req := prepareReconcile(NotFound, cl, requeAfter) + + // when + recresult, err := controller.Reconcile(context.TODO(), req) + + // then + require.NoError(t, err) + require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recresult) + + }) + + t.Run("Error while getting ToolchainCluster", func(t *testing.T) { + // given + tc, sec := newToolchainCluster("tc", tcNs, "http://tc.com", toolchainv1alpha1.ToolchainClusterStatus{}) + + cl := test.NewFakeClient(t, sec) + + cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { + if _, ok := obj.(*toolchainv1alpha1.ToolchainCluster); ok { + return fmt.Errorf("mock error") + } + return cl.Client.Get(ctx, key, obj, opts...)
+ } + + controller, req := prepareReconcile(tc, cl, requeAfter) + + // when + recresult, err := controller.Reconcile(context.TODO(), req) + + // then + require.EqualError(t, err, "mock error") + require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recresult) + + }) + + t.Run("reconcile successful and requeued", func(t *testing.T) { + // given + stable, sec := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{}) + + cl := test.NewFakeClient(t, stable, sec) + reset := setupCachedClusters(t, cl, stable) + defer reset() + controller, req := prepareReconcile(stable, cl, requeAfter) + + // when + recresult, err := controller.Reconcile(context.TODO(), req) + + // then + require.NoError(t, err) + require.Equal(t, reconcile.Result{RequeueAfter: requeAfter}, recresult) + assertClusterStatus(t, cl, "stable", healthy()) + + }) + + t.Run("toolchain cluster cache not found", func(t *testing.T) { + // given + stable, _ := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{}) + + cl := test.NewFakeClient(t, stable) + + controller, req := prepareReconcile(stable, cl, requeAfter) + + // when + _, err := controller.Reconcile(context.TODO(), req) + + // then + require.EqualError(t, err, "cluster stable not found in cache") + actualtoolchaincluster := &toolchainv1alpha1.ToolchainCluster{} + err = cl.Client.Get(context.TODO(), types.NamespacedName{Name: "stable", Namespace: tcNs}, actualtoolchaincluster) + require.NoError(t, err) + assertClusterStatus(t, cl, "stable", offline()) + + }) + +} + +func setupCachedClusters(t *testing.T, cl *test.FakeClient, clusters ...*toolchainv1alpha1.ToolchainCluster) func() { + service := cluster.NewToolchainClusterServiceWithClient(cl, logf.Log, test.MemberOperatorNs, 0, func(config *rest.Config, options client.Options) (client.Client, error) { + // make sure that insecure is false to make Gock mocking working properly + config.Insecure =
false + return client.New(config, options) + }) + for _, clustr := range clusters { + err := service.AddOrUpdateToolchainCluster(clustr) + require.NoError(t, err) + tc, found := cluster.GetCachedToolchainCluster(clustr.Name) + require.True(t, found) + tc.Client = test.NewFakeClient(t) + } + return func() { + for _, clustr := range clusters { + service.DeleteToolchainCluster(clustr.Name) + } + } +} + +func newToolchainCluster(name, tcNs string, apiEndpoint string, status toolchainv1alpha1.ToolchainClusterStatus) (*toolchainv1alpha1.ToolchainCluster, *corev1.Secret) { + toolchainCluster, secret := test.NewToolchainClusterWithEndpoint(name, tcNs, "secret", apiEndpoint, status, map[string]string{"namespace": "test-namespace"}) + return toolchainCluster, secret +} + +func prepareReconcile(toolchainCluster *toolchainv1alpha1.ToolchainCluster, cl *test.FakeClient, requeAfter time.Duration) (Reconciler, reconcile.Request) { + controller := Reconciler{ + Client: cl, + Scheme: scheme.Scheme, + RequeAfter: requeAfter, + } + req := reconcile.Request{ + NamespacedName: test.NamespacedName(toolchainCluster.Namespace, toolchainCluster.Name), + } + return controller, req +} diff --git a/controllers/toolchainclustercache/healthchecker_test.go b/controllers/toolchainclustercache/healthchecker_test.go deleted file mode 100644 index 422e19db..00000000 --- a/controllers/toolchainclustercache/healthchecker_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package toolchainclustercache - -import ( - "context" - "testing" - - toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" - "github.com/codeready-toolchain/toolchain-common/pkg/cluster" - "github.com/codeready-toolchain/toolchain-common/pkg/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/h2non/gock.v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" -) - -func 
TestClusterHealthChecks(t *testing.T) { - // given - defer gock.Off() - tcNs := "test-namespace" - gock.New("http://cluster.com"). - Get("healthz"). - Persist(). - Reply(200). - BodyString("ok") - gock.New("http://unstable.com"). - Get("healthz"). - Persist(). - Reply(200). - BodyString("unstable") - gock.New("http://not-found.com"). - Get("healthz"). - Persist(). - Reply(404) - - t.Run("ToolchainCluster.status doesn't contain any conditions", func(t *testing.T) { - unstable, sec := newToolchainCluster("unstable", tcNs, "http://unstable.com", toolchainv1alpha1.ToolchainClusterStatus{}) - notFound, _ := newToolchainCluster("not-found", tcNs, "http://not-found.com", toolchainv1alpha1.ToolchainClusterStatus{}) - stable, _ := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{}) - - cl := test.NewFakeClient(t, unstable, notFound, stable, sec) - reset := setupCachedClusters(t, cl, unstable, notFound, stable) - defer reset() - - // when - updateClusterStatuses(context.TODO(), "test-namespace", cl) - - // then - assertClusterStatus(t, cl, "unstable", notOffline(), unhealthy()) - assertClusterStatus(t, cl, "not-found", offline()) - assertClusterStatus(t, cl, "stable", healthy()) - }) - - t.Run("ToolchainCluster.status already contains conditions", func(t *testing.T) { - unstable, sec := newToolchainCluster("unstable", tcNs, "http://unstable.com", withStatus(healthy())) - notFound, _ := newToolchainCluster("not-found", tcNs, "http://not-found.com", withStatus(notOffline(), unhealthy())) - stable, _ := newToolchainCluster("stable", tcNs, "http://cluster.com", withStatus(offline())) - cl := test.NewFakeClient(t, unstable, notFound, stable, sec) - resetCache := setupCachedClusters(t, cl, unstable, notFound, stable) - defer resetCache() - - // when - updateClusterStatuses(context.TODO(), "test-namespace", cl) - - // then - assertClusterStatus(t, cl, "unstable", notOffline(), unhealthy()) - assertClusterStatus(t, cl, "not-found", 
offline()) - assertClusterStatus(t, cl, "stable", healthy()) - }) - - t.Run("if no zones nor region is retrieved, then keep the current", func(t *testing.T) { - stable, sec := newToolchainCluster("stable", tcNs, "http://cluster.com", withStatus(offline())) - cl := test.NewFakeClient(t, stable, sec) - resetCache := setupCachedClusters(t, cl, stable) - defer resetCache() - - // when - updateClusterStatuses(context.TODO(), "test-namespace", cl) - - // then - assertClusterStatus(t, cl, "stable", healthy()) - }) - - t.Run("if the connection cannot be established at beginning, then it should be offline", func(t *testing.T) { - stable, sec := newToolchainCluster("failing", tcNs, "http://failing.com", toolchainv1alpha1.ToolchainClusterStatus{}) - - cl := test.NewFakeClient(t, stable, sec) - - // when - updateClusterStatuses(context.TODO(), "test-namespace", cl) - - // then - assertClusterStatus(t, cl, "failing", offline()) - }) -} - -func setupCachedClusters(t *testing.T, cl *test.FakeClient, clusters ...*toolchainv1alpha1.ToolchainCluster) func() { - service := cluster.NewToolchainClusterServiceWithClient(cl, logf.Log, test.MemberOperatorNs, 0, func(config *rest.Config, options client.Options) (client.Client, error) { - // make sure that insecure is false to make Gock mocking working properly - config.Insecure = false - return client.New(config, options) - }) - for _, clustr := range clusters { - err := service.AddOrUpdateToolchainCluster(clustr) - require.NoError(t, err) - tc, found := cluster.GetCachedToolchainCluster(clustr.Name) - require.True(t, found) - tc.Client = test.NewFakeClient(t) - } - return func() { - for _, clustr := range clusters { - service.DeleteToolchainCluster(clustr.Name) - } - } -} - -func withStatus(conditions ...toolchainv1alpha1.ToolchainClusterCondition) toolchainv1alpha1.ToolchainClusterStatus { - return toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: conditions, - } -} - -func newToolchainCluster(name, tcNs string, apiEndpoint string, 
status toolchainv1alpha1.ToolchainClusterStatus) (*toolchainv1alpha1.ToolchainCluster, *corev1.Secret) { - toolchainCluster, secret := test.NewToolchainClusterWithEndpoint(name, tcNs, "secret", apiEndpoint, status, map[string]string{"namespace": "test-namespace"}) - return toolchainCluster, secret -} - -func assertClusterStatus(t *testing.T, cl client.Client, clusterName string, clusterConds ...toolchainv1alpha1.ToolchainClusterCondition) { - tc := &toolchainv1alpha1.ToolchainCluster{} - err := cl.Get(context.TODO(), test.NamespacedName("test-namespace", clusterName), tc) - require.NoError(t, err) - assert.Len(t, tc.Status.Conditions, len(clusterConds)) -ExpConditions: - for _, expCond := range clusterConds { - for _, cond := range tc.Status.Conditions { - if expCond.Type == cond.Type { - assert.Equal(t, expCond.Status, cond.Status) - assert.Equal(t, expCond.Reason, cond.Reason) - assert.Equal(t, expCond.Message, cond.Message) - continue ExpConditions - } - } - assert.Failf(t, "condition not found", "the list of conditions %v doesn't contain the expected condition %v", tc.Status.Conditions, expCond) - } -} - -func healthy() toolchainv1alpha1.ToolchainClusterCondition { - return toolchainv1alpha1.ToolchainClusterCondition{ - Type: toolchainv1alpha1.ToolchainClusterReady, - Status: corev1.ConditionTrue, - Reason: "ClusterReady", - Message: "/healthz responded with ok", - } -} -func unhealthy() toolchainv1alpha1.ToolchainClusterCondition { - return toolchainv1alpha1.ToolchainClusterCondition{Type: toolchainv1alpha1.ToolchainClusterReady, - Status: corev1.ConditionFalse, - Reason: "ClusterNotReady", - Message: "/healthz responded without ok", - } -} -func offline() toolchainv1alpha1.ToolchainClusterCondition { - return toolchainv1alpha1.ToolchainClusterCondition{Type: toolchainv1alpha1.ToolchainClusterOffline, - Status: corev1.ConditionTrue, - Reason: "ClusterNotReachable", - Message: "cluster is not reachable", - } -} -func notOffline() 
toolchainv1alpha1.ToolchainClusterCondition { - return toolchainv1alpha1.ToolchainClusterCondition{Type: toolchainv1alpha1.ToolchainClusterOffline, - Status: corev1.ConditionFalse, - Reason: "ClusterReachable", - Message: "cluster is reachable", - } -}