diff --git a/pkg/k8s/k8s.go b/pkg/k8s/k8s.go
index 78102753..ee917cc5 100644
--- a/pkg/k8s/k8s.go
+++ b/pkg/k8s/k8s.go
@@ -3,18 +3,12 @@ package k8s
 
 import (
     "context"
-    "os"
-    "path/filepath"
-    "regexp"
-    "strings"
     "time"
 
     "github.com/sirupsen/logrus"
     "k8s.io/client-go/discovery"
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/clientcmd"
 )
 
 const (
@@ -29,8 +23,8 @@ const (
 )
 
 type Client struct {
-    clientset       *kubernetes.Clientset
-    discoveryClient *discovery.DiscoveryClient
+    clientset       kubernetes.Interface
+    discoveryClient discovery.DiscoveryInterface
     dynamicClient   dynamic.Interface
     namespace       string
 }
 
@@ -75,7 +69,24 @@ func New(ctx context.Context, namespace string) (*Client, error) {
     return kc, nil
 }
 
-func (c *Client) Clientset() *kubernetes.Clientset {
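+// NewCustom builds a Client from the given clientset, discovery, and dynamic
+// clients. It allows callers, such as the unit tests below, to inject fake or
+// custom client implementations instead of connecting to a real cluster.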
+func NewCustom(
+    cs kubernetes.Interface,
+    dc discovery.DiscoveryInterface,
+    dyn dynamic.Interface,
+    namespace string,
+) *Client {
+    return &Client{
+        clientset:       cs,
+        discoveryClient: dc,
+        dynamicClient:   dyn,
+        namespace:       namespace,
+    }
+}
+
+func (c *Client) Clientset() kubernetes.Interface {
     return c.clientset
 }
 
@@ -87,48 +95,6 @@ func (c *Client) Namespace() string {
     return c.namespace
 }
 
-// isClusterEnvironment checks if the program is running in a Kubernetes cluster.
-func isClusterEnvironment() bool {
-    return fileExists(tokenPath) && fileExists(certPath)
-}
-
-func fileExists(path string) bool {
-    _, err := os.Stat(path)
-    return err == nil
-}
-
-// getClusterConfig returns the appropriate Kubernetes cluster configuration.
-func getClusterConfig() (*rest.Config, error) {
-    if isClusterEnvironment() {
-        return rest.InClusterConfig()
-    }
-
-    // build the configuration from the kubeconfig file
-    kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
-    return clientcmd.BuildConfigFromFlags("", kubeconfig)
-}
-
-// precompile the regular expression to avoid recompiling it on every function call
-var invalidCharsRegexp = regexp.MustCompile(`[^a-z0-9-]+`)
-
-// SanitizeName ensures compliance with Kubernetes DNS-1123 subdomain names. It:
-// 1. Converts the input string to lowercase.
-// 2. Replaces underscores and any non-DNS-1123 compliant characters with hyphens.
-// 3. Trims leading and trailing hyphens.
-// 4. Ensures the name does not exceed 63 characters, trimming excess characters if necessary
-//    and ensuring it does not end with a hyphen after trimming.
-//
-// Use this function to sanitize strings to be used as Kubernetes names for resources.
-func SanitizeName(name string) string {
-    sanitized := strings.ToLower(name)
-    // Replace underscores and any other disallowed characters with hyphens
-    sanitized = invalidCharsRegexp.ReplaceAllString(sanitized, "-")
-    // Trim leading and trailing hyphens
-    sanitized = strings.Trim(sanitized, "-")
-    if len(sanitized) > 63 {
-        sanitized = sanitized[:63]
-        // Ensure it does not end with a hyphen after cutting it to the max length
-        sanitized = strings.TrimRight(sanitized, "-")
-    }
-    return sanitized
+func (c *Client) DiscoveryClient() discovery.DiscoveryInterface {
+    return c.discoveryClient
 }
diff --git a/pkg/k8s/k8s_configmap_test.go b/pkg/k8s/k8s_configmap_test.go
new file mode 100644
index 00000000..0df2d217
--- /dev/null
+++ b/pkg/k8s/k8s_configmap_test.go
@@ -0,0 +1,271 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestGetConfigMap() {
+    tests := []struct {
+        name          string
+        configMapName string
+        setupMock     func(*fake.Clientset)
+        expectedErr   error
+        expectedCM    *v1.ConfigMap
+    }{
+        {
+            name:          "successful retrieval",
+            configMapName: "test-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createConfigMap(clientset, "test-configmap", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+            expectedCM: &v1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-configmap",
+                    Namespace: suite.namespace,
+                },
+            },
+        },
+        {
+            name:          "configmap not found",
+            configMapName: "non-existent-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                // No setup needed for this case
+            },
+            expectedErr: k8s.ErrGettingConfigmap.WithParams("non-existent-configmap").
+                Wrap(errors.New("configmaps \"non-existent-configmap\" not found")),
+            expectedCM: nil,
+        },
+        {
+            name:          "client error",
+            configMapName: "error-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrGettingConfigmap.WithParams("error-configmap").
+                Wrap(errors.New("internal server error")),
+            expectedCM: nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            cm, err := suite.client.GetConfigMap(context.Background(), tt.configMapName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedCM, cm)
+        })
+    }
+}
+
+func (suite *TestSuite) TestConfigMapExists() {
+    tests := []struct {
+        name          string
+        configMapName string
+        setupMock     func(*fake.Clientset)
+        expectedExist bool
+        expectedErr   error
+    }{
+        {
+            name:          "configmap exists",
+            configMapName: "existing-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createConfigMap(clientset, "existing-configmap", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedExist: true,
+            expectedErr:   nil,
+        },
+        {
+            name:          "configmap does not exist",
+            configMapName: "non-existent-configmap",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedExist: false,
+            expectedErr:   nil,
+        },
+        {
+            name:          "client error",
+            configMapName: "error-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedExist: false,
+            expectedErr: k8s.ErrGettingConfigmap.WithParams("error-configmap").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            exists, err := suite.client.ConfigMapExists(context.Background(), tt.configMapName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.expectedExist, exists)
+        })
+    }
+}
+
+func (suite *TestSuite) TestCreateConfigMap() {
+    tests := []struct {
+        name        string
+        configMap   *v1.ConfigMap
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name: "successful creation",
+            configMap: &v1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "new-configmap",
+                    Namespace: suite.namespace,
+                },
+                Data: map[string]string{"key": "value"},
+            },
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name: "configmap already exists",
+            configMap: &v1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "existing-configmap",
+                    Namespace: suite.namespace,
+                },
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                err := createConfigMap(clientset, "existing-configmap", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: k8s.ErrConfigmapAlreadyExists.WithParams("existing-configmap").
+                Wrap(errors.New("configmap already exists")),
+        },
+        {
+            name: "client error",
+            configMap: &v1.ConfigMap{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "error-configmap",
+                    Namespace: suite.namespace,
+                },
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrCreatingConfigmap.WithParams("error-configmap").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            cm, err := suite.client.CreateConfigMap(context.Background(), tt.configMap.Name, tt.configMap.Labels, tt.configMap.Data)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.configMap, cm)
+        })
+    }
+}
+
+func (suite *TestSuite) TestDeleteConfigMap() {
+    tests := []struct {
+        name          string
+        configMapName string
+        setupMock     func(*fake.Clientset)
+        expectedErr   error
+    }{
+        {
+            name:          "successful deletion",
+            configMapName: "existing-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createConfigMap(clientset, "existing-configmap", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+        },
+        {
+            name:          "configmap does not exist",
+            configMapName: "non-existent-configmap",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedErr: k8s.ErrConfigmapDoesNotExist.WithParams("non-existent-configmap").
+                Wrap(errors.New("configmap does not exist")),
+        },
+        {
+            name:          "client error",
+            configMapName: "error-configmap",
+            setupMock: func(clientset *fake.Clientset) {
+                // if it does not exist, it returns nil as the error,
+                // so we need to add it to the fake client to be able to pass the existence check
+                err := createConfigMap(clientset, "error-configmap", suite.namespace)
+                require.NoError(suite.T(), err)
+
+                clientset.PrependReactor("delete", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrDeletingConfigmap.WithParams("error-configmap").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.DeleteConfigMap(context.Background(), tt.configMapName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func createConfigMap(clientset *fake.Clientset, name, namespace string) error {
+    _, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), &v1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+        },
+    }, metav1.CreateOptions{})
+    return err
+}
diff --git a/pkg/k8s/k8s_custom_resource.go b/pkg/k8s/k8s_custom_resource.go
index 6634d9eb..365a9e6f 100644
--- a/pkg/k8s/k8s_custom_resource.go
+++ b/pkg/k8s/k8s_custom_resource.go
@@ -30,7 +30,7 @@ func (c *Client) CreateCustomResource(
         },
     }
 
-    if _, err := c.dynamicClient.Resource(*gvr).Namespace(c.namespace).Create(context.TODO(), resourceUnstructured, metav1.CreateOptions{}); err != nil {
+    if _, err := c.dynamicClient.Resource(*gvr).Namespace(c.namespace).Create(ctx, resourceUnstructured, metav1.CreateOptions{}); err != nil {
         return ErrCreatingCustomResource.WithParams(gvr.Resource).Wrap(err)
     }
 
diff --git a/pkg/k8s/k8s_custom_resource_test.go b/pkg/k8s/k8s_custom_resource_test.go
new file mode 100644
index 00000000..f5bfc746
--- /dev/null
+++ b/pkg/k8s/k8s_custom_resource_test.go
@@ -0,0 +1,150 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    discfake "k8s.io/client-go/discovery/fake"
+    dynfake "k8s.io/client-go/dynamic/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestCreateCustomResource() {
+    tests := []struct {
+        name        string
+        resource    *schema.GroupVersionResource
+        obj         *map[string]interface{}
+        setupMock   func(*dynfake.FakeDynamicClient)
+        expectedErr error
+    }{
+        {
+            name: "successful creation",
+            resource: &schema.GroupVersionResource{
+                Group:    "example.com",
+                Version:  "v1",
+                Resource: "examples",
+            },
+            obj: &map[string]interface{}{
+                "spec": map[string]interface{}{
+                    "key": "value",
+                },
+            },
+            setupMock:   func(dynamicClient *dynfake.FakeDynamicClient) {},
+            expectedErr: nil,
+        },
+        {
+            name: "client error",
+            resource: &schema.GroupVersionResource{
+                Group:    "example.com",
+                Version:  "v1",
+                Resource: "examples",
+            },
+            obj: &map[string]interface{}{
+                "spec": map[string]interface{}{
+                    "key": "value",
+                },
+            },
+            setupMock: func(dynamicClient *dynfake.FakeDynamicClient) {
+                dynamicClient.PrependReactor("create", "examples", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrCreatingCustomResource.WithParams("examples").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.DynamicClient().(*dynfake.FakeDynamicClient))
+
+            err := suite.client.CreateCustomResource(context.Background(), "test-resource", tt.resource, tt.obj)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestCustomResourceDefinitionExists() {
+    tests := []struct {
+        name           string
+        resource       *schema.GroupVersionResource
+        setupMock      func(*discfake.FakeDiscovery)
+        expectedExists bool
+    }{
+        {
+            name: "resource definition exists",
+            resource: &schema.GroupVersionResource{
+                Group:    "example.com",
+                Version:  "v1",
+                Resource: "example-kind",
+            },
+            setupMock: func(discoveryClient *discfake.FakeDiscovery) {
+                discoveryClient.Resources = []*metav1.APIResourceList{
+                    {
+                        GroupVersion: "example.com/v1",
+                        APIResources: []metav1.APIResource{
+                            {
+                                Name: "examples",
+                                // must be equal to the kind in the resource.Resource definition
+                                Kind: "example-kind",
+                            },
+                        },
+                    },
+                }
+            },
+            expectedExists: true,
+        },
+        {
+            name: "resource definition does not exist",
+            resource: &schema.GroupVersionResource{
+                Group:    "example.com",
+                Version:  "v1",
+                Resource: "nonexistent",
+            },
+            setupMock: func(discoveryClient *discfake.FakeDiscovery) {
+                discoveryClient.Resources = []*metav1.APIResourceList{
+                    {
+                        GroupVersion: "example.com/v1",
+                        APIResources: []metav1.APIResource{},
+                    },
+                }
+            },
+            expectedExists: false,
+        },
+        {
+            name: "discovery client error",
+            resource: &schema.GroupVersionResource{
+                Group:    "example.com",
+                Version:  "v1",
+                Resource: "examples",
+            },
+            setupMock: func(discoveryClient *discfake.FakeDiscovery) {
+                discoveryClient.PrependReactor("get", "serverresourcesforgroupversion", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedExists: false,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.DiscoveryClient().(*discfake.FakeDiscovery))
+
+            exists := suite.client.CustomResourceDefinitionExists(context.Background(), tt.resource)
+            assert.Equal(suite.T(), tt.expectedExists, exists)
+        })
+    }
+}
diff --git a/pkg/k8s/k8s_daemonset_test.go b/pkg/k8s/k8s_daemonset_test.go
new file mode 100644
index 00000000..25266aa8
--- /dev/null
+++ b/pkg/k8s/k8s_daemonset_test.go
@@ -0,0 +1,402 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    appv1 "k8s.io/api/apps/v1"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestDaemonSetExists() {
+    tests := []struct {
+        name           string
+        daemonSetName  string
+        setupMock      func(*fake.Clientset)
+        expectedExists bool
+        expectedErr    error
+    }{
+        {
+            name:          "daemonset exists",
+            daemonSetName: "existing-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "existing-daemonset", suite.namespace))
+            },
+            expectedExists: true,
+            expectedErr:    nil,
+        },
+        {
+            name:           "daemonset does not exist",
+            daemonSetName:  "non-existent-daemonset",
+            setupMock:      func(clientset *fake.Clientset) {},
+            expectedExists: false,
+            expectedErr:    nil,
+        },
+        {
+            name:          "client error",
+            daemonSetName: "error-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "error-daemonset", suite.namespace))
+                clientset.PrependReactor("get", "daemonsets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedExists: false,
+            expectedErr:    k8s.ErrGettingDaemonset.WithParams("error-daemonset"),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            exists, err := suite.client.DaemonSetExists(context.Background(), tt.daemonSetName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.expectedExists, exists)
+        })
+    }
+}
+
+func (suite *TestSuite) TestGetDaemonSet() {
+    tests := []struct {
+        name          string
+        daemonSetName string
+        setupMock     func(*fake.Clientset)
+        expectedErr   error
+        expectedDS    *appv1.DaemonSet
+    }{
+        {
+            name:          "successful retrieval",
+            daemonSetName: "test-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "test-daemonset", suite.namespace))
+            },
+            expectedErr: nil,
+            expectedDS: &appv1.DaemonSet{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-daemonset",
+                    Namespace: suite.namespace,
+                },
+            },
+        },
+        {
+            name:          "daemonset not found",
+            daemonSetName: "non-existent-daemonset",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedErr:   k8s.ErrGettingDaemonset.Wrap(errors.New("daemonsets \"non-existent-daemonset\" not found")),
+            expectedDS:    nil,
+        },
+        {
+            name:          "client error",
+            daemonSetName: "error-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "error-daemonset", suite.namespace))
+                clientset.PrependReactor("get", "daemonsets",
+                    func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                        return true, nil, errors.New("internal server error")
+                    })
+            },
+            expectedErr: k8s.ErrGettingDaemonset.WithParams("error-daemonset").Wrap(errors.New("internal server error")),
+            expectedDS:  nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            ds, err := suite.client.GetDaemonSet(context.Background(), tt.daemonSetName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedDS, ds)
+        })
+    }
+}
+
+func (suite *TestSuite) TestCreateDaemonSet() {
+    tests := []struct {
+        name           string
+        daemonSetName  string
+        labels         map[string]string
+        initContainers []v1.Container
+        containers     []v1.Container
+        setupMock      func(*fake.Clientset)
+        expectedErr    error
+        expectedDS     *appv1.DaemonSet
+    }{
+        {
+            name:           "successful creation",
+            daemonSetName:  "new-daemonset",
+            labels:         map[string]string{"app": "test"},
+            initContainers: []v1.Container{},
+            containers: []v1.Container{
+                {
+                    Name:  "container",
+                    Image: "nginx",
+                },
+            },
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+            expectedDS: &appv1.DaemonSet{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "new-daemonset",
+                    Namespace: suite.namespace,
+                    Labels:    map[string]string{"app": "test"},
+                },
+                Spec: appv1.DaemonSetSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "test"},
+                    },
+                    Template: v1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Labels: map[string]string{"app": "test"},
+                        },
+                        Spec: v1.PodSpec{
+                            InitContainers: []v1.Container{},
+                            Containers: []v1.Container{
+                                {
+                                    Name:  "container",
+                                    Image: "nginx",
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        {
+            name:           "client error",
+            daemonSetName:  "error-daemonset",
+            labels:         map[string]string{"app": "test"},
+            initContainers: []v1.Container{},
+            containers: []v1.Container{
+                {
+                    Name:  "container",
+                    Image: "nginx",
+                },
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "error-daemonset", suite.namespace))
+                clientset.PrependReactor("create", "daemonsets",
+                    func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                        return true, nil, errors.New("internal server error")
+                    })
+            },
+            expectedErr: k8s.ErrCreatingDaemonset.WithParams("error-daemonset").Wrap(errors.New("internal server error")),
+            expectedDS:  nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            ds, err := suite.client.CreateDaemonSet(context.Background(), tt.daemonSetName, tt.labels, tt.initContainers, tt.containers)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedDS, ds)
+        })
+    }
+}
+
+func (suite *TestSuite) TestUpdateDaemonSet() {
+    tests := []struct {
+        name           string
+        daemonSetName  string
+        labels         map[string]string
+        initContainers []v1.Container
+        containers     []v1.Container
+        setupMock      func(*fake.Clientset)
+        expectedErr    error
+        expectedDS     *appv1.DaemonSet
+    }{
+        {
+            name:           "successful update",
+            daemonSetName:  "existing-daemonset",
+            labels:         map[string]string{"app": "test"},
+            initContainers: []v1.Container{},
+            containers: []v1.Container{
+                {
+                    Name:  "container",
+                    Image: "nginx",
+                },
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                _, err := clientset.AppsV1().DaemonSets(suite.namespace).Create(context.Background(), &appv1.DaemonSet{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "existing-daemonset",
+                        Namespace: suite.namespace,
+                        Labels:    map[string]string{"app": "test"},
+                    },
+                    Spec: appv1.DaemonSetSpec{
+                        Selector: &metav1.LabelSelector{
+                            MatchLabels: map[string]string{"app": "test"},
+                        },
+                        Template: v1.PodTemplateSpec{
+                            ObjectMeta: metav1.ObjectMeta{
+                                Labels: map[string]string{"app": "test"},
+                            },
+                            Spec: v1.PodSpec{
+                                InitContainers: []v1.Container{},
+                                Containers: []v1.Container{
+                                    {
+                                        Name:  "container",
+                                        Image: "nginx",
+                                    },
+                                },
+                            },
+                        },
+                    },
+                }, metav1.CreateOptions{})
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+            expectedDS: &appv1.DaemonSet{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "existing-daemonset",
+                    Namespace: suite.namespace,
+                    Labels:    map[string]string{"app": "test"},
+                },
+                Spec: appv1.DaemonSetSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "test"},
+                    },
+                    Template: v1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Labels: map[string]string{"app": "test"},
+                        },
+                        Spec: v1.PodSpec{
+                            InitContainers: []v1.Container{},
+                            Containers: []v1.Container{
+                                {
+                                    Name:  "container",
+                                    Image: "nginx",
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        {
+            name:           "client error",
+            daemonSetName:  "error-daemonset",
+            labels:         map[string]string{"app": "test"},
+            initContainers: []v1.Container{},
+            containers: []v1.Container{
+                {
+                    Name:  "container",
+                    Image: "nginx",
+                },
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "error-daemonset", suite.namespace))
+                clientset.PrependReactor("update", "daemonsets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrUpdatingDaemonset.WithParams("error-daemonset").Wrap(errors.New("internal server error")),
+            expectedDS:  nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            ds, err := suite.client.UpdateDaemonSet(context.Background(), tt.daemonSetName, tt.labels, tt.initContainers, tt.containers)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedDS, ds)
+        })
+    }
+}
+
+func (suite *TestSuite) TestDeleteDaemonSet() {
+    tests := []struct {
+        name          string
+        daemonSetName string
+        setupMock     func(*fake.Clientset)
+        expectedErr   error
+    }{
+        {
+            name:          "successful deletion",
+            daemonSetName: "existing-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "existing-daemonset", suite.namespace))
+            },
+            expectedErr: nil,
+        },
+        {
+            name:          "daemonset does not exist",
+            daemonSetName: "non-existent-daemonset",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedErr:   k8s.ErrDeletingDaemonset.WithParams("non-existent-daemonset").Wrap(errors.New("daemonset does not exist")),
+        },
+        {
+            name:          "client error",
+            daemonSetName: "error-daemonset",
+            setupMock: func(clientset *fake.Clientset) {
+                require.NoError(suite.T(), createDaemonSet(clientset, "error-daemonset", suite.namespace))
+                clientset.PrependReactor("delete", "daemonsets",
+                    func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                        return true, nil, errors.New("internal server error")
+                    })
+            },
+            expectedErr: k8s.ErrDeletingDaemonset.WithParams("error-daemonset").Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.DeleteDaemonSet(context.Background(), tt.daemonSetName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func createDaemonSet(clientset *fake.Clientset, name, namespace string) error {
+    _, err := clientset.AppsV1().DaemonSets(namespace).Create(context.Background(), &appv1.DaemonSet{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+        },
+    }, metav1.CreateOptions{})
+    return err
+}
diff --git a/pkg/k8s/k8s_deployment_test.go b/pkg/k8s/k8s_deployment_test.go
new file mode 100644
index 00000000..d0a2102e
--- /dev/null
+++ b/pkg/k8s/k8s_deployment_test.go
@@ -0,0 +1,94 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+    "time"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    appsv1 "k8s.io/api/apps/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestWaitForDeployment() {
+    tests := []struct {
+        name           string
+        deploymentName string
+        setupMock      func(*fake.Clientset)
+        expectedErr    error
+    }{
+        {
+            name:           "deployment becomes ready",
+            deploymentName: "ready-deployment",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "deployments", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, &appsv1.Deployment{
+                        Status: appsv1.DeploymentStatus{
+                            ReadyReplicas: 1,
+                        },
+                    }, nil
+                })
+            },
+            expectedErr: nil,
+        },
+        {
+            name:           "deployment not found",
+            deploymentName: "non-existent-deployment",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "deployments", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("deployments \"non-existent-deployment\" not found")
+                })
+            },
+            expectedErr: k8s.ErrWaitingForDeployment.WithParams("non-existent-deployment").
+                Wrap(errors.New("deployments \"non-existent-deployment\" not found")),
+        },
+        {
+            name:           "client error",
+            deploymentName: "error-deployment",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "deployments", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrWaitingForDeployment.WithParams("error-deployment").
+                Wrap(errors.New("internal server error")),
+        },
+        {
+            name:           "context timeout",
+            deploymentName: "timeout-deployment",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "deployments", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, &appsv1.Deployment{
+                        Status: appsv1.DeploymentStatus{
+                            ReadyReplicas: 0,
+                        },
+                    }, nil
+                })
+            },
+            expectedErr: k8s.ErrWaitingForDeployment.WithParams("timeout-deployment").Wrap(context.DeadlineExceeded),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            suite.T().Parallel()
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+            defer cancel()
+
+            err := suite.client.WaitForDeployment(ctx, tt.deploymentName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
diff --git a/pkg/k8s/k8s_namespace_test.go b/pkg/k8s/k8s_namespace_test.go
new file mode 100644
index 00000000..09210879
--- /dev/null
+++ b/pkg/k8s/k8s_namespace_test.go
@@ -0,0 +1,238 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestCreateNamespace() {
+    tests := []struct {
+        name        string
+        namespace   string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name:        "successful creation",
+            namespace:   "new-namespace",
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name:      "namespace already exists",
+            namespace: "existing-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, nil
+                })
+            },
+            expectedErr: nil,
+        },
+        {
+            name:      "client error",
+            namespace: "error-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, k8s.ErrCreatingNamespace.WithParams("error-namespace").
+                        Wrap(errors.New("internal server error"))
+                })
+            },
+            expectedErr: k8s.ErrCreatingNamespace.WithParams("error-namespace").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.CreateNamespace(context.Background(), tt.namespace)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestDeleteNamespace() {
+    tests := []struct {
+        name        string
+        namespace   string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name:      "successful deletion",
+            namespace: "existing-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNamespace(clientset, "existing-namespace")
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+        },
+        {
+            name:      "namespace not found",
+            namespace: "non-existent-namespace",
+            setupMock: func(clientset *fake.Clientset) {},
+            expectedErr: k8s.ErrDeletingNamespace.WithParams("non-existent-namespace").
+                Wrap(errors.New("namespaces \"non-existent-namespace\" not found")),
+        },
+        {
+            name:      "client error",
+            namespace: "error-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("delete", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrDeletingNamespace.WithParams("error-namespace").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.DeleteNamespace(context.Background(), tt.namespace)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestGetNamespace() {
+    tests := []struct {
+        name        string
+        namespace   string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+        expectedNS  *corev1.Namespace
+    }{
+        {
+            name:      "successful retrieval",
+            namespace: "existing-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNamespace(clientset, "existing-namespace")
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+            expectedNS: &corev1.Namespace{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "existing-namespace",
+                },
+            },
+        },
+        {
+            name:      "namespace not found",
+            namespace: "non-existent-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("namespaces \"non-existent-namespace\" not found")
+                })
+            },
+            expectedErr: k8s.ErrGettingNamespace.WithParams("non-existent-namespace").
+                Wrap(errors.New("namespaces \"non-existent-namespace\" not found")),
+            expectedNS: nil,
+        },
+        {
+            name:      "client error",
+            namespace: "error-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrGettingNamespace.WithParams("error-namespace").
+                Wrap(errors.New("internal server error")),
+            expectedNS: nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            ns, err := suite.client.GetNamespace(context.Background(), tt.namespace)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedNS, ns)
+        })
+    }
+}
+
+func (suite *TestSuite) TestNamespaceExists() {
+    tests := []struct {
+        name          string
+        namespace     string
+        setupMock     func(*fake.Clientset)
+        expectedExist bool
+    }{
+        {
+            name:      "namespace exists",
+            namespace: "existing-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNamespace(clientset, "existing-namespace")
+                require.NoError(suite.T(), err)
+            },
+            expectedExist: true,
+        },
+        {
+            name:          "namespace does not exist",
+            namespace:     "non-existent-namespace",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedExist: false,
+        },
+        {
+            name:      "client error",
+            namespace: "error-namespace",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "namespaces", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedExist: false,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            exists := suite.client.NamespaceExists(context.Background(), tt.namespace)
+            assert.Equal(suite.T(), tt.expectedExist, exists)
+        })
+    }
+}
+
+func createNamespace(clientset *fake.Clientset, name string) error {
+    _, err := clientset.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+        },
+    }, metav1.CreateOptions{})
+    return err
+}
diff --git a/pkg/k8s/k8s_networkpolicy_test.go b/pkg/k8s/k8s_networkpolicy_test.go
new file mode 100644
index 00000000..118f297f
--- /dev/null
+++ b/pkg/k8s/k8s_networkpolicy_test.go
@@ -0,0 +1,234 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    v1 "k8s.io/api/networking/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestCreateNetworkPolicy() {
+    tests := []struct {
+        name               string
+        npName             string
+        selectorMap        map[string]string
+        ingressSelectorMap map[string]string
+        egressSelectorMap  map[string]string
+        setupMock          func(*fake.Clientset)
+        expectedErr        error
+    }{
+        {
+            name:        "successful creation",
+            npName:      "test-np",
+            selectorMap: map[string]string{"app": "test"},
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name:        "client error",
+            npName:      "error-np",
+            selectorMap: map[string]string{"app": "error"},
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "networkpolicies", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, k8s.ErrCreatingNetworkPolicy.WithParams("error-np").
+                        Wrap(errors.New("internal server error"))
+                })
+            },
+            expectedErr: k8s.ErrCreatingNetworkPolicy.WithParams("error-np").Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.CreateNetworkPolicy(context.Background(), tt.npName, tt.selectorMap, tt.ingressSelectorMap, tt.egressSelectorMap)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestDeleteNetworkPolicy() {
+    tests := []struct {
+        name        string
+        npName      string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name:   "successful deletion",
+            npName: "existing-np",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNetworkPolicy(clientset, "existing-np", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+        },
+        {
+            name:      "network policy not found",
+            npName:    "non-existent-np",
+            setupMock: func(clientset *fake.Clientset) {},
+            expectedErr: k8s.ErrDeletingNetworkPolicy.WithParams("non-existent-np").
+                Wrap(errors.New("networkpolicies \"non-existent-np\" not found")),
+        },
+        {
+            name:   "client error",
+            npName: "error-np",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("delete", "networkpolicies", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrDeletingNetworkPolicy.WithParams("error-np").
+                Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.DeleteNetworkPolicy(context.Background(), tt.npName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestGetNetworkPolicy() {
+    tests := []struct {
+        name        string
+        npName      string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+        expectedNP  *v1.NetworkPolicy
+    }{
+        {
+            name:   "successful retrieval",
+            npName: "existing-np",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNetworkPolicy(clientset, "existing-np", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedErr: nil,
+            expectedNP: &v1.NetworkPolicy{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "existing-np",
+                    Namespace: suite.namespace,
+                },
+            },
+        },
+        {
+            name:   "network policy not found",
+            npName: "non-existent-np",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "networkpolicies", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("networkpolicies \"non-existent-np\" not found")
+                })
+            },
+            expectedErr: k8s.ErrGettingNetworkPolicy.WithParams("non-existent-np").
+                Wrap(errors.New("networkpolicies \"non-existent-np\" not found")),
+            expectedNP: nil,
+        },
+        {
+            name:   "client error",
+            npName: "error-np",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "networkpolicies", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrGettingNetworkPolicy.WithParams("error-np").
+                Wrap(errors.New("internal server error")),
+            expectedNP: nil,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            np, err := suite.client.GetNetworkPolicy(context.Background(), tt.npName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.EqualValues(suite.T(), tt.expectedNP, np)
+        })
+    }
+}
+
+func (suite *TestSuite) TestNetworkPolicyExists() {
+    tests := []struct {
+        name          string
+        npName        string
+        setupMock     func(*fake.Clientset)
+        expectedExist bool
+    }{
+        {
+            name:   "network policy exists",
+            npName: "existing-np",
+            setupMock: func(clientset *fake.Clientset) {
+                err := createNetworkPolicy(clientset, "existing-np", suite.namespace)
+                require.NoError(suite.T(), err)
+            },
+            expectedExist: true,
+        },
+        {
+            name:          "network policy does not exist",
+            npName:        "non-existent-np",
+            setupMock:     func(clientset *fake.Clientset) {},
+            expectedExist: false,
+        },
+        {
+            name:   "client error",
+            npName: "error-np",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "networkpolicies", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedExist: false,
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            exists := suite.client.NetworkPolicyExists(context.Background(), tt.npName)
+            assert.Equal(suite.T(), tt.expectedExist, exists)
+        })
+    }
+}
+
+func createNetworkPolicy(clientset *fake.Clientset, name, namespace string) error {
+    _, err := clientset.NetworkingV1().NetworkPolicies(namespace).Create(context.Background(), &v1.NetworkPolicy{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+        },
+    }, metav1.CreateOptions{})
+    return err
+}
diff --git a/pkg/k8s/k8s_pod_test.go b/pkg/k8s/k8s_pod_test.go
new file mode 100644
index 00000000..301908da
--- /dev/null
+++ b/pkg/k8s/k8s_pod_test.go
@@ -0,0 +1,386 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestDeployPod() {
+    tests := []struct {
+        name        string
+        podConfig   k8s.PodConfig
+        init        bool
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name: "successful creation",
+            podConfig: k8s.PodConfig{
+                Namespace: suite.namespace,
+                Name:      "test-pod",
+                Labels:    map[string]string{"app": "test"},
+            },
+            init:        false,
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name: "client error",
+            podConfig: k8s.PodConfig{
+                Namespace: suite.namespace,
+                Name:      "error-pod",
+                Labels:    map[string]string{"app": "error"},
+            },
+            init: false,
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrCreatingPod.Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            pod, err := suite.client.DeployPod(context.Background(), tt.podConfig, tt.init)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.podConfig.Name, pod.Name)
+        })
+    }
+}
+
+func (suite *TestSuite) TestReplacePod() {
+    tests := []struct {
+        name        string
+        podConfig   k8s.PodConfig
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name: "successful replacement",
+            podConfig: k8s.PodConfig{
+                Namespace: suite.namespace,
+                Name:      "test-pod",
+                Labels:    map[string]string{"app": "test"},
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("delete", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, nil
+                })
+            },
+            expectedErr: nil,
+        },
+        {
+            name: "client error on deletion",
+            podConfig: k8s.PodConfig{
+                Namespace: suite.namespace,
+                Name:      "error-pod",
+                Labels:    map[string]string{"app": "error"},
+            },
+            setupMock: func(clientset *fake.Clientset) {
+                err := createPod(clientset, "error-pod", suite.namespace)
+                require.NoError(suite.T(), err)
+                // The pod exists and there is some error deleting it.
+
+                clientset.PrependReactor("delete", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrDeletingPod.Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            pod, err := suite.client.ReplacePod(context.Background(), tt.podConfig)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.podConfig.Name, pod.Name)
+        })
+    }
+}
+
+func (suite *TestSuite) TestIsPodRunning() {
+    tests := []struct {
+        name        string
+        podName     string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+        expectedRun bool
+    }{
+        {
+            name:    "pod is running",
+            podName: "running-pod",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.CoreV1().Pods(suite.namespace).Create(context.Background(), &v1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "running-pod",
+                    },
+                    Status: v1.PodStatus{
+                        ContainerStatuses: []v1.ContainerStatus{
+                            {
+                                Ready: true,
+                            },
+                        },
+                    },
+                }, metav1.CreateOptions{})
+            },
+            expectedRun: true,
+            expectedErr: nil,
+        },
+        {
+            name:    "pod is not running",
+            podName: "not-running-pod",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.CoreV1().Pods(suite.namespace).Create(context.Background(), &v1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "not-running-pod",
+                    },
+                    Status: v1.PodStatus{
+                        ContainerStatuses: []v1.ContainerStatus{
+                            {
+                                Ready: false,
+                            },
+                        },
+                    },
+                }, metav1.CreateOptions{})
+            },
+            expectedRun: false,
+            expectedErr: nil,
+        },
+        {
+            name:    "client error",
+            podName: "error-pod",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("get", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedRun: false,
+            expectedErr: k8s.ErrGettingPod.WithParams("error-pod").Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            running, err := suite.client.IsPodRunning(context.Background(), tt.podName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.expectedRun, running)
+        })
+    }
+}
+
+func (suite *TestSuite) TestRunCommandInPod() {
+    // TestRunCommandInPod is not implemented.
+    //
+    // The RunCommandInPod function involves complex interactions with the Kubernetes API,
+    // specifically around executing commands within a pod using the SPDY protocol. This process
+    // includes setting up SPDY streams and handling bi-directional communication, which are
+    // challenging to accurately mock in a unit test environment.
+    //
+    // The primary reasons for not implementing a unit test for this function include:
+    // 1. Dependency on SPDY protocol and complex networking interactions that are difficult to simulate.
+    // 2. Requirement for real Kubernetes cluster behavior to validate the execution of commands within a pod.
+    // 3. High complexity and low benefit of mocking deep Kubernetes internals and network protocols.
+    //
+    // Given these complexities, it is recommended to test the RunCommandInPod function in an
+    // integration or end-to-end testing environment where real Kubernetes clusters and network
+    // conditions can be used to validate its behavior.
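+    //
+    // For reference, an integration test could drive the real exec API roughly
+    // as sketched below (illustrative only, not runnable as-is; assumes a
+    // *rest.Config named cfg plus the namespace, pod, and container under test):
+    //
+    //  req := clientset.CoreV1().RESTClient().Post().
+    //      Resource("pods").Namespace(namespace).Name(podName).
+    //      SubResource("exec").
+    //      VersionedParams(&v1.PodExecOptions{
+    //          Container: containerName,
+    //          Command:   []string{"echo", "hello"},
+    //          Stdout:    true,
+    //          Stderr:    true,
+    //      }, scheme.ParameterCodec)
+    //  exec, err := remotecommand.NewSPDYExecutor(cfg, http.MethodPost, req.URL())
+    //  // handle err, then stream the command output:
+    //  err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})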
clientset.PrependReactor("get", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedRun: false, + expectedErr: k8s.ErrGettingPod.WithParams("error-pod").Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + running, err := suite.client.IsPodRunning(context.Background(), tt.podName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.expectedRun, running) + }) + } +} + +func (suite *TestSuite) TestRunCommandInPod() { + // TestRunCommandInPod is not implemented. + // + // The RunCommandInPod function involves complex interactions with the Kubernetes API, + // specifically around executing commands within a pod using SPDY protocol. This process + // includes setting up SPDY streams and handling bi-directional communication, which are + // challenging to accurately mock in a unit test environment. + // + // The primary reasons for not implementing a unit test for this function include: + // 1. Dependency on SPDY protocol and complex networking interactions that are difficult to simulate. + // 2. Requirement for real Kubernetes cluster behavior to validate the execution of commands within a pod. + // 3. High complexity and low benefit of mocking deep Kubernetes internals and network protocols. + // + // Given these complexities, it is recommended to test the RunCommandInPod function in an + // integration or end-to-end testing environment where real Kubernetes clusters and network + // conditions can be used to validate its behavior. +} + +func (suite *TestSuite) TestDeletePodWithGracePeriod() { + tests := []struct { + name string + podName string + gracePeriod *int64 + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + podName: "existing-pod", + setupMock: func(clientset *fake.Clientset) { + err := createPod(clientset, "existing-pod", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "pod not found", + podName: "non-existent-pod", + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + podName: "error-pod", + setupMock: func(clientset *fake.Clientset) { + err := createPod(clientset, "error-pod", suite.namespace) + require.NoError(suite.T(), err) + // The pod exist and there is some error deleting it. 
+ + clientset.PrependReactor("delete", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingPodFailed.WithParams("error-pod").Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeletePodWithGracePeriod(context.Background(), tt.podName, tt.gracePeriod) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeletePod() { + tests := []struct { + name string + podName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + podName: "existing-pod", + setupMock: func(clientset *fake.Clientset) { + err := createPod(clientset, "existing-pod", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "pod not found", + podName: "non-existent-pod", + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + podName: "error-pod", + setupMock: func(clientset *fake.Clientset) { + err := createPod(clientset, "error-pod", suite.namespace) + require.NoError(suite.T(), err) + + clientset.PrependReactor("delete", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingPodFailed.WithParams("error-pod").Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeletePod(context.Background(), tt.podName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestPortForwardPod() { + // TestPortForwardPod is not implemented. + // + // The PortForwardPod function involves complex interactions with the Kubernetes API + // that are difficult to mock accurately. Specifically, it relies on SPDY protocol + // upgrades and bi-directional streaming, which are not easily simulated in a unit + // testing environment. + // + // The primary challenges include: + // - The use of spdy.RoundTripperFor to upgrade HTTP connections to SPDY, which + // involves lower-level network interactions that are not straightforward to + // mock with standard testing tools. + // - The need to simulate bi-directional streaming between the local machine and + // the Kubernetes API server, which requires a robust networking setup. + // + // Given these complexities, it is recommended to test the PortForwardPod function + // in an integration or end-to-end testing environment where real Kubernetes clusters + // and network conditions can be used to validate its behavior. 
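+    //
+    // For reference, an integration test could exercise port forwarding against a
+    // real cluster roughly as sketched below (illustrative only, not runnable
+    // as-is; assumes a *rest.Config named cfg and a request req targeting the
+    // pod's portforward subresource):
+    //
+    //  transport, upgrader, err := spdy.RoundTripperFor(cfg)
+    //  // handle err
+    //  dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
+    //  fw, err := portforward.New(dialer, []string{"8080:80"}, stopCh, readyCh, os.Stdout, os.Stderr)
+    //  // handle err, then block while forwarding:
+    //  err = fw.ForwardPorts()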
+}
+
+func createPod(clientset *fake.Clientset, name, namespace string) error {
+    _, err := clientset.CoreV1().Pods(namespace).Create(context.Background(), &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+        },
+    }, metav1.CreateOptions{})
+    return err
+}
diff --git a/pkg/k8s/k8s_pvc_test.go b/pkg/k8s/k8s_pvc_test.go
new file mode 100644
index 00000000..9f1b12fe
--- /dev/null
+++ b/pkg/k8s/k8s_pvc_test.go
@@ -0,0 +1,134 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+)
+
+func (suite *TestSuite) TestCreatePersistentVolumeClaim() {
+    tests := []struct {
+        name        string
+        pvcName     string
+        labels      map[string]string
+        size        resource.Quantity
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name:        "successful creation",
+            pvcName:     "test-pvc",
+            labels:      map[string]string{"app": "test"},
+            size:        resource.MustParse("1Gi"),
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name:    "client error",
+            pvcName: "error-pvc",
+            labels:  map[string]string{"app": "error"},
+            size:    resource.MustParse("1Gi"),
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrCreatingPersistentVolumeClaim.Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.CreatePersistentVolumeClaim(context.Background(), tt.pvcName, tt.labels, tt.size)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
+
+func (suite *TestSuite) TestDeletePersistentVolumeClaim() {
+    tests := []struct {
+        name        string
+        pvcName     string
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name:    "successful deletion",
+            pvcName: "test-pvc",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.Fake.PrependReactor("get", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, &v1.PersistentVolumeClaim{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Namespace: "test",
+                            Name:      "test-pvc",
+                        },
+                    }, nil
+                })
+                clientset.Fake.PrependReactor("delete", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, nil
+                })
+            },
+            expectedErr: nil,
+        },
+        {
+            name:    "pvc not found",
+            pvcName: "missing-pvc",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.Fake.PrependReactor("get", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("not found")
+                })
+            },
+            expectedErr: nil, // it should skip deletion if pvc not found
+        },
+        {
+            name:    "client error on delete",
+            pvcName: "error-pvc",
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.Fake.PrependReactor("get", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, &v1.PersistentVolumeClaim{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Namespace: "test",
+                            Name:      "error-pvc",
+                        },
+                    }, nil
+                })
+                clientset.Fake.PrependReactor("delete", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrDeletingPersistentVolumeClaim.Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            err := suite.client.DeletePersistentVolumeClaim(context.Background(), tt.pvcName)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+        })
+    }
+}
diff --git a/pkg/k8s/k8s_replicaset_test.go b/pkg/k8s/k8s_replicaset_test.go
new file mode 100644
index 00000000..04d198d3
--- /dev/null
+++ b/pkg/k8s/k8s_replicaset_test.go
@@ -0,0 +1,456 @@
+package k8s_test
+
+import (
+    "context"
+    "errors"
+
+    "github.com/celestiaorg/knuu/pkg/k8s"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    appv1 "k8s.io/api/apps/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/kubernetes/fake"
+    k8stesting "k8s.io/client-go/testing"
+    "k8s.io/utils/ptr"
+)
+
+func (suite *TestSuite) TestCreateReplicaSet() {
+    tests := []struct {
+        name        string
+        rsConfig    k8s.ReplicaSetConfig
+        init        bool
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name: "successful creation",
+            rsConfig: k8s.ReplicaSetConfig{
+                Name:      "test-rs",
+                Namespace: suite.namespace,
+                Labels:    map[string]string{"app": "test"},
+                Replicas:  1,
+                PodConfig: k8s.PodConfig{
+                    Namespace: suite.namespace,
+                    Name:      "test-pod",
+                    Labels:    map[string]string{"app": "test"},
+                },
+            },
+            init:        false,
+            setupMock:   func(clientset *fake.Clientset) {},
+            expectedErr: nil,
+        },
+        {
+            name: "client error",
+            rsConfig: k8s.ReplicaSetConfig{
+                Name:      "error-rs",
+                Namespace: suite.namespace,
+                Labels:    map[string]string{"app": "error"},
+                Replicas:  1,
+                PodConfig: k8s.PodConfig{
+                    Namespace: suite.namespace,
+                    Name:      "error-pod",
+                    Labels:    map[string]string{"app": "error"},
+                },
+            },
+            init: false,
+            setupMock: func(clientset *fake.Clientset) {
+                clientset.PrependReactor("create", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+                    return true, nil, errors.New("internal server error")
+                })
+            },
+            expectedErr: k8s.ErrCreatingReplicaSet.Wrap(errors.New("internal server error")),
+        },
+    }
+
+    for _, tt := range tests {
+        suite.Run(tt.name, func() {
+            tt.setupMock(suite.client.Clientset().(*fake.Clientset))
+
+            rs, err := suite.client.CreateReplicaSet(context.Background(), tt.rsConfig, tt.init)
+            if tt.expectedErr != nil {
+                require.Error(suite.T(), err)
+                assert.ErrorIs(suite.T(), err, tt.expectedErr)
+                return
+            }
+
+            require.NoError(suite.T(), err)
+            assert.Equal(suite.T(), tt.rsConfig.Name, rs.Name)
+        })
+    }
+}
+
+func (suite *TestSuite) TestReplaceReplicaSetWithGracePeriod() {
+    gracePeriod := int64(10)
+    tests := []struct {
+        name        string
+        rsConfig    k8s.ReplicaSetConfig
+        gracePeriod *int64
+        setupMock   func(*fake.Clientset)
+        expectedErr error
+    }{
+        {
+            name: "successful replacement",
+            rsConfig: k8s.ReplicaSetConfig{
+                Name:      "test-rs",
+                Namespace: suite.namespace,
+                Labels:    map[string]string{"app": "test"},
+                Replicas:  1,
+                PodConfig:
k8s.PodConfig{ + Namespace: suite.namespace, + Name: "test-pod", + Labels: map[string]string{"app": "test"}, + }, + }, + gracePeriod: &gracePeriod, + setupMock: func(clientset *fake.Clientset) { + err := createReplicaSet(clientset, "test-rs", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "client error on delete", + rsConfig: k8s.ReplicaSetConfig{ + Name: "error-rs", + Namespace: suite.namespace, + Labels: map[string]string{"app": "error"}, + Replicas: 1, + PodConfig: k8s.PodConfig{ + Namespace: suite.namespace, + Name: "error-pod", + Labels: map[string]string{"app": "error"}, + }, + }, + gracePeriod: &gracePeriod, + setupMock: func(clientset *fake.Clientset) { + // if it does not exist, the delete returns nil as the error, + // so we need to create it first to be able to pass the existence check + err := createReplicaSet(clientset, "error-rs", suite.namespace) + require.NoError(suite.T(), err) + + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingReplicaSet.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + rs, err := suite.client.ReplaceReplicaSetWithGracePeriod(context.Background(), tt.rsConfig, tt.gracePeriod) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.rsConfig.Name, rs.Name) + }) + } +} + +func (suite *TestSuite) TestReplaceReplicaSet() { + tests := []struct { + name string + rsConfig k8s.ReplicaSetConfig + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful replacement", + rsConfig: k8s.ReplicaSetConfig{ + Name: "test-rs", + Namespace: suite.namespace, + Labels: map[string]string{"app": "test"}, + Replicas: 1, + PodConfig: k8s.PodConfig{ + Namespace: suite.namespace, + Name: "test-pod", + Labels: map[string]string{"app": "test"}, + }, + }, + setupMock: func(clientset *fake.Clientset) { + err := createReplicaSet(clientset, "test-rs", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "client error on delete", + rsConfig: k8s.ReplicaSetConfig{ + Name: "error-rs", + Namespace: suite.namespace, + Labels: map[string]string{"app": "error"}, + Replicas: 1, + PodConfig: k8s.PodConfig{ + Namespace: suite.namespace, + Name: "error-pod", + Labels: map[string]string{"app": "error"}, + }, + }, + setupMock: func(clientset *fake.Clientset) { + // if it does not exist, the delete returns nil as the error, + // so we need to create it first to be able to pass the existence check + err := createReplicaSet(clientset, "error-rs", suite.namespace) + require.NoError(suite.T(), err) + + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingReplicaSet.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + rs, err := suite.client.ReplaceReplicaSet(context.Background(), tt.rsConfig) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + +
require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.rsConfig.Name, rs.Name) + }) + } +} + +func (suite *TestSuite) TestIsReplicaSetRunning() { + tests := []struct { + name string + rsName string + setupMock func(*fake.Clientset) + expectedRes bool + expectedErr error + }{ + { + name: "replica set is running", + rsName: "test-rs", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rs", + Namespace: suite.namespace, + }, + Spec: appv1.ReplicaSetSpec{ + Replicas: ptr.To[int32](1), + }, + Status: appv1.ReplicaSetStatus{ + ReadyReplicas: 1, + }, + }, nil + }) + }, + expectedRes: true, + expectedErr: nil, + }, + { + name: "replica set is not running", + rsName: "test-rs", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rs", + Namespace: suite.namespace, + }, + Spec: appv1.ReplicaSetSpec{ + Replicas: ptr.To[int32](1), + }, + Status: appv1.ReplicaSetStatus{ + ReadyReplicas: 0, + }, + }, nil + }) + }, + expectedRes: false, + expectedErr: nil, + }, + { + name: "client error", + rsName: "error-rs", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedRes: false, + expectedErr: k8s.ErrGettingPod.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + res, err := suite.client.IsReplicaSetRunning(context.Background(), tt.rsName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.expectedRes, res) + }) + } +} + +func (suite *TestSuite) TestDeleteReplicaSetWithGracePeriod() { + gracePeriod := int64(10) + tests := []struct { + name string + rsName string + gracePeriod *int64 + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + rsName: "test-rs", + gracePeriod: &gracePeriod, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rs", + Namespace: suite.namespace, + }, + }, nil + }) + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "replica set not found", + rsName: "missing-rs", + gracePeriod: &gracePeriod, + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error on delete", + rsName: "error-rs", + gracePeriod: &gracePeriod, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "error-rs", + Namespace: suite.namespace, + }, 
+ }, nil + }) + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingReplicaSet.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteReplicaSetWithGracePeriod(context.Background(), tt.rsName, tt.gracePeriod) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteReplicaSet() { + tests := []struct { + name string + rsName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + rsName: "test-rs", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rs", + Namespace: suite.namespace, + }, + }, nil + }) + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "replica set not found", + rsName: "missing-rs", + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error on delete", + rsName: "error-rs", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "error-rs", + Namespace: suite.namespace, + }, + }, nil + }) + clientset.PrependReactor("delete", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingReplicaSet.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteReplicaSet(context.Background(), tt.rsName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func createReplicaSet(clientset *fake.Clientset, name, namespace string) error { + _, err := clientset.AppsV1().ReplicaSets(namespace).Create(context.Background(), &appv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }, metav1.CreateOptions{}) + return err +} diff --git a/pkg/k8s/k8s_role_test.go b/pkg/k8s/k8s_role_test.go new file mode 100644 index 00000000..010549bd --- /dev/null +++ b/pkg/k8s/k8s_role_test.go @@ -0,0 +1,225 @@ +package k8s_test + +import ( + "context" + "errors" + + "github.com/celestiaorg/knuu/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func (suite *TestSuite) TestCreateRole() { + tests := []struct { + name string + roleName string + labels map[string]string + policyRules []rbacv1.PolicyRule + setupMock 
func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + roleName: "test-role", + labels: map[string]string{"app": "test"}, + policyRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Verbs: []string{"get", "list"}, + Resources: []string{"pods"}, + }, + }, + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + roleName: "error-role", + labels: map[string]string{"app": "error"}, + policyRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Verbs: []string{"get", "list"}, + Resources: []string{"pods"}, + }, + }, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("create", "roles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.CreateRole(context.Background(), tt.roleName, tt.labels, tt.policyRules) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteRole() { + tests := []struct { + name string + roleName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + roleName: "test-role", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "roles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + roleName: "error-role", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "roles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteRole(context.Background(), tt.roleName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestCreateClusterRole() { + tests := []struct { + name string + roleName string + labels map[string]string + policyRules []rbacv1.PolicyRule + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + roleName: "test-cluster-role", + labels: map[string]string{"app": "test"}, + policyRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Verbs: []string{"get", "list"}, + Resources: []string{"pods"}, + }, + }, + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + roleName: "error-cluster-role", + labels: map[string]string{"app": "error"}, + policyRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Verbs: []string{"get", "list"}, + Resources: []string{"pods"}, + }, + }, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "clusterroles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + clientset.PrependReactor("create", "clusterroles", 
func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrClusterRoleAlreadyExists.WithParams("error-cluster-role").Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.CreateClusterRole(context.Background(), tt.roleName, tt.labels, tt.policyRules) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteClusterRole() { + tests := []struct { + name string + roleName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + roleName: "test-cluster-role", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "clusterroles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + roleName: "error-cluster-role", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "clusterroles", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteClusterRole(context.Background(), tt.roleName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} diff --git a/pkg/k8s/k8s_rolebinding_test.go b/pkg/k8s/k8s_rolebinding_test.go new file mode 100644 index 00000000..2247a8d6 --- /dev/null +++ b/pkg/k8s/k8s_rolebinding_test.go @@ -0,0 +1,217 @@ +package k8s_test + +import ( + "context" + "errors" + + "github.com/celestiaorg/knuu/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func (suite *TestSuite) TestCreateRoleBinding() { + tests := []struct { + name string + roleBindingName string + labels map[string]string + role string + serviceAccount string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + roleBindingName: "test-rolebinding", + labels: map[string]string{"app": "test"}, + role: "test-role", + serviceAccount: "test-sa", + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + roleBindingName: "error-rolebinding", + labels: map[string]string{"app": "error"}, + role: "error-role", + serviceAccount: "error-sa", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("create", "rolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := 
suite.client.CreateRoleBinding(context.Background(), tt.roleBindingName, tt.labels, tt.role, tt.serviceAccount) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteRoleBinding() { + tests := []struct { + name string + bindingName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + bindingName: "test-rolebinding", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "rolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + bindingName: "error-rolebinding", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "rolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteRoleBinding(context.Background(), tt.bindingName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestCreateClusterRoleBinding() { + tests := []struct { + name string + bindingName string + labels map[string]string + clusterRole string + serviceAccount string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + bindingName: "test-clusterrolebinding", + labels: map[string]string{"app": "test"}, + clusterRole: "test-clusterrole", + serviceAccount: "test-sa", + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "cluster role binding already exists", + bindingName: "existing-clusterrolebinding", + labels: map[string]string{"app": "existing"}, + clusterRole: "existing-clusterrole", + serviceAccount: "existing-sa", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "clusterrolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &rbacv1.ClusterRoleBinding{}, nil + }) + }, + expectedErr: k8s.ErrClusterRoleBindingAlreadyExists.WithParams("existing-clusterrolebinding"), + }, + { + name: "client error", + bindingName: "error-clusterrolebinding", + labels: map[string]string{"app": "error"}, + clusterRole: "error-clusterrole", + serviceAccount: "error-sa", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "clusterrolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrClusterRoleBindingAlreadyExists.WithParams("error-clusterrolebinding").Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.CreateClusterRoleBinding(context.Background(), tt.bindingName, tt.labels, tt.clusterRole, tt.serviceAccount) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + 
return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteClusterRoleBinding() { + tests := []struct { + name string + bindingName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + bindingName: "test-clusterrolebinding", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "clusterrolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + bindingName: "error-clusterrolebinding", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "clusterrolebindings", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteClusterRoleBinding(context.Background(), tt.bindingName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} diff --git a/pkg/k8s/k8s_service.go b/pkg/k8s/k8s_service.go index 1b09745e..d0b28b7c 100644 --- a/pkg/k8s/k8s_service.go +++ b/pkg/k8s/k8s_service.go @@ -149,33 +149,36 @@ func (c *Client) WaitForService(ctx context.Context, name string) error { ticker := time.NewTicker(waitRetry) defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return ErrTimeoutWaitingForServiceReady - - case <-ticker.C: - ready, err := c.isServiceReady(ctx, name) - if err != nil { - return ErrCheckingServiceReady.WithParams(name).Wrap(err) - } - if !ready { - continue + for firstIteration := true; ; firstIteration = false { + if !firstIteration { + select { + case <-ctx.Done(): + return ErrTimeoutWaitingForServiceReady + case <-ticker.C: + // Wait for the next tick before proceeding } + } - // Check if service is reachable - endpoint, err := c.GetServiceEndpoint(ctx, name) - if err != nil { - return ErrGettingServiceEndpoint.WithParams(name).Wrap(err) - } + ready, err := c.isServiceReady(ctx, name) + if err != nil { + return ErrCheckingServiceReady.WithParams(name).Wrap(err) + } + if !ready { + continue + } - if err := checkServiceConnectivity(endpoint); err != nil { - continue - } + // Check if service is reachable + endpoint, err := c.GetServiceEndpoint(ctx, name) + if err != nil { + return ErrGettingServiceEndpoint.WithParams(name).Wrap(err) + } - // Service is reachable - return nil + if err := checkServiceConnectivity(endpoint); err != nil { + continue } + + // Service is reachable + return nil } } @@ -206,7 +209,7 @@ func (c *Client) GetServiceEndpoint(ctx context.Context, name string) (string, e // Use the first node for simplicity, you might need to handle multiple nodes var nodeIP string for _, address := range nodes.Items[0].Status.Addresses { - if address.Type == "ExternalIP" { + if address.Type == v1.NodeExternalIP { nodeIP = address.Address break } @@ -231,6 +234,7 @@ func (c *Client) isServiceReady(ctx context.Context, name string) (bool, error) if err != nil { return false, ErrGettingService.WithParams(name).Wrap(err) } + switch service.Spec.Type { case v1.ServiceTypeLoadBalancer: return len(service.Status.LoadBalancer.Ingress) > 0, nil diff --git a/pkg/k8s/k8s_service_test.go 
b/pkg/k8s/k8s_service_test.go new file mode 100644 index 00000000..7d10b293 --- /dev/null +++ b/pkg/k8s/k8s_service_test.go @@ -0,0 +1,537 @@ +package k8s_test + +import ( + "context" + "errors" + "net" + "time" + + "github.com/celestiaorg/knuu/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func (suite *TestSuite) TestGetService() { + tests := []struct { + name string + svcName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful retrieval", + svcName: "test-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + }, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrGettingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + svc, err := suite.client.GetService(context.Background(), tt.svcName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.svcName, svc.Name) + }) + } +} + +func (suite *TestSuite) TestCreateService() { + tests := []struct { + name string + svcName string + labels map[string]string + selectorMap map[string]string + portsTCP []int + portsUDP []int + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + svcName: "test-service", + labels: map[string]string{"app": "test"}, + selectorMap: map[string]string{"app": "test"}, + portsTCP: []int{80}, + portsUDP: []int{53}, + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + labels: map[string]string{"app": "error"}, + selectorMap: map[string]string{"app": "error"}, + portsTCP: []int{80}, + portsUDP: []int{53}, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("create", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrCreatingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + svc, err := suite.client.CreateService(context.Background(), tt.svcName, tt.labels, tt.selectorMap, tt.portsTCP, tt.portsUDP) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.svcName, svc.Name) + }) + } +} + +func (suite *TestSuite) TestPatchService() { + tests := []struct { + name string + svcName string + labels 
map[string]string + selectorMap map[string]string + portsTCP []int + portsUDP []int + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful patch", + svcName: "test-service", + labels: map[string]string{"app": "test"}, + selectorMap: map[string]string{"app": "test"}, + portsTCP: []int{80}, + portsUDP: []int{53}, + setupMock: func(clientset *fake.Clientset) { + err := createService(clientset, "test-service", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + labels: map[string]string{"app": "error"}, + selectorMap: map[string]string{"app": "error"}, + portsTCP: []int{80}, + portsUDP: []int{53}, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("update", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrPatchingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + svc, err := suite.client.PatchService(context.Background(), tt.svcName, tt.labels, tt.selectorMap, tt.portsTCP, tt.portsUDP) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.svcName, svc.Name) + }) + } +} + +func (suite *TestSuite) TestDeleteService() { + tests := []struct { + name string + svcName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + svcName: "test-service", + setupMock: func(clientset *fake.Clientset) { + err := createService(clientset, "test-service", suite.namespace) + require.NoError(suite.T(), err) + }, + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + setupMock: func(clientset *fake.Clientset) { + err := createService(clientset, "error-service", suite.namespace) + require.NoError(suite.T(), err) + + clientset.PrependReactor("delete", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrDeletingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteService(context.Background(), tt.svcName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestGetServiceIP() { + tests := []struct { + name string + svcName string + setupMock func(*fake.Clientset) + expectedIP string + expectedErr error + }{ + { + name: "successful retrieval", + svcName: "test-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + Spec: v1.ServiceSpec{ + ClusterIP: "10.0.0.1", + }, + }, nil + }) + }, + expectedIP: "10.0.0.1", + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + setupMock: func(clientset 
*fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedIP: "", + expectedErr: k8s.ErrGettingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + ip, err := suite.client.GetServiceIP(context.Background(), tt.svcName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.expectedIP, ip) + }) + } +} + +func (suite *TestSuite) TestWaitForService() { + tests := []struct { + name string + svcName string + setupMock func(*fake.Clientset) + serviceEndpoint string + expectedErr error + }{ + { + name: "successful wait load balancer", + svcName: "test-service", + serviceEndpoint: "127.0.0.1:8171", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeLoadBalancer, + Ports: []v1.ServicePort{ + { + Port: 8171, + }, + }, + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: "127.0.0.1", + }, + }, + }, + }, + }, nil + }) + }, + expectedErr: nil, + }, + { + name: "successful wait node port", + svcName: "test-service", + serviceEndpoint: "127.0.0.1:8172", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Ports: []v1.ServicePort{ + { + NodePort: 8172, + }, + }, + }, + }, nil + }) + clientset.PrependReactor("list", "nodes", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.NodeList{ + Items: []v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Address: "127.0.0.1", + Type: v1.NodeExternalIP, + }, + }, + }, + }, + }, + }, nil + }) + }, + expectedErr: nil, + }, + { + name: "successful wait cluster IP", + svcName: "test-service", + serviceEndpoint: "127.0.0.1:8173", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + Spec: v1.ServiceSpec{ + ExternalIPs: []string{"127.0.0.1"}, + ClusterIP: "127.0.0.1", + Ports: []v1.ServicePort{ + { + Port: 8173, + }, + }, + }, + }, nil + }) + }, + expectedErr: nil, + }, + { + name: "context canceled", + svcName: "canceled-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "canceled-service", + Namespace: suite.namespace, + }, + }, nil + }) + }, 
+ expectedErr: k8s.ErrTimeoutWaitingForServiceReady, + }, + { + name: "client error", + svcName: "error-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: k8s.ErrCheckingServiceReady.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + suite.T().Parallel() + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + if tt.serviceEndpoint != "" { + listener, err := startDummyServer(tt.serviceEndpoint) + require.NoError(suite.T(), err) + defer listener.Close() + } + + ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + + err := suite.client.WaitForService(ctx, tt.svcName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.ErrorIs(suite.T(), err, tt.expectedErr) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestGetServiceEndpoint() { + tests := []struct { + name string + svcName string + setupMock func(*fake.Clientset) + expectedEP string + expectedErr error + }{ + { + name: "successful retrieval for ClusterIP", + svcName: "test-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: suite.namespace, + }, + Spec: v1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []v1.ServicePort{ + { + Port: 80, + }, + }, + Type: v1.ServiceTypeClusterIP, + }, + }, nil + }) + }, + expectedEP: "10.0.0.1:80", + expectedErr: nil, + }, + { + name: "client error", + svcName: "error-service", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("get", "services", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedEP: "", + expectedErr: k8s.ErrGettingService.Wrap(errors.New("internal server error")), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + ep, err := suite.client.GetServiceEndpoint(context.Background(), tt.svcName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + assert.Equal(suite.T(), tt.expectedEP, ep) + }) + } +} + +func createService(clientset *fake.Clientset, name, namespace string) error { + _, err := clientset.CoreV1().Services(namespace).Create(context.Background(), &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }, metav1.CreateOptions{}) + return err +} + +func startDummyServer(address string) (net.Listener, error) { + listener, err := net.Listen("tcp", address) + if err != nil { + return nil, err + } + go func() { + for { + conn, err := listener.Accept() + if err != nil { + return + } + conn.Close() + } + }() + return listener, nil +} diff --git a/pkg/k8s/k8s_serviceaccount_test.go b/pkg/k8s/k8s_serviceaccount_test.go new file mode 100644 index 00000000..d94ab056 --- /dev/null +++ b/pkg/k8s/k8s_serviceaccount_test.go @@ -0,0 +1,101 @@ +package
k8s_test + +import ( + "context" + "errors" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +func (suite *TestSuite) TestCreateServiceAccount() { + tests := []struct { + name string + saName string + labels map[string]string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful creation", + saName: "test-sa", + labels: map[string]string{"app": "test"}, + setupMock: func(clientset *fake.Clientset) {}, + expectedErr: nil, + }, + { + name: "client error", + saName: "error-sa", + labels: map[string]string{"app": "error"}, + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("create", "serviceaccounts", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.CreateServiceAccount(context.Background(), tt.saName, tt.labels) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} + +func (suite *TestSuite) TestDeleteServiceAccount() { + tests := []struct { + name string + saName string + setupMock func(*fake.Clientset) + expectedErr error + }{ + { + name: "successful deletion", + saName: "test-sa", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "serviceaccounts", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, nil + }) + }, + expectedErr: nil, + }, + { + name: "client error", + saName: "error-sa", + setupMock: func(clientset *fake.Clientset) { + clientset.PrependReactor("delete", "serviceaccounts", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("internal server error") + }) + }, + expectedErr: errors.New("internal server error"), + }, + } + + for _, tt := range tests { + suite.Run(tt.name, func() { + tt.setupMock(suite.client.Clientset().(*fake.Clientset)) + + err := suite.client.DeleteServiceAccount(context.Background(), tt.saName) + if tt.expectedErr != nil { + require.Error(suite.T(), err) + assert.Equal(suite.T(), tt.expectedErr.Error(), err.Error()) + return + } + + require.NoError(suite.T(), err) + }) + } +} diff --git a/pkg/k8s/suite_setup_test.go b/pkg/k8s/suite_setup_test.go new file mode 100644 index 00000000..31af007d --- /dev/null +++ b/pkg/k8s/suite_setup_test.go @@ -0,0 +1,36 @@ +package k8s_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + "k8s.io/apimachinery/pkg/runtime" + discfake "k8s.io/client-go/discovery/fake" + dynfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" + + "github.com/celestiaorg/knuu/pkg/k8s" +) + +type TestSuite struct { + suite.Suite + client *k8s.Client + namespace string +} + +func (suite *TestSuite) SetupTest() { + clientset := fake.NewSimpleClientset() + discoveryClient := &discfake.FakeDiscovery{Fake: &k8stesting.Fake{}} + dynamicClient := dynfake.NewSimpleDynamicClient(runtime.NewScheme()) + suite.namespace = "test" + + suite.client = k8s.NewCustom(clientset, discoveryClient, dynamicClient, suite.namespace) +} + 
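+
+// Note: tests in this suite retrieve the underlying fake through the
+// Clientset accessor and register reactors on it to inject behavior, e.g.:
+//
+//	cs := suite.client.Clientset().(*fake.Clientset)
+//	cs.PrependReactor("get", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+//		return true, nil, errors.New("injected error")
+//	})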
+func (suite *TestSuite) TearDownTest() { +} + +func TestKubeManagerTestSuite(t *testing.T) { + suite.Run(t, new(TestSuite)) +} diff --git a/pkg/k8s/types.go b/pkg/k8s/types.go index 586895f5..54408b28 100644 --- a/pkg/k8s/types.go +++ b/pkg/k8s/types.go @@ -9,12 +9,13 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) type KubeManager interface { - Clientset() *kubernetes.Clientset + Clientset() kubernetes.Interface CreateClusterRole(ctx context.Context, name string, labels map[string]string, policyRules []rbacv1.PolicyRule) error CreateClusterRoleBinding(ctx context.Context, name string, labels map[string]string, clusterRole, serviceAccount string) error CreateConfigMap(ctx context.Context, name string, labels, data map[string]string) (*corev1.ConfigMap, error) @@ -44,6 +45,7 @@ type KubeManager interface { DeleteService(ctx context.Context, name string) error DeleteServiceAccount(ctx context.Context, name string) error DeployPod(ctx context.Context, podConfig PodConfig, init bool) (*corev1.Pod, error) + DiscoveryClient() discovery.DiscoveryInterface DynamicClient() dynamic.Interface GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) GetDaemonSet(ctx context.Context, name string) (*appv1.DaemonSet, error) @@ -68,9 +70,6 @@ type KubeManager interface { ReplaceReplicaSet(ctx context.Context, ReplicaSetConfig ReplicaSetConfig) (*appv1.ReplicaSet, error) ReplaceReplicaSetWithGracePeriod(ctx context.Context, ReplicaSetConfig ReplicaSetConfig, gracePeriod *int64) (*appv1.ReplicaSet, error) RunCommandInPod(ctx context.Context, podName, containerName string, cmd []string) (string, error) - getPersistentVolumeClaim(ctx context.Context, name string) (*corev1.PersistentVolumeClaim, error) - getPod(ctx context.Context, name string) (*corev1.Pod, error) - getReplicaSet(ctx context.Context, name string) (*appv1.ReplicaSet, error) ConfigMapExists(ctx context.Context, name string) (bool, error) UpdateDaemonSet(ctx context.Context, name string, labels map[string]string, initContainers []corev1.Container, containers []corev1.Container) (*appv1.DaemonSet, error) WaitForDeployment(ctx context.Context, name string) error diff --git a/pkg/k8s/utils.go b/pkg/k8s/utils.go new file mode 100644 index 00000000..571e09d9 --- /dev/null +++ b/pkg/k8s/utils.go @@ -0,0 +1,57 @@ +package k8s + +import ( + "os" + "path/filepath" + "regexp" + "strings" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// isClusterEnvironment checks if the program is running in a Kubernetes cluster. +func isClusterEnvironment() bool { + return fileExists(tokenPath) && fileExists(certPath) +} + +func fileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +// getClusterConfig returns the appropriate Kubernetes cluster configuration. +func getClusterConfig() (*rest.Config, error) { + if isClusterEnvironment() { + return rest.InClusterConfig() + } + + // build the configuration from the kubeconfig file + kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config") + return clientcmd.BuildConfigFromFlags("", kubeconfig) +} + +// precompile the regular expression to avoid recompiling it on every function call +var invalidCharsRegexp = regexp.MustCompile(`[^a-z0-9-]+`) + +// SanitizeName ensures compliance with Kubernetes DNS-1123 subdomain names. It: +// 1. Converts the input string to lowercase. +// 2. 
Replaces underscores and any non-DNS-1123 compliant characters with hyphens. +// 3. Trims leading and trailing hyphens. +// 4. Ensures the name does not exceed 63 characters, trimming excess characters if necessary +// and ensuring it does not end with a hyphen after trimming. +// +// Use this function to sanitize strings to be used as Kubernetes names for resources. +func SanitizeName(name string) string { + sanitized := strings.ToLower(name) + // Replace underscores and any other disallowed characters with hyphens + sanitized = invalidCharsRegexp.ReplaceAllString(sanitized, "-") + // Trim leading and trailing hyphens + sanitized = strings.Trim(sanitized, "-") + if len(sanitized) > 63 { + sanitized = sanitized[:63] + // Ensure it does not end with a hyphen after cutting it to the max length + sanitized = strings.TrimRight(sanitized, "-") + } + return sanitized +} diff --git a/pkg/k8s/utils_test.go b/pkg/k8s/utils_test.go new file mode 100644 index 00000000..20f2c4b9 --- /dev/null +++ b/pkg/k8s/utils_test.go @@ -0,0 +1,88 @@ +package k8s + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSanitizeName(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "simple case", + input: "SimpleName", + expected: "simplename", + }, + { + name: "contains invalid characters", + input: "Name_With_Invalid_Characters!", + expected: "name-with-invalid-characters", + }, + { + name: "too long name", + input: strings.Repeat("a", 64), + expected: strings.Repeat("a", 63), + }, + { + name: "leading and trailing hyphens", + input: "---name---", + expected: "name", + }, + { + name: "name with mixed case", + input: "MixedCASEname", + expected: "mixedcasename", + }, + { + name: "name with spaces", + input: "name with spaces", + expected: "name-with-spaces", + }, + { + name: "name with dots and underscores", + input: "name.with.dots_and_underscores", + expected: "name-with-dots-and-underscores", + }, + { + name: "name with special characters", + input: "name!@#with$%^special&*()characters", + expected: "name-with-special-characters", + }, + { + name: "name with trailing hyphens after length cut", + input: strings.Repeat("a", 62) + "-b", + expected: strings.Repeat("a", 62), + }, + { + name: "empty name", + input: "", + expected: "", + }, + { + name: "name with only invalid characters", + input: "!!@@##$$", + expected: "", + }, + { + name: "name with leading and trailing spaces", + input: " leading-and-trailing-spaces ", + expected: "leading-and-trailing-spaces", + }, + { + name: "name with a mix of allowed and invalid characters", + input: "Name123_with.Mixed_Characters!", + expected: "name123-with-mixed-characters", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, SanitizeName(tt.input)) + }) + } +} diff --git a/pkg/knuu/knuu_test.go b/pkg/knuu/knuu_test.go index 35413ebf..b4097322 100644 --- a/pkg/knuu/knuu_test.go +++ b/pkg/knuu/knuu_test.go @@ -26,7 +26,7 @@ type mockK8s struct { mock.Mock } -func (m *mockK8s) Clientset() *kubernetes.Clientset { +func (m *mockK8s) Clientset() kubernetes.Interface { return &kubernetes.Clientset{} }