diff --git a/Makefile b/Makefile
index f2128eb..1b50e42 100644
--- a/Makefile
+++ b/Makefile
@@ -32,6 +32,9 @@ LDFLAGS ?= "-X $(BUILD_TIME_VAR)=$(BUILD_TIMESTAMP) -X $(BUILD_VERSION_VAR)=$(VE
 KUSTOMIZE_VERSION ?= v4.5.7
 CONTROLLER_TOOLS_VERSION ?= v0.15.0
 KIND_NODE_IMAGE_VERSION ?= v1.30.2
+BATS_VERSION ?= 1.11.0
+SHELLCHECK_VER ?= v0.10.0
+KIND_VERSION ?= v0.23.0
 TRIVY_VERSION ?= 0.52.2

 ## Tool Binaries
@@ -40,6 +43,9 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
 ENVTEST ?= $(LOCALBIN)/setup-envtest
 GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
 HELM := helm
+KIND := kind
+ENVSUBST := envsubst
+BATS := bats
 TRIVY := trivy

 # Image URL to use all building/pushing image targets
@@ -153,23 +159,32 @@ endif
 ## --------------------------------------

 .PHONY: local-setup
-local-setup: docker-build ## setup and run sync controller locally
+local-setup: docker-build setup-kind-cluster helm-manifest-install ## setup and run sync controller locally
+	kubectl apply -f ./hack/localsetup/e2e-providerspc.yaml
+	kubectl apply -f ./hack/localsetup/e2e-secret-sync.yaml
+
+.PHONY: setup-kind-cluster
+setup-kind-cluster:
 	kind delete cluster --name sync-controller
 	kind create cluster --name sync-controller \
 		--image kindest/node:$(KIND_NODE_IMAGE_VERSION) \
 		--config=./hack/localsetup/kind-config.yaml
 	kind load docker-image --name sync-controller $(IMAGE_TAG)

+.PHONY: helm-manifest-install ## Install Helm manifests
+helm-manifest-install:
 	cp manifest_staging/charts/secrets-store-sync-controller/values.yaml manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml
-	sed -i '' '/providerContainer:/,/providervol:/s/^#//g' manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml
+	@if [[ "$$(uname)" == "Darwin" ]]; then \
+		sed -i '' '/providerContainer:/,/providervol:/s/^#//g' manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml; \
+	else \
+		sed -i '/providerContainer:/,/providervol:/s/^#//g' manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml; \
+	fi
 	helm install secrets-store-sync-controller \
 		-f manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml \
 		--set image.tag=$(VERSION) \
 		manifest_staging/charts/secrets-store-sync-controller
 	rm -f manifest_staging/charts/secrets-store-sync-controller/temp_values.yaml
-	kubectl apply -f ./hack/localsetup/e2e-providerspc.yaml
-	kubectl apply -f ./hack/localsetup/e2e-secret-sync.yaml

 ## --------------------------------------
 ## Testing Binaries
 ## --------------------------------------
@@ -178,6 +193,28 @@ local-setup: docker-build ## setup and run sync controller locally

 $(HELM): ## Install helm3 if not present
 	helm version --short | grep -q v3 || (curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash)

+$(BATS): ## Install bats for running the tests
+	bats --version | grep -q $(BATS_VERSION) || (curl -sSLO https://github.com/bats-core/bats-core/archive/v${BATS_VERSION}.tar.gz && tar -zxvf v${BATS_VERSION}.tar.gz && bash bats-core-${BATS_VERSION}/install.sh /usr/local)
+
+$(ENVSUBST): ## Install envsubst for running the tests
+	envsubst -V || (apt-get -o Acquire::Retries=30 update && apt-get -o Acquire::Retries=30 install gettext-base -y)
+
+SHELLCHECK := $(TOOLS_BIN_DIR)/shellcheck-$(SHELLCHECK_VER)
+$(SHELLCHECK): OS := $(shell uname | tr '[:upper:]' '[:lower:]')
+$(SHELLCHECK): ARCH := $(shell uname -m)
+$(SHELLCHECK):
+	mkdir -p $(TOOLS_BIN_DIR)
+	rm -rf "$(SHELLCHECK)*"
"https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VER)/shellcheck-$(SHELLCHECK_VER).$(OS).$(ARCH).tar.xz" + tar xf shellcheck-$(SHELLCHECK_VER).$(OS).$(ARCH).tar.xz + cp "shellcheck-$(SHELLCHECK_VER)/shellcheck" "$(SHELLCHECK)" + ln -sf "$(SHELLCHECK)" "$(TOOLS_BIN_DIR)/shellcheck" + chmod +x "$(TOOLS_BIN_DIR)/shellcheck" "$(SHELLCHECK)" + rm -rf shellcheck* + +$(KIND): ## Download and install kind + kind --version | grep -q $(KIND_VERSION) || (curl -L https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-linux-amd64 --output kind && chmod +x kind && mv kind /usr/local/bin/) + $(TRIVY): ## Install trivy for image vulnerability scan trivy -v | grep -q $(TRIVY_VERSION) || (curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v$(TRIVY_VERSION)) @@ -199,8 +236,9 @@ image-scan: $(TRIVY) ## -------------------------------------- ## Linting ## -------------------------------------- + .PHONY: test-style -test-style: lint lint-charts +test-style: lint lint-charts shellcheck $(GOLANGCI_LINT): ## Build golangci-lint from tools folder. cd $(TOOLS_MOD_DIR) && \ @@ -212,5 +250,21 @@ lint: $(GOLANGCI_LINT) $(GOLANGCI_LINT) run --timeout=5m -v lint-charts: $(HELM) # Run helm lint tests - # ToDO: Add helm lint for 'charts' dir once released first version - $(HELM) lint manifest_staging/charts/secrets-store-sync-controller + helm lint manifest_staging/charts/secrets-store-sync-controller + +.PHONY: shellcheck +shellcheck: $(SHELLCHECK) + find . \( -name '*.sh' -o -name '*.bash' \) | xargs $(SHELLCHECK) + +## -------------------------------------- +## E2E Testing +## -------------------------------------- + +.PHONY: e2e-setup ## Setup environment for e2e tests +e2e-setup: $(HELM) $(BATS) $(ENVSUBST) $(KIND) + + +# Run the e2e provider tests +.PHONY: run-e2e-provider-tests +run-e2e-provider-tests: e2e-setup docker-build setup-kind-cluster helm-manifest-install + bats -t -T test/bats/e2e-provider.bats diff --git a/manifest_staging/charts/secrets-store-sync-controller/values.yaml b/manifest_staging/charts/secrets-store-sync-controller/values.yaml index fdf2459..3338c26 100644 --- a/manifest_staging/charts/secrets-store-sync-controller/values.yaml +++ b/manifest_staging/charts/secrets-store-sync-controller/values.yaml @@ -50,10 +50,10 @@ podAnnotations: podLabels: control-plane: controller-manager secrets-store.io/system: "true" + app: secrets-store-sync-controller nodeSelector: - tolerations: - operator: Exists diff --git a/test/bats/README.md b/test/bats/README.md new file mode 100644 index 0000000..3668d10 --- /dev/null +++ b/test/bats/README.md @@ -0,0 +1,28 @@ +# Testing Secrets Store Sync Controller with e2e provider + +This directory contains e2e test scripts for the Secrets Store Sync Controller. Before running the e2e tests, install +the Secrets Store Sync Controller with the `e2e provider` as outlined [here](../../README.md#getting-started). + +## Running the tests + +1. To run the tests, from the root directory run: + + ```shell + make run-e2e-provider-tests + ``` + +## Testing + +This doc lists the different Secret Sync scenarios tested as part of CI. 
+
+## Testing
+
+The table below lists the Secret Sync scenarios exercised as part of CI.
+
+## E2E tests
+
+| Test Description                                                                                                      | E2E |
+|------------------------------------------------------------------------------------------------------------------------|-----|
+| Check if `secretproviderclasses` CRD is established                                                                   | ✔️ |
+| Check if `secretsyncs` CRD is established                                                                             | ✔️ |
+| Test if RBAC roles and role bindings exist                                                                            | ✔️ |
+| Deploy the `e2e-providerspc` SecretProviderClass custom resource                                                      | ✔️ |
+| Deploy the `sse2esecret` SecretSync custom resource                                                                   | ✔️ |
+| Deploy SecretProviderClass and SecretSync in different namespaces and check that no secret is created                 | ✔️ |
+| Deploy SecretProviderClass and SecretSync, ensure the secret is created, then delete the SecretSync and verify the secret is removed | ✔️ |
diff --git a/test/bats/e2e-provider.bats b/test/bats/e2e-provider.bats
new file mode 100644
index 0000000..6acb73d
--- /dev/null
+++ b/test/bats/e2e-provider.bats
@@ -0,0 +1,119 @@
+#!/usr/bin/env bats
+
+load helpers
+
+BATS_RESOURCE_MANIFESTS_DIR=hack/localsetup
+BATS_RESOURCE_YAML_DIR=test/bats/tests/e2e_provider
+WAIT_TIME=60
+SLEEP_TIME=1
+
+@test "secretproviderclasses crd is established" {
+  kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io
+
+  run kubectl get crd/secretproviderclasses.secrets-store.csi.x-k8s.io
+  assert_success
+}
+
+@test "secretsync crd is established" {
+  kubectl wait --for condition=established --timeout=60s crd/secretsyncs.secret-sync.x-k8s.io
+
+  run kubectl get crd/secretsyncs.secret-sync.x-k8s.io
+  assert_success
+}
+
+@test "Test rbac roles and role bindings exist" {
+  run kubectl get clusterrole/secrets-store-sync-controller-manager-role
+  assert_success
+
+  run kubectl get clusterrolebinding/secrets-store-sync-controller-manager-rolebinding
+  assert_success
+}
+
+@test "[v1alpha1] validate secret creation and deletion with SecretProviderClass and SecretSync" {
+  kubectl create namespace test-v1alpha1 --dry-run=client -o yaml | kubectl apply -f -
+
+  # Create the SecretProviderClass (SPC)
+  kubectl apply -n test-v1alpha1 -f $BATS_RESOURCE_MANIFESTS_DIR/e2e-providerspc.yaml
+
+  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/e2e-providerspc -n test-v1alpha1 -o yaml | grep e2e-providerspc"
+  wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd"
+
+  # Create the SecretSync
+  kubectl apply -n test-v1alpha1 -f $BATS_RESOURCE_MANIFESTS_DIR/e2e-secret-sync.yaml
+
+  cmd="kubectl get secretsyncs.secret-sync.x-k8s.io/sse2esecret -n test-v1alpha1 -o yaml | grep sse2esecret"
+  wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd"
+
+  # Retrieve the synced secret and confirm it is owned by the SecretSync
+  cmd="kubectl get secret sse2esecret -n test-v1alpha1 -o yaml | grep 'apiVersion: secret-sync.x-k8s.io/v1alpha1'"
+  wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd"
+
+  # Check the data in the secret
+  expected_data="secret"
+  secret_data=$(kubectl get secret sse2esecret -n test-v1alpha1 -o jsonpath='{.data.bar}' | base64 --decode)
+  [ "$secret_data" = "$expected_data" ]
+
+  # Check that the owner count is 1
+  cmd="compare_owner_count sse2esecret test-v1alpha1 1"
+  wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd"
+
+  # Delete the SecretSync
+  cmd="kubectl delete secretsync sse2esecret -n test-v1alpha1"
+  wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd"
+
+  # Check that the secret is deleted
+  cmd="kubectl get secret sse2esecret -n test-v1alpha1"
$cmd" +} + +@test "SecretProviderClass and SecretSync are deployed in different namespaces" { + # Create namespaces + kubectl create namespace spc-namespace --dry-run=client -o yaml | kubectl apply -f - + kubectl create namespace ss-namespace --dry-run=client -o yaml | kubectl apply -f - + + # Deploy the SecretProviderClass in spc-namespace + kubectl apply -n spc-namespace -f $BATS_RESOURCE_MANIFESTS_DIR/e2e-providerspc.yaml + + cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/e2e-providerspc -n spc-namespace -o yaml | grep e2e-providerspc" + wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd" + + # Deploy the SecretSync in ss-namespace + kubectl apply -n ss-namespace -f $BATS_RESOURCE_MANIFESTS_DIR/e2e-secret-sync.yaml + + cmd="kubectl get secretsyncs.secret-sync.x-k8s.io/sse2esecret -n ss-namespace -o yaml | grep sse2esecret" + wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd" + + # Check the status of SecretSync in ss-namespace + status=$(kubectl get secretsyncs.secret-sync.x-k8s.io/sse2esecret -n ss-namespace -o jsonpath='{.status.conditions[0]}') + + expected_message="Secret update failed because the controller could not retrieve the Secret Provider Class or the SPC is misconfigured. Check the logs or the events for more information." + expected_reason="ControllerSPCError" + expected_status="False" + + # Extract individual fields from the status + message=$(echo $status | jq -r .message) + reason=$(echo $status | jq -r .reason) + status_value=$(echo $status | jq -r .status) + + # Verify the status fields + [ "$message" = "$expected_message" ] + [ "$reason" = "$expected_reason" ] + [ "$status_value" = "$expected_status" ] + + # Check that the secret is not created in ss-namespace + cmd="kubectl get secret sse2esecret -n ss-namespace" + run $cmd + assert_failure +} + +teardown_file() { + archive_provider "app=secrets-store-sync-controller" || true + archive_info || true + + if [[ "${INPLACE_UPGRADE_TEST}" != "true" ]]; then + #cleanup + run kubectl delete namespace test-v1alpha1 + run kubectl delete namespace spc-namespace + run kubectl delete namespace ss-namespace + echo "Done cleaning up e2e tests" + fi +} diff --git a/test/bats/helpers.bash b/test/bats/helpers.bash new file mode 100644 index 0000000..898fa8c --- /dev/null +++ b/test/bats/helpers.bash @@ -0,0 +1,97 @@ +#!/bin/bash + +assert_success() { + if [[ "${status:-}" != 0 ]]; then + echo "expected: 0" + echo "actual: ${status:-}" + echo "output: ${output:-}" + return 1 + fi +} + +assert_failure() { + if [[ "${status:-}" == 0 ]]; then + echo "expected: non-zero exit code" + echo "actual: ${status:-}" + echo "output: ${output:-}" + return 1 + fi +} + +archive_provider() { + # Determine log directory + if [[ -z "${ARTIFACTS}" ]]; then + return 0 + fi + + FILE_PREFIX=$(date +"%FT%H%M%S") + + kubectl logs -l "$1" --tail -1 -n secrets-store-sync-controller-system > "${ARTIFACTS}/${FILE_PREFIX}-provider.logs" +} + +archive_info() { + # Determine log directory + if [[ -z "${ARTIFACTS}" ]]; then + return 0 + fi + + LOGS_DIR="${ARTIFACTS}/$(date +"%FT%H%M%S")" + mkdir -p "${LOGS_DIR}" + + # print all pod information + kubectl get pods -A -o json > "${LOGS_DIR}/pods.json" + + # print detailed pod information + kubectl describe pods --all-namespaces > "${LOGS_DIR}/pods-describe.txt" + + # print logs from the secrets-store-sync-controller + # + # assumes secrets-store-sync-controller is installed with helm into the `secrets-store-sync-controller-system` namespace which + # sets the `app` selector to 
+  # sets the `app` selector to `secrets-store-sync-controller`.
+  #
+  # Note: the yaml deployment would require `app=secrets-store-sync-controller`
+  kubectl logs -l app=secrets-store-sync-controller --tail -1 -c manager -n secrets-store-sync-controller-system > "${LOGS_DIR}/secrets-store-sync-controller.log"
+  kubectl logs -l app=secrets-store-sync-controller --tail -1 -c provider-e2e-installer -n secrets-store-sync-controller-system > "${LOGS_DIR}/e2e-provider.log"
+
+  # print client and server version information
+  kubectl version > "${LOGS_DIR}/kubectl-version.txt"
+
+  # print generic cluster information
+  kubectl cluster-info dump > "${LOGS_DIR}/cluster-info.txt"
+
+  # collect metrics
+  local curl_pod_name
+  curl_pod_name="curl-$(openssl rand -hex 5)"
+  kubectl run "${curl_pod_name}" -n default --image=curlimages/curl:7.75.0 --labels="test=metrics_test" --overrides='{"spec": { "nodeSelector": {"kubernetes.io/os": "linux"}}}' -- tail -f /dev/null
+  kubectl wait --for=condition=Ready --timeout=60s -n default pod "${curl_pod_name}"
+
+  for pod_ip in $(kubectl get pod -n secrets-store-sync-controller-system -l app=secrets-store-sync-controller -o jsonpath="{.items[*].status.podIP}")
+  do
+    kubectl exec -n default "${curl_pod_name}" -- curl -s http://"${pod_ip}":8085/metrics > "${LOGS_DIR}/${pod_ip}.metrics"
+  done
+
+  kubectl delete pod -n default "${curl_pod_name}"
+}
+
+compare_owner_count() {
+  secret="$1"
+  namespace="$2"
+  ownercount="$3"
+
+  [[ "$(kubectl get secret "${secret}" -n "${namespace}" -o json | jq '.metadata.ownerReferences | length')" -eq "${ownercount}" ]]
+}
+
+wait_for_process() {
+  wait_time="$1"
+  sleep_time="$2"
+  cmd="$3"
+  while [ "$wait_time" -gt 0 ]; do
+    if eval "$cmd"; then
+      return 0
+    else
+      sleep "$sleep_time"
+      wait_time=$((wait_time - sleep_time))
+    fi
+  done
+  return 1
+}
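+
+# Example usage of wait_for_process (illustrative only): poll for up to 60 seconds,
+# once per second, until the synced secret exists in the test namespace:
+#
+#   wait_for_process 60 1 "kubectl get secret sse2esecret -n test-v1alpha1"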