From c8e157308ab31fe0ea4c4033f58938b1856c86b8 Mon Sep 17 00:00:00 2001 From: Konrad Ohms <40577406+konrad-ohms@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:52:35 +0200 Subject: [PATCH] ci: Rewrite e2e test suite from bash to golang (#217) * ci: Implement first draft of new e2e test suite * ci: Ensure that e2e tests are not cacheable * fix(ci): Adjust paths * test: Make cleanup more robust (remove finalizer on CR) * fix: Linting issue and run e2e without waiting for olm * test: Improve error handling * ci(test): Refactor e2e initialization * test: Extend initial install, reorder e2e execution * ci: Fixup script path * test: Use sdk to change scc instead of oc cli * fixup * test: Refactor install test * test: Check agent logs for successful connection * test: Wait for controller-manager before applying CR, Readme update * test: Use correct agent download key secret * test: Implement update install, introduce test api * test: Add test to check reconciliation of new operator * test: Refactor config and logging * test: Cleanup test API * test: Add initial install test, reuse logic if possible * test: Add multi-backend test to new e2e framework * test: Exec into pod to check configuration * test: Remove old e2e test bash script * test: Add explicit test for keysSecret and inline secret * chore: Fix linting issues * chore: Add support for building with podman * fix: Restore Dockerfile to work on non-amd64 hosts * chore: Add support for local podman build via buildkit If running on podman, the Dockerfile syntax is not accepted in all cases and the build might behave differently compared to Docker. BuildKit is the foundation of the Docker build logic and can be used in isolation, also as part of a running container. Therefore, the Makefile now detects podman or docker on demand and installs the buildctl command line if necessary. The build is always executed via buildctl now, which should provide consistent build behavior. * test: Align test suite with k8s multi-backend changes --- .gitignore | 1 + Dockerfile | 4 +- Makefile | 45 ++- README.md | 29 ++ ci/pipeline.yaml | 22 +- ci/pr-pipeline.yml | 24 +- ci/scripts/cluster-authentication.sh | 22 +- ci/scripts/end-to-end-test.sh | 339 ------------------ e2e/.env.template | 13 + e2e/agent_test_api.go | 514 +++++++++++++++++++++++++++ e2e/config.go | 121 +++++++ e2e/install_test.go | 95 +++++ e2e/main_test.go | 45 +++ e2e/multi_backend_test.go | 103 ++++++ go.mod | 10 +- go.sum | 10 + 16 files changed, 1019 insertions(+), 378 deletions(-) delete mode 100755 ci/scripts/end-to-end-test.sh create mode 100644 e2e/.env.template create mode 100644 e2e/agent_test_api.go create mode 100644 e2e/config.go create mode 100644 e2e/install_test.go create mode 100644 e2e/main_test.go create mode 100644 e2e/multi_backend_test.go diff --git a/.gitignore b/.gitignore index 977bf173..35028353 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,4 @@ instana-agent-operator # CI backend.cfg +e2e/.env diff --git a/Dockerfile b/Dockerfile index cd331462..115626d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # -# (c) Copyright IBM Corp. 2021 +# (c) Copyright IBM Corp. 2021, 2024 # (c) Copyright Instana Inc.
# @@ -75,4 +75,4 @@ RUN chown -R ${USER_UID}:${USER_UID} .cache RUN chmod -R 777 .cache USER ${USER_UID}:${USER_UID} -ENTRYPOINT ["/manager"] +ENTRYPOINT ["/manager"] \ No newline at end of file diff --git a/Makefile b/Makefile index ac187920..a58c45d5 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,12 @@ ifeq ($(uname), Darwin) get_ip_addr := ipconfig getifaddr en0 endif +# Detect if podman or docker is available locally +ifeq ($(shell command -v podman 2> /dev/null),) + CONTAINER_CMD = docker +else + CONTAINER_CMD = podman +endif all: build @@ -88,13 +94,16 @@ vet: ## Run go vet against code lint: golangci-lint ## Run the golang-ci linter $(GOLANGCI_LINT) run --timeout 5m -EXCLUDED_TEST_DIRS = mocks +EXCLUDED_TEST_DIRS = mocks e2e EXCLUDE_PATTERN = $(shell echo $(EXCLUDED_TEST_DIRS) | sed 's/ /|/g') PACKAGES = $(shell go list ./... | grep -vE "$(EXCLUDE_PATTERN)" | tr '\n' ' ') KUBEBUILDER_ASSETS=$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path) test: gen-mocks manifests generate fmt vet lint envtest ## Run tests but ignore specific directories that match EXCLUDED_TEST_DIRS KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test $(PACKAGES) -coverprofile=coverage.out +.PHONY: e2e +e2e: + go test -timeout=10m -count=1 -v github.com/instana/instana-agent-operator/e2e ##@ Build @@ -105,12 +114,13 @@ run: export DEBUG_MODE=true run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config (run the "install" target to install CRDs into the cluster) go run ./ -docker-build: test ## Build docker image with the manager. - docker build --build-arg VERSION=${VERSION} --build-arg GIT_COMMIT=${GIT_COMMIT} --build-arg DATE="$$(date)" -t ${IMG} . +docker-build: test container-build ## Build docker image with the manager. docker-push: ## Push the docker image with the manager. - docker push ${IMG} + ${CONTAINER_CMD} push ${IMG} +container-build: buildctl + $(BUILDCTL) --addr=${CONTAINER_CMD}-container://buildkitd build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --output type=oci,name=${IMG} --opt build-arg:VERSION=0.0.1 --opt build-arg:GIT_COMMIT=${GIT_COMMIT} --opt build-arg:DATE="$$(date)" | $(CONTAINER_CMD) load ##@ Deployment @@ -167,6 +177,26 @@ endif operator-sdk: ## Download the Operator SDK binary locally if necessary. $(call curl-get-tool,$(OPERATOR_SDK),https://github.com/operator-framework/operator-sdk/releases/download/v1.16.0,operator-sdk_$${OS}_$${ARCH}) +BUILDCTL = $(shell pwd)/bin/buildctl +BUILDKITD_CONTAINER_NAME = buildkitd +# Ensure the buildkitd container is running and download the buildctl cli locally if needed +buildctl: ## Download the buildctl cli locally if necessary. + @if [ "`$(CONTAINER_CMD) ps -a -q -f name=$(BUILDKITD_CONTAINER_NAME)`" ]; then \ + if [ "`$(CONTAINER_CMD) ps -aq -f status=exited -f name=$(BUILDKITD_CONTAINER_NAME)`" ]; then \ + echo "Starting buildkitd container $(BUILDKITD_CONTAINER_NAME)"; \ + $(CONTAINER_CMD) start $(BUILDKITD_CONTAINER_NAME) || true; \ + echo "Allowing 5 seconds to boot up"; \ + sleep 5; \ + else \ + echo "Buildkit daemon is already running, skip container creation"; \ + fi \ + else \ + echo "$(BUILDKITD_CONTAINER_NAME) container is not present, launching it now"; \ + $(CONTAINER_CMD) run -d --name $(BUILDKITD_CONTAINER_NAME) --privileged docker.io/moby/buildkit:v0.16.0; \ + echo "Allowing 5 seconds to boot up"; \ + sleep 5; \ + fi + $(call go-install-tool,$(BUILDCTL),github.com/moby/buildkit/cmd/buildctl@v0.16) # go-install-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) @@ -220,8 +250,9 @@ bundle: operator-sdk manifests kustomize ## Create the OLM bundle $(OPERATOR_SDK) bundle validate ./bundle .PHONY: bundle-build -bundle-build: ## Build the bundle image for OLM. - docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . +bundle-build: buildctl ## Build the bundle image for OLM. + #docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + $(BUILDCTL) --addr=${CONTAINER_CMD}-container://buildkitd build --frontend gateway.v0 --opt source=docker/dockerfile --opt filename=./bundle.Dockerfile --local context=. --local dockerfile=. --output type=oci,name=${BUNDLE_IMG} | $(CONTAINER_CMD) load controller-yaml: manifests kustomize ## Output the YAML for deployment, so it can be packaged with the release. Use `make --silent` to suppress other output. cd config/manager && $(KUSTOMIZE) edit set image "instana/instana-agent-operator=$(IMG)" @@ -243,4 +274,4 @@ gen-mocks: get-mockgen mockgen --source ./pkg/k8s/object/builders/common/builder/builder.go --destination ./mocks/builder_mock.go --package mocks mockgen --source ./pkg/json_or_die/json.go --destination ./mocks/json_or_die_marshaler_mock.go --package mocks mockgen --source ./pkg/k8s/operator/status/agent_status_manager.go --destination ./mocks/agent_status_manager_mock.go --package mocks - mockgen --source ./pkg/k8s/operator/lifecycle/dependent_lifecycle_manager.go --destination ./mocks/dependent_lifecycle_manager_mock.go --package mocks + mockgen --source ./pkg/k8s/operator/lifecycle/dependent_lifecycle_manager.go --destination ./mocks/dependent_lifecycle_manager_mock.go --package mocks diff --git a/README.md b/README.md index 2432d9b8..9f14f409 100644 --- a/README.md +++ b/README.md @@ -131,3 +131,32 @@ Now you should have a successful running Operator. To remove the Operator again, run: * `kubectl delete -f config/samples/instana_v1_instanaagent_demo.yaml` * `make undeploy`. + +### Running tests + +Unit tests can be executed by running `make test` without adjusting the local environment. + +For end-to-end testing, it is necessary to have a valid kubeconfig in the default location and to export the required variables before starting the tests. +An example template file is available in [e2e/.env.template](./e2e/.env.template); copy it to `./e2e/.env` and adjust it accordingly. + +The tests can be executed by sourcing the config (`source ./e2e/.env`) and running `make e2e`, or by using VS Code. + +For VS Code, ensure you have a valid `.vscode/settings.json` in your root folder.
+ +Example: + +```json +{ + "wcaForGP.enable": true, + "go.testEnvVars": { + "KUBEBUILDER_ASSETS": "~/.local/share/kubebuilder-envtest/k8s/1.30.0-linux-amd64", + "INSTANA_API_KEY": "xxx", + "ARTIFACTORY_USERNAME": "xxx", + "ARTIFACTORY_PASSWORD": "xxx", + "OPERATOR_IMAGE_NAME": "xxx", + "OPERATOR_IMAGE_TAG": "xxx" + }, + "wca.enable": false, + "go.testTimeout": "600s" +} +``` \ No newline at end of file diff --git a/ci/pipeline.yaml b/ci/pipeline.yaml index 668d6fa9..9883faae 100644 --- a/ci/pipeline.yaml +++ b/ci/pipeline.yaml @@ -524,13 +524,19 @@ jobs: INSTANA_ENDPOINT_HOST: ((instana-qa.endpoint_host)) INSTANA_ENDPOINT_PORT: 443 BUILD_BRANCH: main - INSTANA_API_KEY: ((qa-instana-api-token)) + INSTANA_API_KEY: ((qa-instana-agent-key)) ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) inputs: - name: pipeline-source run: - path: pipeline-source/ci/scripts/end-to-end-test.sh + path: bash + args: + - -ceu + - | + cd pipeline-source + bash ./ci/scripts/cluster-authentication.sh + make e2e on_success: put: gh-status inputs: [ pipeline-source ] @@ -603,13 +609,19 @@ jobs: INSTANA_API_URL: ((instana-qa.api_url)) INSTANA_API_TOKEN: ((instana-qa.api_token)) BUILD_BRANCH: main - INSTANA_API_KEY: ((qa-instana-api-token)) + INSTANA_API_KEY: ((qa-instana-agent-key)) ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) inputs: - name: pipeline-source run: - path: pipeline-source/ci/scripts/end-to-end-test.sh + path: bash + args: + - -ceu + - | + cd pipeline-source + bash ./ci/scripts/cluster-authentication.sh + make e2e on_success: put: gh-status inputs: [ pipeline-source ] @@ -684,7 +696,7 @@ jobs: # INSTANA_API_URL: ((instana-qa.api_url)) # INSTANA_API_TOKEN: ((instana-qa.api_token)) # BUILD_BRANCH: main - # INSTANA_API_KEY: ((qa-instana-api-token)) + # INSTANA_API_KEY: ((qa-instana-agent-key)) # ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) # ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) # inputs: diff --git a/ci/pr-pipeline.yml b/ci/pr-pipeline.yml index c325bbac..1a470e7a 100644 --- a/ci/pr-pipeline.yml +++ b/ci/pr-pipeline.yml @@ -565,7 +565,7 @@ jobs: plan: - get: pipeline-source trigger: true - passed: [operator-olm-build, docker-build] + passed: [docker-build] - load_var: git-commit file: pipeline-source/.git/short_ref reveal: true @@ -600,13 +600,19 @@ jobs: INSTANA_ENDPOINT_HOST: ((instana-qa.endpoint_host)) INSTANA_ENDPOINT_PORT: 443 BUILD_BRANCH: ((branch)) - INSTANA_API_KEY: ((qa-instana-api-token)) + INSTANA_API_KEY: ((qa-instana-agent-key)) ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) inputs: - name: pipeline-source run: - path: pipeline-source/ci/scripts/end-to-end-test.sh + path: bash + args: + - -ceu + - | + cd pipeline-source + bash ./ci/scripts/cluster-authentication.sh + make e2e on_success: put: gh-status inputs: [pipeline-source] @@ -683,13 +689,19 @@ jobs: INSTANA_API_URL: ((instana-qa.api_url)) INSTANA_API_TOKEN: ((instana-qa.api_token)) BUILD_BRANCH: ((branch)) - INSTANA_API_KEY: ((qa-instana-api-token)) + INSTANA_API_KEY: 
((qa-instana-agent-key)) ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) inputs: - name: pipeline-source run: - path: pipeline-source/ci/scripts/end-to-end-test.sh + path: bash + args: + - -ceu + - | + cd pipeline-source + bash ./ci/scripts/cluster-authentication.sh + make e2e on_success: put: gh-status inputs: [pipeline-source] @@ -768,7 +780,7 @@ jobs: # INSTANA_API_URL: ((instana-qa.api_url)) # INSTANA_API_TOKEN: ((instana-qa.api_token)) # BUILD_BRANCH: ((branch)) - # INSTANA_API_KEY: ((qa-instana-api-token)) + # INSTANA_API_KEY: ((qa-instana-agent-key)) # ARTIFACTORY_USERNAME: ((delivery-instana-io-internal-project-artifact-read-writer-creds.username)) # ARTIFACTORY_PASSWORD: ((delivery-instana-io-internal-project-artifact-read-writer-creds.password)) # inputs: diff --git a/ci/scripts/cluster-authentication.sh b/ci/scripts/cluster-authentication.sh index 9ad52110..116fb006 100644 --- a/ci/scripts/cluster-authentication.sh +++ b/ci/scripts/cluster-authentication.sh @@ -8,18 +8,6 @@ set -e set -o pipefail -function cleanup_namespace() { - echo "Deleting the namespaces" - if kubectl get namespace/instana-agent ; then - kubectl delete namespace/instana-agent - kubectl wait --for=delete namespace/instana-agent --timeout=30s - echo "Deleted namespace instana-agent" - else - echo "Namespace instana-agent does not exist; skipping delete" - fi - echo "OK" -} - case "${CLUSTER_TYPE}" in gke) echo 'Testing on a GKE cluster' @@ -42,7 +30,6 @@ case "${CLUSTER_TYPE}" in export USE_GKE_GCLOUD_AUTH_PLUGIN=True gcloud container clusters get-credentials "${GKE_CLUSTER_NAME}" --zone "${GKE_ZONE}" --project "${GKE_PROJECT}" - cleanup_namespace ;; openshift) echo "${KUBECONFIG_SOURCE}" > kubeconfig @@ -50,11 +37,10 @@ case "${CLUSTER_TYPE}" in KUBECONFIG="$(pwd)/kubeconfig" export KUBECONFIG - cleanup_namespace - - echo 'Download OpenShift CLI to modify rights for default ServiceAccount, so it has priviliged access' - curl -L --fail --show-error --silent https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz -o /tmp/oc.tar.gz - tar -xzvf /tmp/oc.tar.gz --overwrite --directory /tmp + # go test does not require oc cli + # echo 'Download OpenShift CLI to modify rights for default ServiceAccount, so it has priviliged access' + # curl -L --fail --show-error --silent https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz -o /tmp/oc.tar.gz + # tar -xzvf /tmp/oc.tar.gz --overwrite --directory /tmp ;; *) diff --git a/ci/scripts/end-to-end-test.sh b/ci/scripts/end-to-end-test.sh deleted file mode 100755 index 3d5f0eb0..00000000 --- a/ci/scripts/end-to-end-test.sh +++ /dev/null @@ -1,339 +0,0 @@ -#!/bin/bash - -# -# (c) Copyright IBM Corp. 2024 -# (c) Copyright Instana Inc. 
-# - -set -e -set -o pipefail - -POD_WAIT_TIME_OUT=120 # s Pod-check max waiting time -POD_WAIT_INTERVAL=5 # s Pod-check interval time -OPERATOR_LOG_LINE='Agent installed/upgraded successfully' -OPERATOR_LOG_LINE_NEW='successfully finished reconcile on agent CR' -NAMESPACE="instana-agent" - -# Wait for a pod to be running -# It uses the global variables: -# POD_WAIT_TIME_OUT, POD_WAIT_INTERVAL -# Takes label as a first arg and a second arg is deployment -function wait_for_running_pod() { - echo "=== wait_for_running_pod ===" - timeout=0 - status=0 - label=${1} - deployment=${2} - pods_are_running=false - - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - status=$(kubectl get pod -n "${NAMESPACE}" -l="${label}" -o go-template='{{ range .items }}{{ println .status.phase }}{{ end }}' | uniq) - echo "The status of pods from deployment ${deployment} in namespace ${NAMESPACE} is: \"$status\"" - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - if [[ "${#status[@]}" -eq "1" && "${status[0]}" == "Running" ]]; then - echo "The status of pods from deployment ${deployment} in namespace ${NAMESPACE} is: \"$status\". Ending waiting - loop here." - pods_are_running=true - break - fi - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - status=$(kubectl get pod -n "${NAMESPACE}" -o go-template='{{ range .items }}{{ println .status.phase }}{{ end }}'| uniq) - echo "DEBUG, the status of pods from deployment ${deployment} in namespace ${NAMESPACE} is: \"$status\"" - done - if [[ "${pods_are_running}" == "false" ]]; then - echo "${NAMESPACE} failed to initialize. Exceeded timeout of - ${POD_WAIT_TIME_OUT} s. Exit here" - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - exit 1 - fi - return 0; -} - -# Checks if one of the controller-manager pods logged successful installation -function wait_for_successfull_agent_installation() { - echo "=== wait_for_successfull_agent_installation ===" - local timeout=0 - local label="app.kubernetes.io/name=instana-agent-operator" - local agent_found=false - - #Workaround as grep will return -1 if the line is not found. - #With pipefail enabled, this would fail the script if the if statement omitted. - if ! crd_installed_successfully=$(kubectl logs -l=${label} -n ${NAMESPACE} --tail=-1 | grep "${OPERATOR_LOG_LINE}"); then - # Try to fetch the new log line if the old one is not there - if ! crd_installed_successfully=$(kubectl logs -l=${label} -n ${NAMESPACE} --tail=-1 | grep "${OPERATOR_LOG_LINE_NEW}"); then - crd_installed_successfully="" - fi - fi - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - if [[ -n "${crd_installed_successfully}" ]]; then - echo "The agent has been installed/upgraded successfully. Ending waiting loop here." - agent_found=true - break - fi - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - #Workaround as grep will return -1 if the line is not found. - #With pipefail enabled, this would fail the script if the if statement omitted. - if ! crd_installed_successfully=$(kubectl logs -l=${label} -n ${NAMESPACE} --tail=-1 | grep "${OPERATOR_LOG_LINE}"); then - # Try to fetch the new log line if the old one is not there - if ! crd_installed_successfully=$(kubectl logs -l=${label} -n ${NAMESPACE} --tail=-1 | grep "${OPERATOR_LOG_LINE_NEW}"); then - crd_installed_successfully="" - fi - fi - done - if [[ "${agent_found}" == "false" ]]; then - echo "Agent failed to be installed/upgraded successfully. 
Exceeded timeout of ${POD_WAIT_TIME_OUT} s. Exit here" - exit 1 - fi - - return 0; -} - -function ensure_new_operator_deployment() { - echo "=== ensure_new_operator_deployment ===" - local timeout=0 - - echo "Scaling controller-manager deployment down to zero" - kubectl scale -n ${NAMESPACE} --replicas=0 deployment/controller-manager - set +e - local operator_present=true - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - echo "Showing pods" - kubectl get -n ${NAMESPACE} pods - controller_manager_gone=$(kubectl get -n ${NAMESPACE} pods | grep controller-manager) - if [ "$controller_manager_gone" == "" ]; then - echo "Operator pods are gone" - operator_present=false - break - else - echo "Operator pods are still present" - fi - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - done - - echo "=== Operator logs start ===" - kubectl logs -n ${NAMESPACE} -l "app.kubernetes.io/name=instana-agent-operator" - echo "=== Operator logs end ===" - echo - - if [[ "${operator_present}" == "true" ]]; then - echo "Failed to scale operator to 0 instance. Exceeded timeout of ${POD_WAIT_TIME_OUT} s. Exit here" - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - exit 1 - fi - - set -e - - echo "Scaling operator deployment to 1 instance" - kubectl scale -n ${NAMESPACE} --replicas=1 deployment/controller-manager - - set +e - timeout=0 - operator_present=false - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - echo "Showing pods" - kubectl get -n ${NAMESPACE} pods - controller_manager_present=$(kubectl get -n ${NAMESPACE} pods | grep "controller-manager" | grep "Running" | grep "1/1") - if [ "$controller_manager_present" == "" ]; then - echo "Operator pod is not running yet" - else - echo "Operator pod is running now" - operator_present=true - break - fi - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - done - set -e - - echo "=== Operator logs start ===" - kubectl logs -n ${NAMESPACE} -l "app.kubernetes.io/name=instana-agent-operator" - echo "=== Operator logs end ===" - echo - - if [[ "${operator_present}" == "false" ]]; then - echo "Failed to scale operator to 1 instance. Exceeded timeout of ${POD_WAIT_TIME_OUT} s. Exit here" - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - exit 1 - fi -} - -function wait_for_running_cr_state() { - echo "=== wait_for_running_cr_state ===" - local timeout=0 - local cr_status="Failed" - - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - cr_status=$(kubectl -n ${NAMESPACE} get agent instana-agent -o yaml | yq .status.status) - echo "CR state: ${cr_status}" - if [[ "${cr_status}" == "Running" ]]; then - echo "The custom resource reflects the Running state correctly. Ending waiting loop here." - break - fi - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - done - - if [[ "${cr_status}" != "Running" ]]; then - echo "The custom resource did not reflect the Running state correctly." 
- echo "Displaying state found on the CR" - kubectl -n ${NAMESPACE} get agent instana-agent -o yaml | yq .status - exit 1 - fi -} - -function install_cr() { - echo "=== install_cr ===" - # install the Custom Resource - echo "Contruct CR with the agent key, zone, port, and the host" - path_to_crd="config/samples/instana_v1_instanaagent.yaml" - yq eval -i '.spec.zone.name = env(NAME)' ${path_to_crd} - yq eval -i '.spec.cluster.name = env(NAME)' ${path_to_crd} - yq eval -i '.spec.agent.key = env(INSTANA_API_KEY)' ${path_to_crd} - yq eval -i '.spec.agent.endpointPort = strenv(INSTANA_ENDPOINT_PORT)' ${path_to_crd} - yq eval -i '.spec.agent.endpointHost = env(INSTANA_ENDPOINT_HOST)' ${path_to_crd} - - echo "Install the CR" - kubectl apply -f ${path_to_crd} -} - -function install_cr_multi_backend() { - echo "=== install_cr_multi_backend ===" - - # install the Custom Resource - path_to_crd="config/samples/instana_v1_instanaagent_multiple_backends.yaml" - - echo "Install the multi-backend CR" - # credentials are invalid here, but that's okay, we just test the operator behavior, not the agent - kubectl apply -f ${path_to_crd} -} - -function install_cr_multi_backend_external_keyssecret() { - echo "=== install_cr_multi_backend_external_keyssecret ===" - - # install the Custom Resource - path_to_crd="config/samples/instana_v1_instanaagent_multiple_backends_external_keyssecret.yaml" - path_to_keyssecret="config/samples/external_secret_instana_agent_key.yaml" - - echo "Install the keysSecret and CR" - # credentials are invalid here, but that's okay, we just test the operator behavior, not the agent - kubectl apply -f ${path_to_keyssecret} - kubectl apply -f ${path_to_crd} -} - -function verify_multi_backend_config_generation_and_injection() { - echo "=== function verify_multi_backend_config_generation_and_injection ===" - local timeout=0 - - echo "Checking if instana-agent-config secret is present with 2 backends" - kubectl get secret -n ${NAMESPACE} instana-agent-config -o yaml - kubectl get secret -n ${NAMESPACE} instana-agent-config -o yaml | yq '.data["com.instana.agent.main.sender.Backend-1.cfg"]' | base64 -d > backend.cfg - echo "Validate backend config structure for backend 1" - grep "host=first-backend.instana.io" backend.cfg - grep "port=443" backend.cfg - grep "protocol=HTTP/2" backend.cfg - # check for key, safe to log as just a dummy value - grep "key=xxx" backend.cfg - - kubectl get secret -n ${NAMESPACE} instana-agent-config -o yaml | yq '.data["com.instana.agent.main.sender.Backend-2.cfg"]' | base64 -d > backend.cfg - echo "Validate backend config structure for backend 2" - grep "host=second-backend.instana.io" backend.cfg - grep "port=443" backend.cfg - grep "protocol=HTTP/2" backend.cfg - # check for key, safe to log as just a dummy value - grep "key=yyy" backend.cfg - - echo "Validate that backend config files are available inside the agent pod" - echo "Getting pod name for exec" - pod_name=$(kubectl get pods -n ${NAMESPACE} -l app.kubernetes.io/component=instana-agent -o yaml | yq ".items[0].metadata.name") - - exec_successful=false - while [[ "${timeout}" -le "${POD_WAIT_TIME_OUT}" ]]; do - set +e - echo "Exec into pod ${pod_name} and see if etc/instana/com.instana.agent.main.sender.Backend-2.cfg is present" - - if kubectl exec -n ${NAMESPACE} "${pod_name}" -- cat /opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-2.cfg; then - echo "Could cat file" - exec_successful=true - break - fi - set -e - ((timeout+=POD_WAIT_INTERVAL)) - sleep $POD_WAIT_INTERVAL - echo 
"Getting pod name for exec" - pod_name=$(kubectl get pods -n ${NAMESPACE} -l app.kubernetes.io/component=instana-agent -o yaml | yq ".items[0].metadata.name") - done - - if [[ "${exec_successful}" == "false" ]]; then - echo "Failed to cat file, check if the symlink logic in the entrypoint script of the agent container image is correct" - echo "Showing running pods" - kubectl get pods -n "${NAMESPACE}" - exit 1 - fi - - echo "Check if the right backend was mounted in Backend-1.cfg" - echo "Exec into pod ${pod_name} and see if etc/instana/com.instana.agent.main.sender.Backend-1.cfg is present" - kubectl exec -n ${NAMESPACE} "${pod_name}" -- cat /opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-1.cfg | grep "host=first-backend.instana.io" - - echo "Check if the right backend was mounted in Backend-2.cfg" - kubectl exec -n ${NAMESPACE} "${pod_name}" -- cat /opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-2.cfg | grep "host=second-backend.instana.io" - kubectl -n ${NAMESPACE} get agent instana-agent -o yaml -} - -source pipeline-source/ci/scripts/cluster-authentication.sh - -echo "Deploying the public operator" -wget https://github.com/instana/instana-agent-operator/releases/latest/download/instana-agent-operator.yaml -kubectl apply -f instana-agent-operator.yaml -echo "Verify that the controller manager pods are running" -wait_for_running_pod app.kubernetes.io/name=instana-agent-operator controller-manager - -pushd pipeline-source - install_cr - echo "Verify that the agent pods are running" - wait_for_running_pod app.kubernetes.io/name=instana-agent instana-agent - wait_for_successfull_agent_installation - - # upgrade the operator - echo "Deploying the operator from feature branch" - IMG="delivery.instana.io/int-docker-agent-local/instana-agent-operator/dev-build:${GIT_COMMIT}" - export IMG - echo "Create secret for $IMG" - kubectl create secret -n instana-agent docker-registry delivery.instana \ - --docker-server=delivery.instana.io \ - --docker-username="$ARTIFACTORY_USERNAME" \ - --docker-password="$ARTIFACTORY_PASSWORD" - - make install deploy - - echo "Add imagePullSecrets to the controller-manager deployment" - kubectl patch deployment controller-manager -n instana-agent -p '"spec": { "template" : {"spec": { "imagePullSecrets": [{"name": "delivery.instana"}]}}}' - ensure_new_operator_deployment - wait_for_running_pod app.kubernetes.io/name=instana-agent-operator controller-manager - - echo "Verify that the agent pods are running" - wait_for_running_pod app.kubernetes.io/name=instana-agent instana-agent - wait_for_successfull_agent_installation - wait_for_running_cr_state - echo "Upgrade has been successful" - - echo "Install CR to connect to an additional backend for both k8sensor and instana-agent" - install_cr_multi_backend - wait_for_running_pod app.kubernetes.io/name=instana-agent instana-agent - verify_multi_backend_config_generation_and_injection - - echo "Install CR to connect to an additional backend with external keysSecret" - install_cr_multi_backend_external_keyssecret - echo "Install CR to connect to an additional backend for both k8sensor and instana-agent" - install_cr_multi_backend - wait_for_running_pod app.kubernetes.io/name=instana-agent instana-agent - verify_multi_backend_config_generation_and_injection -popd diff --git a/e2e/.env.template b/e2e/.env.template new file mode 100644 index 00000000..f641f8b3 --- /dev/null +++ b/e2e/.env.template @@ -0,0 +1,13 @@ +##################################### +# e2e test config 
+##################################### + +# required +export INSTANA_API_KEY= +export ARTIFACTORY_USERNAME= +export ARTIFACTORY_PASSWORD= + +# optional +export ARTIFACTORY_HOST= +export OPERATOR_IMAGE_NAME= +export OPERATOR_IMAGE_TAG= diff --git a/e2e/agent_test_api.go b/e2e/agent_test_api.go new file mode 100644 index 00000000..e177680e --- /dev/null +++ b/e2e/agent_test_api.go @@ -0,0 +1,514 @@ +/* + * (c) Copyright IBM Corp. 2024 + * (c) Copyright Instana Inc. 2024 + */ + +package e2e + +import ( + "bytes" + "context" + "fmt" + "io" + "strconv" + "strings" + "testing" + "time" + + securityv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" + log "k8s.io/klog/v2" + + v1 "github.com/instana/instana-agent-operator/api/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + e2etypes "sigs.k8s.io/e2e-framework/pkg/types" + "sigs.k8s.io/e2e-framework/support/utils" +) + +// This file exposes the reusable assets which are used during the e2e test + +// env.Funcs to be used in the test initialization + +// EnsureAgentNamespaceDeletion ensures a proper cleanup of existing instana agent installations. +// The namespace cannot be just deleted in all scenarios, as finalizers on the agent CR might block the namespace termination +func EnsureAgentNamespaceDeletion() env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + log.Infof("Ensure namespace %s is not present", cfg.Namespace()) + // Create a client to interact with the Kube API + r, err := resources.New(cfg.Client().RESTConfig()) + if err != nil { + return ctx, fmt.Errorf("failed to initialize client: %v", err) + } + + // Check if the namespace exists, otherwise just skip over it + agentNamespace := &corev1.Namespace{} + err = r.Get(ctx, InstanaNamespace, InstanaNamespace, agentNamespace) + if errors.IsNotFound(err) { + log.Infof("Namespace %s was not found, skipping deletion", cfg.Namespace()) + return ctx, nil + } + // Something on the API request failed, this should fail the cleanup + if err != nil { + return ctx, fmt.Errorf("failed to get namespace: %v", err) + } + + // Cleanup a potentially existing Agent CR first + if _, err = DeleteAgentCRIfPresent()(ctx, cfg); err != nil { + return ctx, err + } + + // Delete the Namespace + log.Info("Deleting namespace and waiting for successful termination") + if err = r.Delete(ctx, agentNamespace); err != nil { + return ctx, fmt.Errorf("namespace deletion failed: %v", err) + } + + // Wait for the termination of the namespace + namespaceList := &corev1.NamespaceList{ + Items: []corev1.Namespace{ + *agentNamespace, + }, + } + + err = wait.For(conditions.New(r).ResourcesDeleted(namespaceList)) + if err != nil { + return ctx, fmt.Errorf("error while waiting for namespace deletion: %v", err) + } + log.Infof("Namespace %s is gone", cfg.Namespace()) + return ctx, nil + } +} + +func DeleteAgentCRIfPresent() env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + log.Info("Ensure agent CR is not present") + // Create a client to interact with the Kube API + r,
err := resources.New(cfg.Client().RESTConfig()) + if err != nil { + return ctx, fmt.Errorf("cleanup: Error initializing client to delete agent CR: %v", err) + } + + // Assume an existing namespace at this point, check if an agent CR is present (requires to adjust schema of current client) + r.WithNamespace(InstanaNamespace) + err = v1.AddToScheme(r.GetScheme()) + if err != nil { + // If this fails, the cleanup will not work properly -> failing + return ctx, fmt.Errorf("cleanup: Error could not add agent types to current scheme: %v", err) + } + + // If the agent cr is available, but the operator is already gone, the finalizer will never be removed + // This will lead to a delayed namespace termination which never completes. To avoid that, patch the agent CR + // to remove the finalizer. Afterwards, it can be successfully deleted. + agent := &v1.InstanaAgent{} + err = r.Get(ctx, AgentCustomResourceName, InstanaNamespace, agent) + if errors.IsNotFound(err) { + // No agent cr found, skip this cleanup step + log.Info("No agent CR present, skipping deletion") + return ctx, nil + } + + // The agent CR could not be fetched due to a different reason, failing + if err != nil { + return ctx, fmt.Errorf("cleanup: Fetch agent CR failed: %v", err) + } + + // Removing the finalizer from the existing Agent CR to make it deletable + // kubectl patch agent instana-agent -p '{"metadata":{"finalizers":[]}}' --type=merge + log.Info("Patching agent cr to remove finalizers") + err = r.Patch(ctx, agent, k8s.Patch{ + PatchType: types.MergePatchType, + Data: []byte(`{"metadata":{"finalizers":[]}}`), + }) + if err != nil { + return ctx, fmt.Errorf("cleanup: Patch agent CR failed: %v", err) + } + + log.Info("Deleting CR") + // delete explicitly, namespace deletion would delete the agent CR as well if the finalizer is not present + err = r.Delete(ctx, agent) + + if err != nil { + // The deletion failed for some reason, failing the cleanup + return ctx, fmt.Errorf("cleanup: Delete agent CR failed: %v", err) + } + + agentCrList := &v1.InstanaAgentList{ + Items: []v1.InstanaAgent{*agent}, + } + + // Ensure to wait for the agent CR to disappear before continuing + err = wait.For(conditions.New(r).ResourcesDeleted(agentCrList)) + if err != nil { + return ctx, fmt.Errorf("cleanup: Waiting for agent CR deletion failed: %v", err) + } + log.Info("Agent CR is gone") + return ctx, nil + } +} + +// On OpenShift we need to ensure the instana-agent service account gets permission to the privileged security context +// This action is only necessary once per OCP cluster as it is not tied to a namespace, but to the cluster +func AdjustOcpPermissionsIfNecessary() env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + // Create a client to interact with the Kube API + clientSet, err := kubernetes.NewForConfig(cfg.Client().RESTConfig()) + if err != nil { + return ctx, fmt.Errorf("error creating a clientset: %v", err) + } + + discoveryClient := discovery.NewDiscoveryClient(clientSet.RESTClient()) + apiGroups, err := discoveryClient.ServerGroups() + if err != nil { + return ctx, fmt.Errorf("failed to fetch apiGroups: %v", err) + } + + isOpenShift := false + for _, group := range apiGroups.Groups { + if group.Name == "apps.openshift.io" { + isOpenShift = true + break + } + } + + if isOpenShift { + command := "oc adm policy add-scc-to-user privileged -z instana-agent -n instana-agent" + log.Infof("OpenShift detected, adding instana-agent service account to SecurityContextConstraints via api, command would be: %s\n", command) + + // replaced command execution with SDK call to not require `oc` cli + securityClient, err := securityv1.NewForConfig(cfg.Client().RESTConfig()) + if err != nil { + return ctx, fmt.Errorf("could not initialize securityClient: %v", err) + } + + // get security context constraints + scc, err := securityClient.SecurityContextConstraints().Get(ctx, "privileged", metav1.GetOptions{}) + if err != nil { + return ctx, fmt.Errorf("failed to get SecurityContextConstraints: %v", err) + } + + // check if service account user is already listed in the scc + serviceAccountId := fmt.Sprintf("system:serviceaccount:%s:%s", InstanaNamespace, "instana-agent") + userFound := false + + for _, user := range scc.Users { + if user == serviceAccountId { + userFound = true + break + } + } + + if userFound { + log.Infof("Security Context Constraint \"privileged\" already lists service account user: %v\n", serviceAccountId) + return ctx, nil + } + + // updating Security Context Constraints to list instana service account + scc.Users = append(scc.Users, serviceAccountId) + + _, err = securityClient.SecurityContextConstraints().Update(ctx, scc, metav1.UpdateOptions{}) + if err != nil { + return ctx, fmt.Errorf("could not update Security Context Constraints on OCP cluster: %v", err) + } + + return ctx, nil + } else { + // non-ocp environments do not require changes in the Security Context Constraints + log.Info("Cluster is not an OpenShift cluster, no need to adjust the security context constraints") + } + return ctx, nil + } +} + +// Setup functions +func SetupOperatorDevBuild() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + // Create pull secret for custom registry + t.Logf("Creating custom pull secret for %s", InstanaTestCfg.ContainerRegistry.Host) + p := utils.RunCommand( + fmt.Sprintf("kubectl create secret -n %s docker-registry %s --docker-server=%s --docker-username=%s --docker-password=%s", + cfg.Namespace(), + InstanaTestCfg.ContainerRegistry.Name, + InstanaTestCfg.ContainerRegistry.Host, + InstanaTestCfg.ContainerRegistry.User, + InstanaTestCfg.ContainerRegistry.Password), + ) + if p.Err() != nil { + t.Fatal("Error while creating pull secret", p.Err(), p.Out(), p.ExitCode()) + } + t.Log("Pull secret created") + + // Use make logic to ensure that local dev commands and test commands are in sync + cmd := fmt.Sprintf("bash -c 'cd ..
&& IMG=%s:%s make install deploy'", InstanaTestCfg.OperatorImage.Name, InstanaTestCfg.OperatorImage.Tag) + t.Logf("Deploy new dev build by running: %s", cmd) + p = utils.RunCommand(cmd) + if p.Err() != nil { + t.Fatal("Error while deploying custom operator build during update installation", p.Command(), p.Err(), p.Out(), p.ExitCode()) + } + t.Log("Deployment submitted") + + // Inject image pull secret into deployment, scale to 0 replicas and back to 2 replicas, otherwise pull secrets are not propagated correctly + t.Log("Patch instana operator deployment to redeploy pods with image pull secret") + r, err := resources.New(cfg.Client().RESTConfig()) + if err != nil { + t.Fatal("Error initializing client", err) + } + r.WithNamespace(cfg.Namespace()) + agent := &appsv1.Deployment{} + err = r.Get(ctx, InstanaOperatorDeploymentName, cfg.Namespace(), agent) + if err != nil { + t.Fatal("Failed to get controller-manager deployment", err) + } + err = r.Patch(ctx, agent, k8s.Patch{ + PatchType: types.MergePatchType, + Data: []byte(fmt.Sprintf(`{"spec":{ "replicas": 0, "template":{"spec": {"imagePullSecrets": [{"name": "%s"}]}}}}`, InstanaTestCfg.ContainerRegistry.Name)), + }) + if err != nil { + t.Fatal("Failed to patch deployment to include pull secret and 0 replicas", err) + } + + err = r.Patch(ctx, agent, k8s.Patch{ + PatchType: types.MergePatchType, + Data: []byte(`{"spec":{ "replicas": 2 }}`), + }) + if err != nil { + t.Fatal("Failed to patch deployment to scale back up to 2 replicas", err) + } + t.Log("Patching completed") + return ctx + } +} + +func DeployAgentCr(agent *v1.InstanaAgent) e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + // Wait for controller-manager deployment to ensure that CRD is installed correctly before proceeding. + // Technically, it could be categorized as "Assess" method, but the setup process requires to wait in between. + // Therefore, keeping the wait logic in this section.
+ client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + t.Log("Creating a new Agent CR") + + // Create Agent CR + r := client.Resources(cfg.Namespace()) + err = v1.AddToScheme(r.GetScheme()) + if err != nil { + t.Fatal("Could not add Agent CR to client scheme", err) + } + + err = r.Create(ctx, agent) + if err != nil { + t.Fatal("Could not create Agent CR", err) + } + + return ctx + } +} + +// Assess functions +func WaitForDeploymentToBecomeReady(name string) e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Logf("Waiting for deployment %s to become ready", name) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + dep := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()}, + } + // wait for pods of the deployment to become ready + err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&dep, appsv1.DeploymentAvailable, corev1.ConditionTrue), wait.WithTimeout(time.Minute*2)) + if err != nil { + t.Fatal(err) + } + t.Logf("Deployment %s is ready", name) + return ctx + } +} + +func WaitForAgentDaemonSetToBecomeReady() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Logf("Waiting for DaemonSet %s to become ready", AgentDaemonSetName) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: AgentDaemonSetName, Namespace: cfg.Namespace()}, + } + err = wait.For(conditions.New(client.Resources()).DaemonSetReady(&ds), wait.WithTimeout(time.Minute*5)) + if err != nil { + t.Fatal(err) + } + t.Logf("DaemonSet %s is ready", AgentDaemonSetName) + return ctx + } +} + +func WaitForAgentSuccessfulBackendConnection() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Log("Searching for successful backend connection in agent logs") + clientSet, err := kubernetes.NewForConfig(cfg.Client().RESTConfig()) + if err != nil { + t.Fatal(err) + } + podList, err := clientSet.CoreV1().Pods(cfg.Namespace()).List(ctx, metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=instana-agent"}) + if err != nil { + t.Fatal(err) + } + if len(podList.Items) == 0 { + t.Fatal("No pods found") + } + + connectionSuccessful := false + var buf *bytes.Buffer + for i := 0; i < 9; i++ { + t.Log("Sleeping 10 seconds") + time.Sleep(10 * time.Second) + t.Log("Fetching logs") + logReq := clientSet.CoreV1().Pods(cfg.Namespace()).GetLogs(podList.Items[0].Name, &corev1.PodLogOptions{}) + podLogs, err := logReq.Stream(ctx) + if err != nil { + t.Fatal("Could not stream logs", err) + } + defer podLogs.Close() + + buf = new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + + if err != nil { + t.Fatal(err) + } + if strings.Contains(buf.String(), "Connected using HTTP/2 to") { + t.Log("Connection established correctly") + connectionSuccessful = true + break + } else { + t.Log("Could not find working connection in log of the first pod yet") + } + } + if !connectionSuccessful { + t.Fatal("Agent pod did not log successful connection, dumping log", buf.String()) + } + return ctx + } +} + +func ValidateAgentMultiBackendConfiguration() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + log.Infof("Fetching secret %s", InstanaAgentConfigSecretName) + // Create a client to interact with the Kube API + r, err :=
resources.New(cfg.Client().RESTConfig()) + if err != nil { + t.Fatal(err) + } + + // Fetch the agent config secret, fail if it is not present + instanaAgentConfigSecret := &corev1.Secret{} + err = r.Get(ctx, InstanaAgentConfigSecretName, InstanaNamespace, instanaAgentConfigSecret) + if err != nil { + t.Fatal("Secret could not be fetched", InstanaAgentConfigSecretName, err) + } + + firstBackendConfigString := string(instanaAgentConfigSecret.Data["com.instana.agent.main.sender.Backend-1.cfg"]) + expectedFirstBackendConfigString := "host=first-backend.instana.io\nport=443\nprotocol=HTTP/2\nkey=xxx\n" + secondBackendConfigString := string(instanaAgentConfigSecret.Data["com.instana.agent.main.sender.Backend-2.cfg"]) + expectedSecondBackendConfigString := "host=second-backend.instana.io\nport=443\nprotocol=HTTP/2\nkey=yyy\n" + + if firstBackendConfigString != expectedFirstBackendConfigString { + t.Error("First backend does not match the expected string", firstBackendConfigString, expectedFirstBackendConfigString) + } else { + t.Log("First backend config confirmed") + } + if secondBackendConfigString != expectedSecondBackendConfigString { + t.Error("Second backend does not match the expected string", secondBackendConfigString, expectedSecondBackendConfigString) + } else { + t.Log("Second backend config confirmed") + } + + pods := &corev1.PodList{} + listOps := resources.WithLabelSelector("app.kubernetes.io/component=instana-agent") + err = r.List(ctx, pods, listOps) + if err != nil || pods.Items == nil { + t.Error("error while getting pods", err) + } + var stdout, stderr bytes.Buffer + podName := pods.Items[0].Name + containerName := "instana-agent" + + backendCheckMatrix := []struct { + fileSuffix string + expectedBackendString string + }{ + { + fileSuffix: "1", + expectedBackendString: "first-backend.instana.io", + }, + { + fileSuffix: "2", + expectedBackendString: "second-backend.instana.io", + }, + } + + for _, currentBackend := range backendCheckMatrix { + if err := r.ExecInPod( + ctx, + cfg.Namespace(), + podName, + containerName, + []string{"cat", fmt.Sprintf("/opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-%s.cfg", currentBackend.fileSuffix)}, + &stdout, + &stderr, + ); err != nil { + t.Log(stderr.String()) + t.Error(err) + } + if strings.Contains(stdout.String(), currentBackend.expectedBackendString) { + t.Logf("ExecInPod returned expected backend config for file /opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-%s.cfg", currentBackend.fileSuffix) + } else { + t.Error(fmt.Sprintf("Expected to find %s in file /opt/instana/agent/etc/instana/com.instana.agent.main.sender.Backend-%s.cfg", currentBackend.expectedBackendString, currentBackend.fileSuffix), stdout.String()) + } + } + + return ctx + } +} + +// Helper to produce test structs +func NewAgentCr(t *testing.T) v1.InstanaAgent { + boolTrue := true + + return v1.InstanaAgent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "instana-agent", + Namespace: InstanaNamespace, + }, + Spec: v1.InstanaAgentSpec{ + Zone: v1.Name{ + Name: "e2e", + }, + // ensure to not overlap between concurrent test runs on different clusters, randomize cluster name, but have consistent zone + Cluster: v1.Name{Name: envconf.RandomName("e2e", 4)}, + Agent: v1.BaseAgentSpec{ + Key: InstanaTestCfg.InstanaBackend.AgentKey, + EndpointHost: InstanaTestCfg.InstanaBackend.EndpointHost, + EndpointPort: strconv.Itoa(InstanaTestCfg.InstanaBackend.EndpointPort), + }, + OpenTelemetry: v1.OpenTelemetry{ + GRPC: &v1.Enabled{Enabled: &boolTrue}, +
HTTP: &v1.Enabled{Enabled: &boolTrue}, + }, + }, + } +} diff --git a/e2e/config.go b/e2e/config.go new file mode 100644 index 00000000..82c9c555 --- /dev/null +++ b/e2e/config.go @@ -0,0 +1,121 @@ +/* + * (c) Copyright IBM Corp. 2024 + * (c) Copyright Instana Inc. 2024 + */ + +package e2e + +import ( + "os" + + log "k8s.io/klog/v2" + "sigs.k8s.io/e2e-framework/support/utils" +) + +type InstanaTestConfig struct { + ContainerRegistry *ContainerRegistry + InstanaBackend *InstanaBackend + OperatorImage *OperatorImage +} + +type ContainerRegistry struct { + Name string + User string + Host string + Password string +} + +type InstanaBackend struct { + EndpointHost string + EndpointPort int + AgentKey string +} + +type OperatorImage struct { + Name string + Tag string +} + +var InstanaTestCfg InstanaTestConfig + +const InstanaNamespace string = "instana-agent" +const InstanaOperatorDeploymentName string = "controller-manager" +const AgentDaemonSetName string = "instana-agent" +const AgentCustomResourceName string = "instana-agent" +const K8sensorDeploymentName string = "instana-agent-k8sensor" +const InstanaAgentConfigSecretName string = "instana-agent-config" + +func init() { + var instanaApiKey, containerRegistryUser, containerRegistryPassword, containerRegistryHost, endpointHost, operatorImageName, operatorImageTag string + var found, fatal bool + + instanaApiKey, found = os.LookupEnv("INSTANA_API_KEY") + if !found { + log.Errorln("Required: $INSTANA_API_KEY not defined") + fatal = true + } + containerRegistryUser, found = os.LookupEnv("ARTIFACTORY_USERNAME") + if !found { + log.Errorln("Required: $ARTIFACTORY_USERNAME not defined") + fatal = true + } + containerRegistryPassword, found = os.LookupEnv("ARTIFACTORY_PASSWORD") + if !found { + log.Errorln("Required: $ARTIFACTORY_PASSWORD not defined") + fatal = true + } + containerRegistryHost, found = os.LookupEnv("ARTIFACTORY_HOST") + if !found { + log.Warningln("Optional: $ARTIFACTORY_HOST not defined, using default") + containerRegistryHost = "delivery.instana.io" + } + endpointHost, found = os.LookupEnv("INSTANA_ENDPOINT_HOST") + if !found { + log.Warningln("Optional: $INSTANA_ENDPOINT_HOST not defined, using default") + endpointHost = "ingress-red-saas.instana.io" + } + operatorImageName, found = os.LookupEnv("OPERATOR_IMAGE_NAME") + if !found { + log.Warningln("Optional: $OPERATOR_IMAGE_NAME not defined, using default") + operatorImageName = "delivery.instana.io/int-docker-agent-local/instana-agent-operator/dev-build" + } + + operatorImageTag, found = os.LookupEnv("OPERATOR_IMAGE_TAG") + if !found { + log.Warningln("Optional: $OPERATOR_IMAGE_TAG not defined, falling back to $GIT_COMMIT") + operatorImageTag, found = os.LookupEnv("GIT_COMMIT") + if !found { + log.Warningln("Optional: $GIT_COMMIT is not defined, falling back to git cli to resolve last commit") + p := utils.RunCommand("git rev-parse HEAD") + if p.Err() != nil { + log.Warningf("Error while getting git commit via cli: %v, %v, %v, %v\n", p.Command(), p.Err(), p.Out(), p.ExitCode()) + log.Fatalln("Required: Either $OPERATOR_IMAGE_TAG or $GIT_COMMIT must be set to be able to deploy a custom operator build") + fatal = true + } + // using short commit as tag (default) + operatorImageTag = p.Result()[0:7] + } + } + + if fatal { + log.Fatalln("Fatal: Required configuration is missing, tests would not work without those settings, terminating execution") + } + + InstanaTestCfg = InstanaTestConfig{ + ContainerRegistry: &ContainerRegistry{ + Name: "delivery-instana", + User:
containerRegistryUser, + Password: containerRegistryPassword, + Host: containerRegistryHost, + }, + InstanaBackend: &InstanaBackend{ + EndpointHost: endpointHost, + EndpointPort: 443, + AgentKey: instanaApiKey, + }, + OperatorImage: &OperatorImage{ + Name: operatorImageName, + Tag: operatorImageTag, + }, + } +} diff --git a/e2e/install_test.go b/e2e/install_test.go new file mode 100644 index 00000000..6c038720 --- /dev/null +++ b/e2e/install_test.go @@ -0,0 +1,95 @@ +/* + * (c) Copyright IBM Corp. 2024 + * (c) Copyright Instana Inc. 2024 + */ + +package e2e + +import ( + "context" + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + "sigs.k8s.io/e2e-framework/support/utils" +) + +func TestInitialInstall(t *testing.T) { + agent := NewAgentCr(t) + initialInstallFeature := features.New("initial install dev-operator-build"). + Setup(SetupOperatorDevBuild()). + Setup(DeployAgentCr(&agent)). + Assess("wait for controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + // test feature + testEnv.Test(t, initialInstallFeature) +} +func TestUpdateInstall(t *testing.T) { + agent := NewAgentCr(t) + installLatestFeature := features.New("deploy latest released instana-agent-operator"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + const latestOperatorYamlUrl string = "https://github.com/instana/instana-agent-operator/releases/latest/download/instana-agent-operator.yaml" + t.Logf("Installing latest available operator from %s", latestOperatorYamlUrl) + p := utils.RunCommand( + fmt.Sprintf("kubectl apply -f %s", latestOperatorYamlUrl), + ) + if p.Err() != nil { + t.Fatal("Error while applying latest operator yaml", p.Command(), p.Err(), p.Out(), p.ExitCode()) + } + return ctx + }). + Setup(WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Setup(DeployAgentCr(&agent)). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + updateInstallDevBuildFeature := features.New("upgrade install from latest released to dev-operator-build"). + Setup(SetupOperatorDevBuild()). + Assess("wait for controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + checkReconciliationFeature := features.New("check reconcile works with new operator deployment"). 
+ Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + // delete agent daemonset + t.Log("Delete agent DaemonSet") + var ds appsv1.DaemonSet + if err := cfg.Client().Resources().Get(ctx, AgentDaemonSetName, cfg.Namespace(), &ds); err != nil { + t.Fatal(err) + } + if err := cfg.Client().Resources().Delete(ctx, &ds); err != nil { + t.Fatal(err) + } + t.Log("Agent DaemonSet deleted") + + t.Log("Delete k8sensor Deployment") + var dep appsv1.Deployment + if err := cfg.Client().Resources().Get(ctx, K8sensorDeploymentName, cfg.Namespace(), &dep); err != nil { + t.Fatal(err) + } + + if err := cfg.Client().Resources().Delete(ctx, &dep); err != nil { + t.Fatal(err) + } + t.Log("K8sensor Deployment deleted") + t.Log("Assessing reconciliation now") + return ctx + }). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady("instana-agent-k8sensor")). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + // test feature + testEnv.Test(t, installLatestFeature, updateInstallDevBuildFeature, checkReconciliationFeature) +} diff --git a/e2e/main_test.go b/e2e/main_test.go new file mode 100644 index 00000000..4a2ce078 --- /dev/null +++ b/e2e/main_test.go @@ -0,0 +1,45 @@ +/* + * (c) Copyright IBM Corp. 2024 + * (c) Copyright Instana Inc. 2024 + */ + +package e2e + +import ( + "context" + "os" + "testing" + + "sigs.k8s.io/e2e-framework/klient/conf" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" +) + +var testEnv env.Environment + +func TestMain(m *testing.M) { + path := conf.ResolveKubeConfigFile() + cfg := envconf.NewWithKubeConfig(path) + cfg.WithNamespace(InstanaNamespace) + testEnv = env.NewWithConfig(cfg) + // cluster level setup + testEnv.Setup( + AdjustOcpPermissionsIfNecessary(), + ) + // ensure a new clean namespace before every test + // EnvFuncs are only allowed in testEnv.Setup, testEnv.BeforeEachTest requires TestEnvFuncs, therefore converting below + testEnv.BeforeEachTest( + func(ctx context.Context, cfg *envconf.Config, t *testing.T) (context.Context, error) { + return EnsureAgentNamespaceDeletion()(ctx, cfg) + }, + func(ctx context.Context, cfg *envconf.Config, t *testing.T) (context.Context, error) { + return envfuncs.CreateNamespace(cfg.Namespace())(ctx, cfg) + }, + ) + // Consider leave artifacts in cluster for easier debugging, + // as a new run needs to cleanup anyways. Cleanup for now to ensure + // that the existing test suite is not facing issues. + testEnv.Finish(EnsureAgentNamespaceDeletion()) + os.Exit(testEnv.Run(m)) +} diff --git a/e2e/multi_backend_test.go b/e2e/multi_backend_test.go new file mode 100644 index 00000000..93c48210 --- /dev/null +++ b/e2e/multi_backend_test.go @@ -0,0 +1,103 @@ +/* + * (c) Copyright IBM Corp. 2024 + * (c) Copyright Instana Inc. 2024 + */ + +package e2e + +import ( + "context" + "fmt" + "os" + "testing" + + instanav1 "github.com/instana/instana-agent-operator/api/v1" + "sigs.k8s.io/e2e-framework/klient/decoder" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +func TestMultiBackendSupportExternalSecret(t *testing.T) { + installCrWithExternalSecretFeature := features.New("multiple backend support with external keyssecret"). + Setup(SetupOperatorDevBuild()). 
+		Setup(WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)).
+		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			r, err := resources.New(cfg.Client().RESTConfig())
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			t.Logf("Creating dummy secret")
+
+			err = decoder.ApplyWithManifestDir(ctx, r, "../config/samples", "external_secret_instana_agent_key.yaml", []resources.CreateOption{})
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			t.Logf("Secret created")
+
+			t.Logf("Creating dummy agent CR with external secret")
+			err = decoder.ApplyWithManifestDir(ctx, r, "../config/samples", "instana_v1_instanaagent_multiple_backends_external_keyssecret.yaml", []resources.CreateOption{})
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Logf("CR created")
+
+			return ctx
+		}).
+		Assess("wait for first k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)).
+		Assess("wait for second k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(fmt.Sprintf("%s-1", K8sensorDeploymentName))).
+		Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()).
+		Assess("validate instana-agent-config secret contains 2 backends", ValidateAgentMultiBackendConfiguration()).
+		Feature()
+
+	// test feature
+	testEnv.Test(t, installCrWithExternalSecretFeature)
+}
+
+func TestMultiBackendSupportInlineSecret(t *testing.T) {
+	installCrWithInlineSecretFeature := features.New("multiple backend support with inlined keyssecret").
+		Setup(SetupOperatorDevBuild()).
+		Setup(WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)).
+		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
+			r, err := resources.New(cfg.Client().RESTConfig())
+			if err != nil {
+				t.Fatal(err)
+			}
+			err = instanav1.AddToScheme(r.GetScheme())
+			if err != nil {
+				t.Fatal(err)
+			}
+			r.WithNamespace(cfg.Namespace())
+
+			// decode the multi-backend sample CR and create it via the typed client
+			f, err := os.Open("../config/samples/instana_v1_instanaagent_multiple_backends.yaml")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer f.Close()
+			var agent instanav1.InstanaAgent
+			err = decoder.Decode(f, &agent)
+			if err != nil {
+				t.Fatal("Could not decode agent", err)
+			}
+
+			// this variant carries the backend keys inline in the CR spec
+			// rather than referencing an external secret
+			t.Logf("Creating dummy agent CR with inline key")
+
+			err = decoder.CreateHandler(r)(ctx, &agent)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			t.Logf("CR created")
+			return ctx
+		}).
+		Assess("wait for first k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)).
+		Assess("wait for second k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(fmt.Sprintf("%s-1", K8sensorDeploymentName))).
+		Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()).
+		Assess("validate instana-agent-config secret contains 2 backends", ValidateAgentMultiBackendConfiguration()).
+ Feature() + + // test feature + testEnv.Test(t, installCrWithInlineSecretFeature) +} diff --git a/go.mod b/go.mod index 94fafc5a..bcce31df 100644 --- a/go.mod +++ b/go.mod @@ -1,5 +1,8 @@ module github.com/instana/instana-agent-operator +// use full version x.y.z +// see https://github.com/instana/instana-agent-operator/pull/218 +// and https://github.com/golang/go/issues/62278#issuecomment-1693538776 go 1.23.2 require ( @@ -7,6 +10,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/go-errors/errors v1.4.2 github.com/go-logr/logr v1.4.1 + github.com/openshift/client-go v0.0.0-20240906181530-b2f7c4ab0984 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.8.4 go.uber.org/mock v0.4.0 @@ -16,7 +20,9 @@ require ( k8s.io/api v0.30.3 k8s.io/apimachinery v0.30.3 k8s.io/client-go v0.30.3 + k8s.io/klog/v2 v2.120.1 sigs.k8s.io/controller-runtime v0.18.5 + sigs.k8s.io/e2e-framework v0.4.0 ) require ( @@ -29,6 +35,7 @@ require ( github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/containerd v1.7.12 // indirect @@ -99,6 +106,7 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc6 // indirect + github.com/openshift/api v0.0.0-20240906165951-d73f2e11e0be // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.19.0 // indirect @@ -113,6 +121,7 @@ require ( github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/vladimirvivien/gexe v0.2.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect @@ -143,7 +152,6 @@ require ( k8s.io/apiserver v0.30.3 // indirect k8s.io/cli-runtime v0.30.3 // indirect k8s.io/component-base v0.30.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.30.3 // indirect k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect diff --git a/go.sum b/go.sum index 1bee551c..e97dc3fb 100644 --- a/go.sum +++ b/go.sum @@ -35,6 +35,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= @@ -305,6 +307,10 @@ github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/openshift/api v0.0.0-20240906165951-d73f2e11e0be h1:/9hVw/AJPt3bss5m0YrHIWtIsXv/1L43wn4oTPLR+oA= +github.com/openshift/api v0.0.0-20240906165951-d73f2e11e0be/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM= +github.com/openshift/client-go v0.0.0-20240906181530-b2f7c4ab0984 h1:4OVV/fm6ea+51rZbA/52SFbHdjlzjCKK6OCE7Xtn834= +github.com/openshift/client-go v0.0.0-20240906181530-b2f7c4ab0984/go.mod h1:K+5rEJpGf5LpcwdNtkGsvV3u8wU7m3oHzcVZzuGTRZ4= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -366,6 +372,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/vladimirvivien/gexe v0.2.0 h1:nbdAQ6vbZ+ZNsolCgSVb9Fno60kzSuvtzVh6Ytqi/xY= +github.com/vladimirvivien/gexe v0.2.0/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -575,6 +583,8 @@ oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= +sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=