diff --git a/.github/workflows/remote-controller.yaml b/.github/workflows/remote-controller.yaml index f06b0881..3ce7912e 100644 --- a/.github/workflows/remote-controller.yaml +++ b/.github/workflows/remote-controller.yaml @@ -18,24 +18,24 @@ jobs: fail-fast: false matrix: kindest_node_version: [v1.25.16, v1.26.15] - harbor: ["1.9.0","1.14.0"] + harbor: ["1.9.0","1.14.3"] lagoon_build_image: ["uselagoon/build-deploy-image:main"] experimental: [false] include: - kindest_node_version: v1.27.13 - harbor: "1.14.0" + harbor: "1.14.3" lagoon_build_image: "uselagoon/build-deploy-image:main" experimental: false - kindest_node_version: v1.28.9 - harbor: "1.14.0" + harbor: "1.14.3" lagoon_build_image: "uselagoon/build-deploy-image:main" experimental: false - kindest_node_version: v1.29.4 - harbor: "1.14.0" + harbor: "1.14.3" lagoon_build_image: "uselagoon/build-deploy-image:main" experimental: true - kindest_node_version: v1.30.2 - harbor: "1.14.0" + harbor: "1.14.3" lagoon_build_image: "uselagoon/build-deploy-image:main" experimental: true steps: @@ -79,6 +79,8 @@ jobs: helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx helm repo add harbor https://helm.goharbor.io helm repo add lagoon https://uselagoon.github.io/lagoon-charts/ + helm repo add metallb https://metallb.github.io/metallb + helm repo add jetstack https://charts.jetstack.io - name: Install gojq if: | @@ -92,23 +94,26 @@ jobs: - name: Configure node IP in kind-config.yaml run: | - docker network create kind - export KIND_NODE_IP=$(docker run --network kind --rm alpine ip -o addr show eth0 | sed -nE 's/.* ([0-9.]{7,})\/.*/\1/p') - envsubst < test-resources/kind-config.yaml.tpl > test-resources/kind-config.yaml - cat test-resources/kind-config.yaml + docker network create remote-controller + LAGOON_KIND_CIDR_BLOCK=$(docker network inspect remote-controller | jq '. [0].IPAM.Config[0].Subnet' | tr -d '"') + export KIND_NODE_IP=$(echo ${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' 
'{print $1,$2,$3,240}' OFS='.') + envsubst < test-resources/test-suite.kind-config.yaml.tpl > test-resources/test-suite.kind-config.yaml - name: Create kind cluster - uses: helm/kind-action@v1.3.0 + uses: helm/kind-action@v1.10.0 with: - version: v0.14.0 + version: v0.24.0 + cluster_name: remote-controller node_image: kindest/node:${{ matrix.kindest_node_version }} - config: test-resources/kind-config.yaml + kubectl_version: v1.30.4 + config: test-resources/test-suite.kind-config.yaml - name: Check node IP matches kind configuration run: | - NODE_IP="$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')" + LAGOON_KIND_CIDR_BLOCK=$(docker network inspect remote-controller | jq '. [0].IPAM.Config[0].Subnet' | tr -d '"') + NODE_IP=$(echo ${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' '{print $1,$2,$3,240}' OFS='.') echo Checking for NODE_IP "$NODE_IP" - grep $NODE_IP test-resources/kind-config.yaml + grep $NODE_IP test-resources/test-suite.kind-config.yaml - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 @@ -120,17 +125,6 @@ jobs: load: true tags: uselagoon/remote-controller:test-tag - - name: Install prerequisites - run: make -j8 -O install-lagoon-remote HARBOR_VERSION=${{matrix.harbor}} - - - name: Run Tests + - name: Run github/test-e2e run: | - export PATH=$PATH:/usr/local/kubebuilder/bin - export PATH=$PATH:/usr/local/go/bin - export OVERRIDE_BUILD_DEPLOY_DIND_IMAGE="${{matrix.lagoon_build_image}}" - export HARBOR_URL="http://harbor.$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080" - export HARBOR_API="http://harbor.$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/api" - export KIND_NODE_IP="$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')" - export HARBOR_VERSION=${{matrix.harbor}} - # export GO111MODULE=on - make controller-test \ No newline at end of file + make github/test-e2e HARBOR_VERSION=${{matrix.harbor}} 
OVERRIDE_BUILD_DEPLOY_DIND_IMAGE="${{matrix.lagoon_build_image}}" \ No newline at end of file diff --git a/.gitignore b/.gitignore index d97ffc51..4b5eaa88 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,6 @@ bin *.swp *.swo *~ + +test-resources/test-suite.kind-config.yaml +test-resources/test-suite.metallb-pool.yaml \ No newline at end of file diff --git a/Makefile b/Makefile index b7b4b0a1..8cb005a3 100644 --- a/Makefile +++ b/Makefile @@ -6,21 +6,18 @@ CRD_OPTIONS ?= "crd" CONTROLLER_NAMESPACE ?= lagoon-builddeploy -OVERRIDE_BUILD_DEPLOY_DIND_IMAGE ?= uselagoon/kubectl-build-deploy-dind:latest +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) -# IMAGE_TAG controls the tag used for container images in the lagoon-core, -# lagoon-remote, and lagoon-test charts. If IMAGE_TAG is not set, it will fall -# back to the version set in the CI values file, then to the chart default. -IMAGE_TAG = -# IMAGE_REGISTRY controls the registry used for container images in the -# lagoon-core, lagoon-remote, and lagoon-test charts. If IMAGE_REGISTRY is not -# set, it will fall back to the version set in the chart values files. This -# only affects lagoon-core, lagoon-remote, and the fill-test-ci-values target. -IMAGE_REGISTRY = uselagoon +OVERRIDE_BUILD_DEPLOY_DIND_IMAGE ?= uselagoon/build-deploy-image:main -INGRESS_VERSION=4.1.3 +INGRESS_VERSION=4.9.1 -HARBOR_VERSION=1.9.0 +HARBOR_VERSION=1.14.3 + +KIND_CLUSTER ?= remote-controller TIMEOUT = 30m HELM = helm @@ -34,56 +31,87 @@ else GOBIN=$(shell go env GOBIN) endif +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.29.0 +ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) +ENVTEST_VERSION ?= latest + all: manager -# Run tests -test: generate fmt vet manifests - go test ./... 
-coverprofile cover.out +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out # Build manager binary +.PHONY: manager manager: generate fmt vet go build -o bin/manager main.go # Run against the configured Kubernetes cluster in ~/.kube/config +.PHONY: run run: generate fmt vet manifests go run ./main.go --controller-namespace=${CONTROLLER_NAMESPACE} # Install CRDs into a cluster +.PHONY: install install: manifests kustomize build config/crd | kubectl apply -f - +.PHONY: outputcrds outputcrds: manifests kustomize build config/crd +.PHONY: uninstall # Uninstall CRDs from a cluster uninstall: manifests kustomize build config/crd | kubectl delete -f - # Deploy controller in the configured Kubernetes cluster in ~/.kube/config +.PHONY: preview preview: manifests cd config/manager && kustomize edit set image controller=${IMG} + @export HARBOR_URL="https://registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}' || echo 127.0.0.1).nip.io" && \ + echo "OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=${OVERRIDE_BUILD_DEPLOY_DIND_IMAGE}" > config/default/config.properties && \ + echo "HARBOR_URL=$${HARBOR_URL}" >> config/default/config.properties && \ + echo 
"HARBOR_API=$${HARBOR_URL}/api" >> config/default/config.properties OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=${OVERRIDE_BUILD_DEPLOY_DIND_IMAGE} kustomize build config/default + cp config/default/config.properties.default config/default/config.properties # Deploy controller in the configured Kubernetes cluster in ~/.kube/config +# this is only used locally for development or in the test suite +.PHONY: deploy deploy: manifests cd config/manager && kustomize edit set image controller=${IMG} + @if kind get clusters | grep -q $(KIND_CLUSTER); then \ + kind export kubeconfig --name=$(KIND_CLUSTER) \ + && export HARBOR_URL="https://registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io"; \ + fi && \ + echo "OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=${OVERRIDE_BUILD_DEPLOY_DIND_IMAGE}" > config/default/config.properties && \ + echo "HARBOR_URL=$${HARBOR_URL}" >> config/default/config.properties && \ + echo "HARBOR_API=$${HARBOR_URL}/api" >> config/default/config.properties OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=${OVERRIDE_BUILD_DEPLOY_DIND_IMAGE} kustomize build config/default | kubectl apply -f - + cp config/default/config.properties.default config/default/config.properties -# Generate manifests e.g. CRD, RBAC etc. -manifests: controller-gen - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases - -# Run go fmt against code -fmt: - go fmt ./... - -# Run go vet against code -vet: - go vet ./... - -# Generate code -generate: controller-gen - $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..." +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases
 
 # Build the docker image
 docker-build: test
@@ -110,35 +138,62 @@ else
 CONTROLLER_GEN=$(shell which controller-gen)
 endif
 
-controller-test:
-	./controller-test.sh
-
-clean:
-	docker compose down
-	kind delete cluster --name ${KIND_NAME}
-
-local-circle:
-	circleci build -v $(shell pwd):/workdir
+.PHONY: install-metallb
+install-metallb:
+	LAGOON_KIND_CIDR_BLOCK=$$(docker network inspect $(KIND_CLUSTER) | $(JQ) '. [0].IPAM.Config[0].Subnet' | tr -d '"') && \
+	export LAGOON_KIND_NETWORK_RANGE=$$(echo $${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' '{print $$1,$$2,$$3,240}' OFS='.')/29 && \
+	$(HELM) upgrade \
+		--install \
+		--create-namespace \
+		--namespace metallb-system \
+		--wait \
+		--timeout $(TIMEOUT) \
+		--version=v0.13.12 \
+		metallb \
+		metallb/metallb && \
+	envsubst < test-resources/test-suite.metallb-pool.yaml.tpl > test-resources/test-suite.metallb-pool.yaml && \
+	$(KUBECTL) apply -f test-resources/test-suite.metallb-pool.yaml
+# cert-manager is used to allow self-signed certificates to be generated automatically by ingress in the same way lets-encrypt would
+.PHONY: install-certmanager
+install-certmanager: install-metallb
+	$(HELM) upgrade \
+		--install \
+		--create-namespace \
+		--namespace cert-manager \
+		--wait \
+		--timeout $(TIMEOUT) \
+		--set installCRDs=true \
+		--set ingressShim.defaultIssuerName=lagoon-testing-issuer \
+		--set ingressShim.defaultIssuerKind=ClusterIssuer \
+		--set ingressShim.defaultIssuerGroup=cert-manager.io \
+		--version=v1.11.0 \
+		cert-manager \
+		jetstack/cert-manager
+	$(KUBECTL) apply -f test-resources/test-suite.certmanager-issuer-ss.yaml
+# installs ingress-nginx
 .PHONY: install-ingress
-install-ingress:
+install-ingress: install-certmanager
 	$(HELM) upgrade \
 		--install \
 		--create-namespace \
 		--namespace ingress-nginx \
 		--wait \
 		--timeout $(TIMEOUT) \
-		--set controller.service.type=NodePort \
+		--set controller.allowSnippetAnnotations=true \
+		--set 
controller.service.type=LoadBalancer \ --set controller.service.nodePorts.http=32080 \ --set controller.service.nodePorts.https=32443 \ - --set controller.config.proxy-body-size=100m \ + --set controller.config.proxy-body-size=0 \ + --set controller.config.hsts="false" \ --set controller.watchIngressWithoutClass=true \ --set controller.ingressClassResource.default=true \ --version=$(INGRESS_VERSION) \ ingress-nginx \ ingress-nginx/ingress-nginx +# installs harbor .PHONY: install-registry install-registry: install-ingress $(HELM) upgrade \ @@ -147,10 +202,13 @@ install-registry: install-ingress --namespace registry \ --wait \ --timeout $(TIMEOUT) \ - --set expose.tls.enabled=false \ - --set "expose.ingress.annotations.kubernetes\.io\/ingress\.class=nginx" \ - --set "expose.ingress.hosts.core=harbor.$$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io" \ - --set "externalURL=http://harbor.$$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080" \ + --set expose.tls.enabled=true \ + --set expose.tls.certSource=secret \ + --set expose.tls.secret.secretName=harbor-ingress \ + --set expose.ingress.className=nginx \ + --set-string expose.ingress.annotations.kubernetes\\.io/tls-acme=true \ + --set "expose.ingress.hosts.core=registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ + --set "externalURL=https://registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ --set chartmuseum.enabled=false \ --set clair.enabled=false \ --set notary.enabled=false \ @@ -159,20 +217,91 @@ install-registry: install-ingress registry \ harbor/harbor +# installs lagoon-remote mainly for the docker-host .PHONY: install-lagoon-remote -install-lagoon-remote: install-registry install-ingress +install-lagoon-remote: install-registry $(HELM) upgrade \ --install \ 
--create-namespace \ --namespace lagoon \ --wait \ --timeout $(TIMEOUT) \ - --set dockerHost.image.repository=$(IMAGE_REGISTRY)/docker-host \ --set "lagoon-build-deploy.enabled=false" \ - --set "dockerHost.registry=harbor.$$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080" \ + --set "dockerHost.registry=registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ --set "dockerHost.storage.size=10Gi" \ - --set "dioscuri.enabled=false" \ --set "dbaas-operator.enabled=false" \ - $$([ $(IMAGE_TAG) ] && echo '--set imageTag=$(IMAGE_TAG)') \ lagoon \ - lagoon/lagoon-remote \ No newline at end of file + lagoon/lagoon-remote + +# .PHONY: create-kind-cluster +# create-kind-cluster: +# @if ! kind get clusters | grep -q $(KIND_CLUSTER); then \ +# docker network inspect $(KIND_CLUSTER) >/dev/null 2>&1 || docker network create $(KIND_CLUSTER) \ +# && export KIND_NODE_IP=$$(docker run --network $(KIND_CLUSTER) --rm alpine ip -o addr show eth0 | sed -nE 's/.* ([0-9.]{7,})\/.*/\1/p') \ +# && envsubst < test-resources/test-suite.kind-config.yaml.tpl > test-resources/test-suite.kind-config.yaml \ +# && kind create cluster --wait=60s --name=$(KIND_CLUSTER) --config=test-resources/test-suite.kind-config.yaml; \ +# else \ +# echo "Cluster $(KIND_CLUSTER) already exists"; \ +# fi + +.PHONY: create-kind-cluster +create-kind-cluster: + docker network inspect $(KIND_CLUSTER) >/dev/null || docker network create $(KIND_CLUSTER) \ + && LAGOON_KIND_CIDR_BLOCK=$$(docker network inspect $(KIND_CLUSTER) | $(JQ) '. [0].IPAM.Config[0].Subnet' | tr -d '"') \ + && export KIND_NODE_IP=$$(echo $${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' 
'{print $$1,$$2,$$3,240}' OFS='.') \
+	&& envsubst < test-resources/test-suite.kind-config.yaml.tpl > test-resources/test-suite.kind-config.yaml \
+	&& kind create cluster --wait=60s --name=$(KIND_CLUSTER) --config=test-resources/test-suite.kind-config.yaml
+
+# Create a kind cluster locally and run the test e2e test suite against it
+.PHONY: kind/test-e2e # Run the e2e tests against a Kind k8s instance that is spun up locally
+kind/test-e2e: create-kind-cluster install-lagoon-remote kind/re-test-e2e
+
+.PHONY: kind/re-test-e2e # Run the e2e tests against a Kind k8s instance that is spun up locally
+kind/re-test-e2e:
+	export KIND_CLUSTER=$(KIND_CLUSTER) && \
+	kind export kubeconfig --name=$(KIND_CLUSTER) && \
+	export HARBOR_VERSION=$(HARBOR_VERSION) && \
+	export OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=$(OVERRIDE_BUILD_DEPLOY_DIND_IMAGE) && \
+	$(MAKE) test-e2e
+
+.PHONY: kind/clean
+kind/clean:
+	docker compose down && \
+	kind delete cluster --name=$(KIND_CLUSTER) && docker network rm $(KIND_CLUSTER)
+
+# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors.
+.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up inside github action.
+test-e2e: + export HARBOR_VERSION=$(HARBOR_VERSION) && \ + export OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=$(OVERRIDE_BUILD_DEPLOY_DIND_IMAGE) && \ + go test ./test/e2e/ -v -ginkgo.v + +.PHONY: github/test-e2e +github/test-e2e: install-lagoon-remote test-e2e + +.PHONY: kind/set-kubeconfig +kind/set-kubeconfig: + export KIND_CLUSTER=$(KIND_CLUSTER) && \ + kind export kubeconfig --name=$(KIND_CLUSTER) + +.PHONY: kind/logs-remote-controller +kind/logs-remote-controller: + export KIND_CLUSTER=$(KIND_CLUSTER) && \ + kind export kubeconfig --name=$(KIND_CLUSTER) && \ + kubectl -n remote-controller-system logs -f \ + $$(kubectl -n remote-controller-system get pod -l control-plane=controller-manager -o jsonpath="{.items[0].metadata.name}") \ + -c manager + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary (ideally with version) +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f $(1) ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ +} +endef \ No newline at end of file diff --git a/config/crd/bases/crd.lagoon.sh_lagoonbuilds.yaml b/config/crd/bases/crd.lagoon.sh_lagoonbuilds.yaml index e7ee79eb..ba3426da 100644 --- a/config/crd/bases/crd.lagoon.sh_lagoonbuilds.yaml +++ b/config/crd/bases/crd.lagoon.sh_lagoonbuilds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.2 + controller-gen.kubebuilder.io/version: v0.16.5 name: lagoonbuilds.crd.lagoon.sh spec: group: crd.lagoon.sh diff --git a/config/crd/bases/crd.lagoon.sh_lagoontasks.yaml b/config/crd/bases/crd.lagoon.sh_lagoontasks.yaml index 50d7623a..bc08f7a3 100644 --- a/config/crd/bases/crd.lagoon.sh_lagoontasks.yaml +++ 
b/config/crd/bases/crd.lagoon.sh_lagoontasks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.2 + controller-gen.kubebuilder.io/version: v0.16.5 name: lagoontasks.crd.lagoon.sh spec: group: crd.lagoon.sh diff --git a/config/default/config.properties.default b/config/default/config.properties.default new file mode 100644 index 00000000..8d18a393 --- /dev/null +++ b/config/default/config.properties.default @@ -0,0 +1,3 @@ +OVERRIDE_BUILD_DEPLOY_DIND_IMAGE +HARBOR_URL +HARBOR_API \ No newline at end of file diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index 340f6706..ae167ed4 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -33,3 +33,4 @@ spec: - "--qos-max-builds=3" - "--enable-deprecated-apis" - "--lagoon-feature-flag-support-k8upv2" + - "--skip-tls-verify" diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index ad13e96b..01d1a3a8 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: controller - newTag: latest + newName: example.com/remote-controller + newTag: v0.0.1 diff --git a/controller-test.sh b/controller-test.sh deleted file mode 100755 index 0da8fe52..00000000 --- a/controller-test.sh +++ /dev/null @@ -1,547 +0,0 @@ -#!/bin/bash - -#KIND_VER=v1.13.12 -#KIND_VER=v1.14.10 -#KIND_VER=v1.15.7 -#KIND_VER=v1.16.4 -KIND_VER=v1.17.5 -# or get the latest tagged version of a specific k8s version of kind -#KIND_VER=$(curl -s https://hub.docker.com/v2/repositories/kindest/node/tags | jq -r '.results | .[].name' | grep 'v1.17' | sort -Vr | head -1) -KIND_NAME=chart-testing -CONTROLLER_IMAGE=uselagoon/remote-controller:test-tag - - 
-CONTROLLER_NAMESPACE=remote-controller-system -CHECK_TIMEOUT=20 - -NS=nginx-example-main -LBUILD=7m5zypx -LBUILD2=8m5zypx -LBUILD3=9m5zypx -LBUILD4=1m5zypx - -LATEST_CRD_VERSION=v1beta2 - -HARBOR_VERSION=${HARBOR_VERSION:-1.6.4} - -check_controller_log () { - echo "=========== CONTROLLER LOG ============" - kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} --previous=true - kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} - if $(kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} | grep -q "Build ${1} Failed") - then - # build failed, exit 1 - tear_down - echo "============== FAILED ===============" - exit 1 - fi -} - -tear_down () { - echo "============= TEAR DOWN =============" - echo "==> Get ingress" - kubectl get ingress --all-namespaces - echo "==> Get pods" - kubectl get pods --all-namespaces - echo "==> Get remote logs (docker-host)" - kubectl describe pods --namespace=lagoon --selector=app.kubernetes.io/name=lagoon-remote - kubectl logs --tail=80 --namespace=lagoon --prefix --timestamps --all-containers --selector=app.kubernetes.io/name=lagoon-remote - echo "==> Remove cluster" - kind delete cluster --name ${KIND_NAME} - echo "==> Remove services" - docker compose down -} - -start_docker_compose_services () { - echo "================ BEGIN ================" - echo "==> Bring up local provider" - docker compose up -d - CHECK_COUNTER=1 -} - -mariadb_start_check () { - until $(docker compose exec -T mysql mysql --host=local-dbaas-mariadb-provider --port=3306 -uroot -e 'show databases;' | grep -q "information_schema") - do - if [ $CHECK_COUNTER -lt $CHECK_TIMEOUT ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Database provider not running yet" - sleep 5 - else - echo "Timeout of $CHECK_TIMEOUT for database provider 
startup reached" - echo "============== FAILED ===============" - exit 1 - fi - done - echo "==> Database provider is running" -} - -install_path_provisioner () { - echo "==> Install local path provisioner" - kubectl apply -f test-resources/local-path-storage.yaml - echo "==> local path provisioner installed" - ## add the bulk storageclass for builds to use - kubectl apply -f test-resources/bulk-storage.yaml - echo "==> Bulk storage configured" -} - -build_deploy_controller () { - echo "==> Install CRDs and deploy controller" - make install - - # set the images into config.properties so that kustomize can read them - echo "OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=$OVERRIDE_BUILD_DEPLOY_DIND_IMAGE" > config/default/config.properties - echo "HARBOR_URL=$HARBOR_URL" >> config/default/config.properties - echo "HARBOR_API=$HARBOR_API" >> config/default/config.properties - - kind load docker-image ${CONTROLLER_IMAGE} --name ${KIND_NAME} - make deploy IMG=${CONTROLLER_IMAGE} - - CHECK_COUNTER=1 - echo "==> Ensure controller is running" - until $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | grep -q "Running") - do - if [ $CHECK_COUNTER -lt $CHECK_TIMEOUT ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Controller not running yet" - sleep 5 - else - echo "Timeout of $CHECK_TIMEOUT for controller startup reached" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 - fi - done - echo "==> Controller is running" -} - -clean_task_test_resources() { - kubectl -n $NS delete -f test-resources/dynamic-secret-in-task-project1-secret.yaml - kubectl -n $NS delete -f test-resources/dynamic-secret-in-task-project1.yaml -} - -wait_for_task_pod_to_complete () { - POD_NAME=${1} - CHECK_COUNTER=1 - echo "==> Check task progress" - until $(kubectl -n ${NS} get pods ${1} --no-headers | grep -iq "Completed") - do - echo "=====> Pods in ns ${NS}:" - kubectl -n ${NS} get pods ${1} --no-headers - if [ 
$CHECK_COUNTER -lt $CHECK_TIMEOUT ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "==> Task not completed yet" - sleep 5 - else - echo "Timeout of $CHECK_TIMEOUT waiting for task to complete" - echo "=========== TASK LOG ============" - kubectl -n ${NS} logs ${1} -f - clean_task_test_resources - check_controller_log ${1} - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 - fi - done - echo "==> Task completed" -} - - -check_lagoon_build () { - CHECK_COUNTER=1 - echo "==> Check build progress" - until $(kubectl -n ${NS} get pods ${1} --no-headers | grep -iq "Running") - do - if [ $CHECK_COUNTER -lt $CHECK_TIMEOUT ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - if $(kubectl -n ${NS} get pods ${1} --no-headers | grep -iq "Error"); then - echo "Build failed" - echo "=========== BUILD LOG ============" - kubectl -n ${NS} logs ${1} -f - check_controller_log ${1} - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 - fi - echo "Build not running yet" - sleep 5 - else - echo "Timeout of $CHECK_TIMEOUT waiting for build to start reached" - echo "=========== BUILD LOG ============" - kubectl -n ${NS} logs ${1} -f - check_controller_log ${1} - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 - fi - done - echo "==> Build running" - # kubectl -n ${NS} logs ${1} -f -} - -start_docker_compose_services -install_path_provisioner - -# echo "====> Install dbaas-operator" -# helm repo add amazeeio https://amazeeio.github.io/charts/ -# kubectl create namespace dbaas-operator -# helm upgrade --install -n dbaas-operator dbaas-operator amazeeio/dbaas-operator -# helm repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/main/charts -# helm upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f test-resources/helm-values-mariadbprovider.yml - 
-echo "==> Configure example environment" -echo "====> Install build deploy controllers" -build_deploy_controller - -echo "==> Trigger a lagoon build using kubectl apply" -kubectl -n $CONTROLLER_NAMESPACE apply -f test-resources/example-project1.yaml -# patch the resource with the controller namespace -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.crd.lagoon.sh lagoon-build-${LBUILD} --type=merge --patch '{"metadata":{"labels":{"lagoon.sh/controller":"'$CONTROLLER_NAMESPACE'"}}}' -# patch the resource with a random label to bump the controller event filter -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.crd.lagoon.sh lagoon-build-${LBUILD} --type=merge --patch '{"metadata":{"labels":{"bump":"bump"}}}' -sleep 10 -check_lagoon_build lagoon-build-${LBUILD} - -echo "==> Trigger a lagoon build using kubectl apply and check organization labels exist" -kubectl -n $CONTROLLER_NAMESPACE apply -f test-resources/example-project2.yaml -# patch the resource with the controller namespace -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.crd.lagoon.sh lagoon-build-${LBUILD2} --type=merge --patch '{"metadata":{"labels":{"lagoon.sh/controller":"'$CONTROLLER_NAMESPACE'"}}}' -# patch the resource with a random label to bump the controller event filter -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.crd.lagoon.sh lagoon-build-${LBUILD2} --type=merge --patch '{"metadata":{"labels":{"bump":"bump"}}}' -sleep 10 -check_lagoon_build lagoon-build-${LBUILD2} -echo "==> Check organization.lagoon.sh/name label exists on namespace" -if ! $(kubectl get namespace -l 'organization.lagoon.sh/name=test-org' --no-headers 2> /dev/null | grep -q ${NS}); then - echo "==> Build failed to set organization name label on namespace" - clean_task_test_resources - check_controller_log ${1} - tear_down - echo "============== FAILED ===============" - exit 1 -else - echo "===> label exists" -fi -echo "==> Check organization.lagoon.sh/id label exists on namespace" -if ! 
$(kubectl get namespace -l 'organization.lagoon.sh/id=123' --no-headers 2> /dev/null | grep -q ${NS}); then - echo "==> Build failed to set organization id label on namespace" - clean_task_test_resources - check_controller_log ${1} - tear_down - echo "============== FAILED ===============" - exit 1 -else - echo "===> label exists" -fi - -echo "==> deprecated v1beta1 api: Trigger a lagoon build using kubectl apply" -kubectl -n $CONTROLLER_NAMESPACE apply -f test-resources/example-project3.yaml -# patch the resource with the controller namespace -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.v1beta1.crd.lagoon.sh lagoon-build-${LBUILD4} --type=merge --patch '{"metadata":{"labels":{"lagoon.sh/controller":"'$CONTROLLER_NAMESPACE'"}}}' -# patch the resource with a random label to bump the controller event filter -kubectl -n $CONTROLLER_NAMESPACE patch lagoonbuilds.v1beta1.crd.lagoon.sh lagoon-build-${LBUILD4} --type=merge --patch '{"metadata":{"labels":{"bump":"bump"}}}' -sleep 10 -check_lagoon_build lagoon-build-${LBUILD4} - -echo "==> Trigger a Task using kubectl apply to test dynamic secret mounting" - -kubectl -n $NS apply -f test-resources/dynamic-secret-in-task-project1-secret.yaml -kubectl -n $NS apply -f test-resources/dynamic-secret-in-task-project1.yaml -kubectl -n $NS patch lagoontasks.crd.lagoon.sh lagoon-advanced-task-example-task-project-1 --type=merge --patch '{"metadata":{"labels":{"lagoon.sh/controller":"'$CONTROLLER_NAMESPACE'"}}}' -kubectl -n $NS patch lagoontasks.crd.lagoon.sh lagoon-advanced-task-example-task-project-1 --type=merge --patch '{"metadata":{"labels":{"bump":"bump"}}}' -#kubectl get lagoontasks lagoon-advanced-task-example-task-project-1 -n $NS -o yaml - -# wait on pod creation -wait_for_task_pod_to_complete lagoon-advanced-task-example-task-project-1 -VMDATA=$(kubectl get pod -n $NS lagoon-advanced-task-example-task-project-1 -o jsonpath='{.spec.containers[0].volumeMounts}' | jq -r '.[] | select(.name == 
"dynamic-test-dynamic-secret") | .mountPath') - -if [ ! "$VMDATA" = "/var/run/secrets/lagoon/dynamic/test-dynamic-secret" ]; then - echo "==> Task failed to mount dynamic secret" - clean_task_test_resources - check_controller_log ${1} - tear_down - echo "============== FAILED ===============" - exit 1 - else - echo "==> Dynamic secret mounting into tasks good" - clean_task_test_resources -fi - - -echo "==> Trigger a lagoon build using rabbitmq" -echo ' -{ - "properties":{ - "delivery_mode":2 - }, - "routing_key":"ci-local-controller-kubernetes:builddeploy", - "payload":"{ - \"metadata\": { - \"name\": \"lagoon-build-9m5zypx\" - }, - \"spec\": { - \"build\": { - \"ci\": \"true\", - \"type\": \"branch\" - }, - \"gitReference\": \"origin\/main\", - \"project\": { - \"name\": \"nginx-example\", - \"environment\": \"main\", - \"uiLink\": \"https:\/\/dashboard.amazeeio.cloud\/projects\/project\/project-environment\/deployments\/lagoon-build-9m5zypx\", - \"routerPattern\": \"main-nginx-example\", - \"environmentType\": \"production\", - \"productionEnvironment\": \"main\", - \"standbyEnvironment\": \"\", - \"gitUrl\": \"https:\/\/github.com\/shreddedbacon\/lagoon-nginx-example.git\", - \"deployTarget\": \"kind\", - \"projectSecret\": \"4d6e7dd0f013a75d62a0680139fa82d350c2a1285f43f867535bad1143f228b1\", - \"key\": 
\"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDWFFJQkFBS0JnUUNjc1g2RG5KNXpNb0RqQ2R6a1JFOEg2TEh2TDQzaUhsekJLTWo4T1VNV05ZZG5YekdqCkR5Mkp1anQ3ZDNlMTVLeC8zOFo5UzJLdHNnVFVtWi9lUlRQSTdabE1idHRJK250UmtyblZLblBWNzhEeEFKNW8KTGZtQndmdWE2MnlVYnl0cnpYQ2pwVVJrQUlBMEZiR2VqS2Rvd3cxcnZGMzJoZFUzQ3ZIcG5rKzE2d0lEQVFBQgpBb0dCQUkrV0dyL1NDbVMzdCtIVkRPVGtMNk9vdVR6Y1QrRVFQNkVGbGIrRFhaV0JjZFhwSnB3c2NXZFBEK2poCkhnTEJUTTFWS3hkdnVEcEE4aW83cUlMTzJWYm1MeGpNWGk4TUdwY212dXJFNVJydTZTMXJzRDl2R0c5TGxoR3UKK0pUSmViMVdaZFduWFZ2am5LbExrWEV1eUthbXR2Z253Um5xNld5V05OazJ6SktoQWtFQThFenpxYnowcFVuTApLc241K2k0NUdoRGVpRTQvajRtamo1b1FHVzJxbUZWT2pHaHR1UGpaM2lwTis0RGlTRkFyMkl0b2VlK085d1pyCkRINHBkdU5YOFFKQkFLYnVOQ3dXK29sYXA4R2pUSk1TQjV1MW8wMVRHWFdFOGhVZG1leFBBdjl0cTBBT0gzUUQKUTIrM0RsaVY0ektoTlMra2xaSkVjNndzS0YyQmJIby81NXNDUVFETlBJd24vdERja3loSkJYVFJyc1RxZEZuOApCUWpZYVhBZTZEQ3o1eXg3S3ZFSmp1K1h1a01xTXV1ajBUSnpITFkySHVzK3FkSnJQVG9VMDNSS3JHV2hBa0JFCnB3aXI3Vk5pYy9jMFN2MnVLcWNZWWM1a2ViMnB1R0I3VUs1Q0lvaWdGakZzNmFJRDYyZXJwVVJ3S0V6RlFNbUgKNjQ5Y0ZXemhMVlA0aU1iZFREVHJBa0FFMTZXU1A3WXBWOHV1eFVGMGV0L3lFR3dURVpVU2R1OEppSTBHN0tqagpqcVR6RjQ3YkJZc0pIYTRYcWpVb2E3TXgwcS9FSUtRWkJ2NGFvQm42bGFOQwotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==\", - \"monitoring\": { - \"contact\": \"1234\", - \"statuspageID\": \"1234\" - }, - \"variables\": { - \"project\": \"W3sibmFtZSI6IkxBR09PTl9TWVNURU1fUk9VVEVSX1BBVFRFUk4iLCJ2YWx1ZSI6IiR7ZW52aXJvbm1lbnR9LiR7cHJvamVjdH0uZXhhbXBsZS5jb20iLCJzY29wZSI6ImludGVybmFsX3N5c3RlbSJ9XQ==\", - \"environment\": \"W10=\" - } - }, - \"branch\": { - \"name\": \"main\" - } - } - }", - "payload_encoding":"string" -}' >payload.json -curl -s -u guest:guest -H "Accept: application/json" -H "Content-Type:application/json" -X POST -d @payload.json http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish -echo "" -sleep 10 -check_lagoon_build lagoon-build-${LBUILD3} - -echo "==> Check pod cleanup worked" -CHECK_COUNTER=1 -# wait for first build pod to clean up -until ! 
$(kubectl -n nginx-example-main get pods lagoon-build-${LBUILD} &> /dev/null) -do -if [ $CHECK_COUNTER -lt 14 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Build pod not deleted yet" - sleep 5 -else - echo "Timeout of 70seconds for build pod clean up check" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi -done -CHECK_COUNTER=1 -# wait for second build pod to clean up -until ! $(kubectl -n nginx-example-main get pods lagoon-build-${LBUILD2} &> /dev/null) -do -if [ $CHECK_COUNTER -lt 14 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Build pod not deleted yet" - sleep 5 -else - echo "Timeout of 70seconds for build pod clean up check" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi -done -echo "==> Pod cleanup output (should only be 1 lagoon-build pod)" - -# only check -POD_CLEANUP_OUTPUT=$(kubectl -n nginx-example-main get pods -l crd.lagoon.sh/version=${LATEST_CRD_VERSION} | grep "lagoon-build") -echo "${POD_CLEANUP_OUTPUT}" -POD_CLEANUP_COUNT=$(echo "${POD_CLEANUP_OUTPUT}" | wc -l | tr -d " ") -if [ $POD_CLEANUP_COUNT -gt 1 ]; then - echo "There is more than 1 build pod left, there should only be 1" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi - -echo "==> Check robot credential rotation worked" -CHECK_COUNTER=1 -until $(kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} | grep -q "Robot credentials rotated for") -do -if [ $CHECK_COUNTER -lt 20 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Credentials not rotated yet" - sleep 5 -else - echo "Timeout of 100seconds for robot credential rotation check" - check_controller_log - tear_down - echo "================ END ================" - echo 
"============== FAILED ===============" - exit 1 -fi -done -kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} | grep "handlers.RotateRobotCredentials" - -# install the k8upv1alpha1 crds for first test -kubectl apply -f test-resources/k8upv1alpha1-crds.yaml -sleep 5 - -echo "==> Trigger a lagoon restore using rabbitmq" -echo ' -{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:misc", - "payload":"{ - \"misc\":{ - \"miscResource\":\"eyJhcGlWZXJzaW9uIjoiYmFja3VwLmFwcHVpby5jaC92MWFscGhhMSIsImtpbmQiOiJSZXN0b3JlIiwibWV0YWRhdGEiOnsibmFtZSI6InJlc3RvcmUtYmYwNzJhMC11cXhxbzMifSwic3BlYyI6eyJzbmFwc2hvdCI6ImJmMDcyYTA5ZTE3NzI2ZGE1NGFkYzc5OTM2ZWM4NzQ1NTIxOTkzNTk5ZDQxMjExZGZjOTQ2NmRmZDViYzMyYTUiLCJyZXN0b3JlTWV0aG9kIjp7InMzIjp7fX0sImJhY2tlbmQiOnsiczMiOnsiYnVja2V0IjoiYmFhcy1uZ2lueC1leGFtcGxlIn0sInJlcG9QYXNzd29yZFNlY3JldFJlZiI6eyJrZXkiOiJyZXBvLXB3IiwibmFtZSI6ImJhYXMtcmVwby1wdyJ9fX19\" - }, - \"key\":\"deploytarget:restic:backup:restore\", - \"environment\":{ - \"name\":\"main\", - \"openshiftProjectName\":\"nginx-example-main\" - }, - \"project\":{ - \"name\":\"nginx-example\" - }, - \"advancedTask\":{} - }", -"payload_encoding":"string" -}' >payload.json -curl -s -u guest:guest -H "Accept: application/json" -H "Content-Type:application/json" -X POST -d @payload.json http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish -echo "" -sleep 10 -CHECK_COUNTER=1 -kubectl -n nginx-example-main get restores -until $(kubectl -n nginx-example-main get restores restore-bf072a0-uqxqo3 &> /dev/null) -do -if [ $CHECK_COUNTER -lt 14 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Restore not created yet" - sleep 5 -else - echo "Timeout of 70seconds for restore to be created" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi -done -kubectl -n nginx-example-main get restores 
restore-bf072a0-uqxqo3 -o yaml | kubectl-neat > test-resources/results/k8upv1alpha1-cluster.yaml -if cmp --silent -- "test-resources/results/k8upv1alpha1.yaml" "test-resources/results/k8upv1alpha1-cluster.yaml"; then - echo "Resulting restores match" -else - echo "Files don't match" - echo "============" - cat test-resources/results/k8upv1alpha1.yaml - echo "============" - cat test-resources/results/k8upv1alpha1-cluster.yaml - echo "============" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi - -# install the k8upv1 crds for testing -kubectl apply -f test-resources/k8upv1-crds.yaml -sleep 5 - -echo "==> Trigger a lagoon restore using rabbitmq" -echo ' -{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:misc", - "payload":"{ - \"misc\":{ - \"miscResource\":\"eyJtZXRhZGF0YSI6eyJuYW1lIjoicmVzdG9yZS1iZjA3MmEwLXVxeHFvNCJ9LCJzcGVjIjp7InNuYXBzaG90IjoiYmYwNzJhMDllMTc3MjZkYTU0YWRjNzk5MzZlYzg3NDU1MjE5OTM1OTlkNDEyMTFkZmM5NDY2ZGZkNWJjMzJhNSIsInJlc3RvcmVNZXRob2QiOnsiczMiOnt9fSwiYmFja2VuZCI6eyJzMyI6eyJidWNrZXQiOiJiYWFzLW5naW54LWV4YW1wbGUifSwicmVwb1Bhc3N3b3JkU2VjcmV0UmVmIjp7ImtleSI6InJlcG8tcHciLCJuYW1lIjoiYmFhcy1yZXBvLXB3In19fX0=\" - }, - \"key\":\"deploytarget:restic:backup:restore\", - \"environment\":{ - \"name\":\"main\", - \"openshiftProjectName\":\"nginx-example-main\" - }, - \"project\":{ - \"name\":\"nginx-example\" - }, - \"advancedTask\":{} - }", -"payload_encoding":"string" -}' >payload.json -curl -s -u guest:guest -H "Accept: application/json" -H "Content-Type:application/json" -X POST -d @payload.json http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish -echo "" -sleep 10 -CHECK_COUNTER=1 -kubectl -n nginx-example-main get restores.k8up.io -until $(kubectl -n nginx-example-main get restores.k8up.io restore-bf072a0-uqxqo4 &> /dev/null) -do -if [ $CHECK_COUNTER -lt 14 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Restore not 
created yet" - sleep 5 -else - echo "Timeout of 70seconds for restore to be created" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi -done -kubectl -n nginx-example-main get restores.k8up.io restore-bf072a0-uqxqo4 -o yaml | kubectl-neat > test-resources/results/k8upv1-cluster.yaml -if cmp --silent -- "test-resources/results/k8upv1.yaml" "test-resources/results/k8upv1-cluster.yaml"; then - echo "Resulting restores match" -else - echo "Files don't match" - echo "============" - cat test-resources/results/k8upv1.yaml - echo "============" - cat test-resources/results/k8upv1-cluster.yaml - echo "============" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi - -echo "==> Delete the environment" -echo ' -{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:remove", - "payload":"{ - \"projectName\": \"nginx-example\", - \"type\":\"branch\", - \"forceDeleteProductionEnvironment\":true, - \"branch\":\"main\", - \"openshiftProjectName\":\"nginx-example-main\" - }", -"payload_encoding":"string" -}' >payload.json -curl -s -u guest:guest -H "Accept: application/json" -H "Content-Type:application/json" -X POST -d @payload.json http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish -echo "" -CHECK_COUNTER=1 -until $(kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} | grep -q "Deleted namespace nginx-example-main for project nginx-example, environment main") -do -if [ $CHECK_COUNTER -lt 20 ]; then - let CHECK_COUNTER=CHECK_COUNTER+1 - echo "Environment not deleted yet" - sleep 5 -else - echo "Timeout of 100seconds for environment to be deleted" - check_controller_log - tear_down - echo "================ END ================" - echo "============== FAILED ===============" - exit 1 -fi 
-done -kubectl logs $(kubectl get pods -n ${CONTROLLER_NAMESPACE} --no-headers | awk '{print $1}') -c manager -n ${CONTROLLER_NAMESPACE} | grep "handlers.LagoonTasks.Deletion" - -check_controller_log -tear_down -echo "================ END ================" \ No newline at end of file diff --git a/go.mod b/go.mod index 3f757eb1..bc390b23 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/k8up-io/k8up/v2 v2.7.1 github.com/mittwald/goharbor-client/v5 v5.3.1 github.com/onsi/ginkgo v1.16.5 + github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 github.com/prometheus/client_golang v1.19.1 github.com/uselagoon/machinery v0.0.29 @@ -52,12 +53,14 @@ require ( github.com/go-openapi/strfmt v0.21.3 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/goharbor/harbor/src v0.0.0-20230220075213-6015b3efa7d0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect github.com/google/uuid v1.6.0 // indirect github.com/guregu/null v4.0.0+incompatible // indirect github.com/imdario/mergo v0.3.13 // indirect @@ -81,7 +84,7 @@ require ( github.com/rabbitmq/amqp091-go v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect - go.mongodb.org/mongo-driver v1.11.2 // indirect + go.mongodb.org/mongo-driver v1.15.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect @@ -93,6 +96,7 @@ require ( golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect + 
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 43493218..66618cd0 100644 --- a/go.sum +++ b/go.sum @@ -538,7 +538,6 @@ github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUri github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -1214,7 +1213,6 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tiago4orion/conjure v0.0.0-20150908101743-93cb30b9d218 h1:tOESt7J50fPC9NqR0VdU1Zxk2zo5QYH70ap5TsU1bt4= github.com/tiago4orion/conjure v0.0.0-20150908101743-93cb30b9d218/go.mod h1:GQei++1WClbEC7AN1B9ipY1jCjzllM/7UNg0okAh/Z4= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1296,8 +1294,8 @@ 
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= -go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= +go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= diff --git a/test-resources/Dockerfile.mariadb b/test-resources/Dockerfile.mariadb deleted file mode 100644 index 9b300c55..00000000 --- a/test-resources/Dockerfile.mariadb +++ /dev/null @@ -1 +0,0 @@ -FROM mariadb:10.4.12 diff --git a/test-resources/harbor-values.yaml b/test-resources/harbor-values.yaml deleted file mode 100644 index 42673a24..00000000 --- a/test-resources/harbor-values.yaml +++ /dev/null @@ -1,25 +0,0 @@ -expose: - tls: - enabled: false - ingress: - hosts: - core: harbor.172.17.0.1.nip.io - annotations: - ingress.kubernetes.io/ssl-redirect: "false" - ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/proxy-body-size: "0" -harborAdminPassword: Harbor12345 -secretKey: "not-a-secure-key" -externalURL: http://harbor.172.17.0.1.nip.io:32080 -persistence: - enabled: false - -chartmuseum: - enabled: false -clair: - enabled: false -trivy: - enabled: true -notary: - enabled: false diff --git 
a/test-resources/helm-values-mariadbprovider.yml b/test-resources/helm-values-mariadbprovider.yml deleted file mode 100644 index 02841e55..00000000 --- a/test-resources/helm-values-mariadbprovider.yml +++ /dev/null @@ -1,17 +0,0 @@ -providers: - production: - environment: production - hostname: production.172.17.0.1.nip.io - readReplicaHostnames: - - production.replica.172.17.0.1.nip.io - password: password - port: '3306' - user: root - development: - environment: development - hostname: development.172.17.0.1.nip.io - readReplicaHostnames: - - development.replica.172.17.0.1.nip.io - password: password - port: '3306' - user: root diff --git a/test-resources/ingress-nginx-values.yaml b/test-resources/ingress-nginx-values.yaml deleted file mode 100644 index a02d20a3..00000000 --- a/test-resources/ingress-nginx-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - service: - type: NodePort - nodePorts: - http: 32080 - https: 32443 - watchIngressWithoutClass: true - ingressClassResource: - default: true \ No newline at end of file diff --git a/test-resources/kind-config.yaml b/test-resources/kind-config.yaml deleted file mode 100644 index 4b7d402d..00000000 --- a/test-resources/kind-config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -containerdConfigPatches: -- |- - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."172.17.0.1:5000"] - endpoint = ["http://172.17.0.1:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.172.17.0.1.nip.io:32080"] - endpoint = ["http://harbor.172.17.0.1.nip.io:32080"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.172.17.0.1.nip.io:32443".tls] - insecure_skip_verify = true -nodes: -- role: control-plane - extraPortMappings: - - containerPort: 32080 - hostPort: 32080 - protocol: TCP - - containerPort: 32443 - hostPort: 32443 - protocol: TCP \ No newline at end of file diff --git a/test-resources/kind-config.yaml.tpl b/test-resources/kind-config.yaml.tpl 
deleted file mode 100644 index 6aaa3bdb..00000000 --- a/test-resources/kind-config.yaml.tpl +++ /dev/null @@ -1,19 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -containerdConfigPatches: -- |- - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."${KIND_NODE_IP}:5000"] - endpoint = ["http://${KIND_NODE_IP}:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.${KIND_NODE_IP}.nip.io:32080"] - endpoint = ["http://harbor.${KIND_NODE_IP}.nip.io:32080"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.${KIND_NODE_IP}.nip.io:32443".tls] - insecure_skip_verify = true -nodes: -- role: control-plane - extraPortMappings: - - containerPort: 32080 - hostPort: 32080 - protocol: TCP - - containerPort: 32443 - hostPort: 32443 - protocol: TCP \ No newline at end of file diff --git a/test-resources/local-path-storage.yaml b/test-resources/local-path-storage.yaml deleted file mode 100644 index 7af9ee86..00000000 --- a/test-resources/local-path-storage.yaml +++ /dev/null @@ -1,156 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: local-path-storage - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: local-path-provisioner-service-account - namespace: local-path-storage - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: local-path-provisioner-role -rules: - - apiGroups: [ "" ] - resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] - verbs: [ "get", "list", "watch" ] - - apiGroups: [ "" ] - resources: [ "endpoints", "persistentvolumes", "pods" ] - verbs: [ "*" ] - - apiGroups: [ "" ] - resources: [ "events" ] - verbs: [ "create", "patch" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "storageclasses" ] - verbs: [ "get", "list", "watch" ] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: local-path-provisioner-bind -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: local-path-provisioner-role 
-subjects: - - kind: ServiceAccount - name: local-path-provisioner-service-account - namespace: local-path-storage - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: local-path-provisioner - namespace: local-path-storage -spec: - replicas: 1 - selector: - matchLabels: - app: local-path-provisioner - template: - metadata: - labels: - app: local-path-provisioner - spec: - serviceAccountName: local-path-provisioner-service-account - containers: - - name: local-path-provisioner - image: rancher/local-path-provisioner:v0.0.19 - imagePullPolicy: IfNotPresent - command: - - local-path-provisioner - - --debug - - start - - --config - - /etc/config/config.json - volumeMounts: - - name: config-volume - mountPath: /etc/config/ - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumes: - - name: config-volume - configMap: - name: local-path-config - ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: local-path -provisioner: rancher.io/local-path -volumeBindingMode: WaitForFirstConsumer -reclaimPolicy: Delete - ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: local-path-config - namespace: local-path-storage -data: - config.json: |- - { - "nodePathMap":[ - { - "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", - "paths":["/opt/local-path-provisioner"] - } - ] - } - setup: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - mkdir -m 0777 -p ${absolutePath} - teardown: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - - rm -rf ${absolutePath} - helperPod.yaml: |- - apiVersion: v1 - kind: Pod - metadata: - name: helper-pod - spec: - containers: - - name: helper-pod - image: busybox - diff --git a/test-resources/test-suite.certmanager-issuer-ss.yaml 
b/test-resources/test-suite.certmanager-issuer-ss.yaml new file mode 100644 index 00000000..fd23b02a --- /dev/null +++ b/test-resources/test-suite.certmanager-issuer-ss.yaml @@ -0,0 +1,38 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: lagoon-testing-ca + namespace: cert-manager +spec: + isCA: true + commonName: lagoon.test + subject: + organizations: + - Lagoon Testing Inc + organizationalUnits: + - Lagoon + dnsNames: + - lagoon.test + secretName: lagoon-test-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: lagoon-testing-issuer +spec: + ca: + secretName: lagoon-test-secret diff --git a/test-resources/test-suite.kind-config.yaml.tpl b/test-resources/test-suite.kind-config.yaml.tpl new file mode 100644 index 00000000..1141b978 --- /dev/null +++ b/test-resources/test-suite.kind-config.yaml.tpl @@ -0,0 +1,8 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.configs."registry.${KIND_NODE_IP}.nip.io".tls] + insecure_skip_verify = true + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.${KIND_NODE_IP}.nip.io"] + endpoint = ["http://registry.${KIND_NODE_IP}.nip.io"] \ No newline at end of file diff --git a/test-resources/test-suite.metallb-pool.yaml.tpl b/test-resources/test-suite.metallb-pool.yaml.tpl new file mode 100644 index 00000000..aa722e39 --- /dev/null +++ b/test-resources/test-suite.metallb-pool.yaml.tpl @@ -0,0 +1,19 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + creationTimestamp: null + name: default + namespace: metallb-system +spec: + addresses: + - ${LAGOON_KIND_NETWORK_RANGE} +--- +apiVersion: metallb.io/v1beta1 +kind: 
L2Advertisement +metadata: + creationTimestamp: null + name: l2advertisement1 + namespace: metallb-system +spec: + ipAddressPools: + - default diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 00000000..43caaf94 --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run e2e tests using the Ginkgo runner. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + fmt.Fprintf(GinkgoWriter, "Starting remote-controller suite\n") + RunSpecs(t, "e2e suite") +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index 00000000..a31bacf9 --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,419 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "strings" + "time" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/uselagoon/remote-controller/test/utils" +) + +const ( + namespace = "remote-controller-system" + timeout = "600s" +) + +var ( + harborversion string + builddeployimage string + + duration = 600 * time.Second + interval = 1 * time.Second +) + +func init() { + harborversion = os.Getenv("HARBOR_VERSION") + builddeployimage = os.Getenv("OVERRIDE_BUILD_DEPLOY_DIND_IMAGE") +} + +var _ = Describe("controller", Ordered, func() { + BeforeAll(func() { + By("start local services") + Expect(utils.StartLocalServices()).To(Succeed()) + + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + // comment to prevent cleaning up controller namespace and local services + AfterAll(func() { + By("removing manager namespace") + cmd := exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + + By("stop local services") + utils.StopLocalServices() + }) + + Context("Operator", func() { + It("should run successfully", func() { + var controllerPodName string + var err error + + // projectimage stores the name of the image used in the example + var projectimage = "example.com/remote-controller:v0.0.1" + + By("building the manager(Operator) image") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectimage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage), fmt.Sprintf("OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=%s", builddeployimage)) + _, err = utils.Run(cmd) + 
ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func() error { + // Get pod name + + cmd = exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + if len(podNames) != 1 { + return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) + } + controllerPodName = podNames[0] + ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) + + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Running" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + + By("validating that lagoonbuilds are working") + for _, name := range []string{"7m5zypx", "8m5zypx", "9m5zypx", "1m5zypx"} { + if name == "9m5zypx" { + By("creating a LagoonBuild resource via rabbitmq") + cmd = exec.Command( + "curl", + "-s", + "-u", + "guest:guest", + "-H", + "'Accept: application/json'", + "-H", + "'Content-Type:application/json'", + "-X", + "POST", + "-d", + fmt.Sprintf("@test/e2e/testdata/lagoon-build-%s.json", name), + "http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish", + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } else { + By("creating a LagoonBuild resource") + cmd = exec.Command( + "kubectl", + "apply", + "-f", + 
fmt.Sprintf("test/e2e/testdata/lagoon-build-%s.yaml", name), + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + time.Sleep(10 * time.Second) + + By("validating that the LagoonBuild build pod is created") + cmd = exec.Command( + "kubectl", + "-n", "nginx-example-main", + "wait", + "--for=condition=Ready", + "pod", + fmt.Sprintf("lagoon-build-%s", name), + fmt.Sprintf("--timeout=%s", timeout), + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the lagoon-build pod completes as expected") + verifyBuildPodCompletes := func() error { + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", fmt.Sprintf("lagoon-build-%s", name), "-o", "jsonpath={.status.phase}", + "-n", "nginx-example-main", + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Succeeded" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyBuildPodCompletes, duration, interval).Should(Succeed()) + + if name == "8m5zypx" { + By("validating that the namespace has organization name label") + cmd = exec.Command( + "kubectl", + "get", + "namespace", + "-l", + "organization.lagoon.sh/name=test-org", + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + By("validating that the namespace has organization id label") + cmd = exec.Command( + "kubectl", + "get", + "namespace", + "-l", + "organization.lagoon.sh/id=123", + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + } + + By("validating that only 1 build pod remains in a namespace") + verifyOnlyOneBuildPod := func() error { + cmd = exec.Command("kubectl", "get", + "pods", "-l", "lagoon.sh/jobType=build", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", "nginx-example-main", + ) 
+ + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + fmt.Printf("expect 1 build pot, but got %d", len(podNames)) + if len(podNames) != 1 { + return fmt.Errorf("expect 1 build pod, but got %d", len(podNames)) + } + return nil + } + EventuallyWithOffset(1, verifyOnlyOneBuildPod, duration, interval).Should(Succeed()) + + By("validating that LagoonTasks are working") + for _, name := range []string{"1m5zypx"} { + if name == "1m5zypx" { + By("creating dynamic secret resource") + cmd = exec.Command( + "kubectl", + "apply", + "-f", + fmt.Sprintf("test/e2e/testdata/dynamic-secret-%s.yaml", name), + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + By("creating a LagoonTask resource") + cmd = exec.Command( + "kubectl", + "apply", + "-f", + fmt.Sprintf("test/e2e/testdata/lagoon-task-%s.yaml", name), + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the lagoon-task pod completes as expected") + verifyTaskPodCompletes := func() error { + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", fmt.Sprintf("lagoon-task-%s", name), "-o", "jsonpath={.status.phase}", + "-n", "nginx-example-main", + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Succeeded" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyTaskPodCompletes, duration, interval).Should(Succeed()) + + if name == "1m5zypx" { + By("validating that the dynamic secret is mounted") + cmd = exec.Command("kubectl", "get", + "pods", fmt.Sprintf("lagoon-task-%s", name), "-o", "jsonpath={.spec.containers[0].volumeMounts}", + "-n", "nginx-example-main", + ) + volumes, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + ExpectWithOffset(2, 
volumes).Should(ContainSubstring(fmt.Sprintf("/var/run/secrets/lagoon/dynamic/dynamic-secret-%s", name))) + } + } + + By("validating that restore tasks are working") + restores := map[string]string{ + "k8up-v1alpha1": "restore-bf072a0-uqxqo3", + "k8up-v1": "restore-bf072a0-uqxqo4", + } + for name, restore := range restores { + By(fmt.Sprintf("installing %s crds", name)) + cmd = exec.Command( + "kubectl", + "apply", + "-f", + fmt.Sprintf("test/e2e/testdata/%s-crds.yaml", name), + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + time.Sleep(5 * time.Second) + + By(fmt.Sprintf("creating a %s restore task via rabbitmq", name)) + cmd = exec.Command( + "curl", + "-s", + "-u", + "guest:guest", + "-H", + "'Accept: application/json'", + "-H", + "'Content-Type:application/json'", + "-X", + "POST", + "-d", + fmt.Sprintf("@test/e2e/testdata/%s-restore.json", name), + "http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish", + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + time.Sleep(10 * time.Second) + + By("validating that the restore is created") + restoreversion := "restores.k8up.io" + tmpl, err := os.ReadFile("test/e2e/testdata/results/restore.tpl") + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + if name == "k8up-v1alpha1" { + restoreversion = "restores.backup.appuio.ch" + } + cmd = exec.Command("kubectl", "get", + restoreversion, restore, + "-n", "nginx-example-main", "-o", fmt.Sprintf("go-template=%s", string(tmpl)), + ) + result, err := utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + testResult, err := os.ReadFile(fmt.Sprintf("test/e2e/testdata/results/%s.yaml", name)) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + Expect(strings.TrimSpace(string(result))).To(Equal(string(testResult))) + } + + By("validating that the harbor robot credentials get rotated successfully") + cmd = exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", 
"go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + controllerPodName = podNames[0] + ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) + verifyRobotCredentialsRotate := func() error { + // Validate pod status + cmd = exec.Command("kubectl", "logs", + controllerPodName, "-c", "manager", + "-n", namespace, + ) + podlogs, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if !strings.Contains(string(podlogs), "Robot credentials rotated for nginx-example-main") { + return fmt.Errorf("robot credentials not rotated yet") + } + return nil + } + EventuallyWithOffset(1, verifyRobotCredentialsRotate, duration, interval).Should(Succeed()) + + By("delete environment via rabbitmq") + cmd = exec.Command( + "curl", + "-s", + "-u", + "guest:guest", + "-H", + "'Accept: application/json'", + "-H", + "'Content-Type:application/json'", + "-X", + "POST", + "-d", + "@test/e2e/testdata/remove-environment.json", + "http://172.17.0.1:15672/api/exchanges/%2f/lagoon-tasks/publish", + ) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the namespace deletes") + verifyNamespaceRemoved := func() error { + cmd = exec.Command("kubectl", "get", + "namespace", "nginx-example-main", "-o", "jsonpath={.status.phase}", + ) + status, err := utils.Run(cmd) + if err == nil { + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) == "Active" || string(status) == "Terminating" { + return fmt.Errorf("namespace in %s status\n", status) + } + } + return nil + } + EventuallyWithOffset(1, verifyNamespaceRemoved, duration, interval).Should(Succeed()) + }) + // uncomment to debug ... 
+ // time.Sleep(5 * time.Minute) + }) + +}) diff --git a/test-resources/bulk-storage.yaml b/test/e2e/testdata/bulk-storageclass.yaml similarity index 100% rename from test-resources/bulk-storage.yaml rename to test/e2e/testdata/bulk-storageclass.yaml diff --git a/test-resources/dynamic-secret-in-task-project1-secret.yaml b/test/e2e/testdata/dynamic-secret-1m5zypx.yaml similarity index 68% rename from test-resources/dynamic-secret-in-task-project1-secret.yaml rename to test/e2e/testdata/dynamic-secret-1m5zypx.yaml index c961f5d8..e93782ba 100644 --- a/test-resources/dynamic-secret-in-task-project1-secret.yaml +++ b/test/e2e/testdata/dynamic-secret-1m5zypx.yaml @@ -5,5 +5,6 @@ kind: Secret metadata: labels: lagoon.sh/dynamic-secret: "true" - name: test-dynamic-secret + name: dynamic-secret-1m5zypx + namespace: nginx-example-main type: Opaque \ No newline at end of file diff --git a/test-resources/k8upv1-crds.yaml b/test/e2e/testdata/k8up-v1-crds.yaml similarity index 100% rename from test-resources/k8upv1-crds.yaml rename to test/e2e/testdata/k8up-v1-crds.yaml diff --git a/test/e2e/testdata/k8up-v1-restore.json b/test/e2e/testdata/k8up-v1-restore.json new file mode 100644 index 00000000..8605665b --- /dev/null +++ b/test/e2e/testdata/k8up-v1-restore.json @@ -0,0 +1,17 @@ +{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:misc", + "payload":"{ + \"misc\":{ + \"miscResource\":\"eyJtZXRhZGF0YSI6eyJuYW1lIjoicmVzdG9yZS1iZjA3MmEwLXVxeHFvNCJ9LCJzcGVjIjp7InNuYXBzaG90IjoiYmYwNzJhMDllMTc3MjZkYTU0YWRjNzk5MzZlYzg3NDU1MjE5OTM1OTlkNDEyMTFkZmM5NDY2ZGZkNWJjMzJhNSIsInJlc3RvcmVNZXRob2QiOnsiczMiOnt9fSwiYmFja2VuZCI6eyJzMyI6eyJidWNrZXQiOiJiYWFzLW5naW54LWV4YW1wbGUifSwicmVwb1Bhc3N3b3JkU2VjcmV0UmVmIjp7ImtleSI6InJlcG8tcHciLCJuYW1lIjoiYmFhcy1yZXBvLXB3In19fX0=\" + }, + \"key\":\"deploytarget:restic:backup:restore\", + \"environment\":{ + \"name\":\"main\", + \"openshiftProjectName\":\"nginx-example-main\" + }, + \"project\":{ + \"name\":\"nginx-example\" + }, + 
\"advancedTask\":{} + }", +"payload_encoding":"string" +} \ No newline at end of file diff --git a/test-resources/k8upv1alpha1-crds.yaml b/test/e2e/testdata/k8up-v1alpha1-crds.yaml similarity index 100% rename from test-resources/k8upv1alpha1-crds.yaml rename to test/e2e/testdata/k8up-v1alpha1-crds.yaml diff --git a/test/e2e/testdata/k8up-v1alpha1-restore.json b/test/e2e/testdata/k8up-v1alpha1-restore.json new file mode 100644 index 00000000..58def511 --- /dev/null +++ b/test/e2e/testdata/k8up-v1alpha1-restore.json @@ -0,0 +1,17 @@ +{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:misc", + "payload":"{ + \"misc\":{ + \"miscResource\":\"eyJhcGlWZXJzaW9uIjoiYmFja3VwLmFwcHVpby5jaC92MWFscGhhMSIsImtpbmQiOiJSZXN0b3JlIiwibWV0YWRhdGEiOnsibmFtZSI6InJlc3RvcmUtYmYwNzJhMC11cXhxbzMifSwic3BlYyI6eyJzbmFwc2hvdCI6ImJmMDcyYTA5ZTE3NzI2ZGE1NGFkYzc5OTM2ZWM4NzQ1NTIxOTkzNTk5ZDQxMjExZGZjOTQ2NmRmZDViYzMyYTUiLCJyZXN0b3JlTWV0aG9kIjp7InMzIjp7fX0sImJhY2tlbmQiOnsiczMiOnsiYnVja2V0IjoiYmFhcy1uZ2lueC1leGFtcGxlIn0sInJlcG9QYXNzd29yZFNlY3JldFJlZiI6eyJrZXkiOiJyZXBvLXB3IiwibmFtZSI6ImJhYXMtcmVwby1wdyJ9fX19\" + }, + \"key\":\"deploytarget:restic:backup:restore\", + \"environment\":{ + \"name\":\"main\", + \"openshiftProjectName\":\"nginx-example-main\" + }, + \"project\":{ + \"name\":\"nginx-example\" + }, + \"advancedTask\":{} + }", +"payload_encoding":"string" +} \ No newline at end of file diff --git a/test-resources/example-project3.yaml b/test/e2e/testdata/lagoon-build-1m5zypx.yaml similarity index 95% rename from test-resources/example-project3.yaml rename to test/e2e/testdata/lagoon-build-1m5zypx.yaml index 57cba070..ba50e261 100644 --- a/test-resources/example-project3.yaml +++ b/test/e2e/testdata/lagoon-build-1m5zypx.yaml @@ -2,6 +2,9 @@ kind: LagoonBuild apiVersion: crd.lagoon.sh/v1beta1 metadata: name: lagoon-build-1m5zypx + namespace: remote-controller-system + labels: + lagoon.sh/controller: remote-controller-system spec: build: ci: 'true' #to make sure that 
readwritemany is changed to readwriteonce diff --git a/test-resources/example-project1.yaml b/test/e2e/testdata/lagoon-build-7m5zypx.yaml similarity index 96% rename from test-resources/example-project1.yaml rename to test/e2e/testdata/lagoon-build-7m5zypx.yaml index 8b620e46..f1ed59e8 100644 --- a/test-resources/example-project1.yaml +++ b/test/e2e/testdata/lagoon-build-7m5zypx.yaml @@ -2,7 +2,9 @@ kind: LagoonBuild apiVersion: crd.lagoon.sh/v1beta2 metadata: name: lagoon-build-7m5zypx + namespace: remote-controller-system labels: + lagoon.sh/controller: remote-controller-system crd.lagoon.sh/version: v1beta2 spec: build: diff --git a/test-resources/example-project2.yaml b/test/e2e/testdata/lagoon-build-8m5zypx.yaml similarity index 96% rename from test-resources/example-project2.yaml rename to test/e2e/testdata/lagoon-build-8m5zypx.yaml index 904528df..1ac0dccf 100644 --- a/test-resources/example-project2.yaml +++ b/test/e2e/testdata/lagoon-build-8m5zypx.yaml @@ -2,7 +2,9 @@ kind: LagoonBuild apiVersion: crd.lagoon.sh/v1beta2 metadata: name: lagoon-build-8m5zypx + namespace: remote-controller-system labels: + lagoon.sh/controller: remote-controller-system crd.lagoon.sh/version: v1beta2 spec: build: diff --git a/test/e2e/testdata/lagoon-build-9m5zypx.json b/test/e2e/testdata/lagoon-build-9m5zypx.json new file mode 100644 index 00000000..bcd95b64 --- /dev/null +++ b/test/e2e/testdata/lagoon-build-9m5zypx.json @@ -0,0 +1,43 @@ +{ + "properties":{ + "delivery_mode":2 + }, + "routing_key":"ci-local-controller-kubernetes:builddeploy", + "payload":"{ + \"metadata\": { + \"name\": \"lagoon-build-9m5zypx\" + }, + \"spec\": { + \"build\": { + \"ci\": \"true\", + \"type\": \"branch\" + }, + \"gitReference\": \"origin\/main\", + \"project\": { + \"name\": \"nginx-example\", + \"environment\": \"main\", + \"uiLink\": \"https:\/\/dashboard.amazeeio.cloud\/projects\/project\/project-environment\/deployments\/lagoon-build-9m5zypx\", + \"routerPattern\": \"main-nginx-example\", + 
\"environmentType\": \"production\", + \"productionEnvironment\": \"main\", + \"standbyEnvironment\": \"\", + \"gitUrl\": \"https:\/\/github.com\/shreddedbacon\/lagoon-nginx-example.git\", + \"deployTarget\": \"kind\", + \"projectSecret\": \"4d6e7dd0f013a75d62a0680139fa82d350c2a1285f43f867535bad1143f228b1\", + \"key\": \"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDWFFJQkFBS0JnUUNjc1g2RG5KNXpNb0RqQ2R6a1JFOEg2TEh2TDQzaUhsekJLTWo4T1VNV05ZZG5YekdqCkR5Mkp1anQ3ZDNlMTVLeC8zOFo5UzJLdHNnVFVtWi9lUlRQSTdabE1idHRJK250UmtyblZLblBWNzhEeEFKNW8KTGZtQndmdWE2MnlVYnl0cnpYQ2pwVVJrQUlBMEZiR2VqS2Rvd3cxcnZGMzJoZFUzQ3ZIcG5rKzE2d0lEQVFBQgpBb0dCQUkrV0dyL1NDbVMzdCtIVkRPVGtMNk9vdVR6Y1QrRVFQNkVGbGIrRFhaV0JjZFhwSnB3c2NXZFBEK2poCkhnTEJUTTFWS3hkdnVEcEE4aW83cUlMTzJWYm1MeGpNWGk4TUdwY212dXJFNVJydTZTMXJzRDl2R0c5TGxoR3UKK0pUSmViMVdaZFduWFZ2am5LbExrWEV1eUthbXR2Z253Um5xNld5V05OazJ6SktoQWtFQThFenpxYnowcFVuTApLc241K2k0NUdoRGVpRTQvajRtamo1b1FHVzJxbUZWT2pHaHR1UGpaM2lwTis0RGlTRkFyMkl0b2VlK085d1pyCkRINHBkdU5YOFFKQkFLYnVOQ3dXK29sYXA4R2pUSk1TQjV1MW8wMVRHWFdFOGhVZG1leFBBdjl0cTBBT0gzUUQKUTIrM0RsaVY0ektoTlMra2xaSkVjNndzS0YyQmJIby81NXNDUVFETlBJd24vdERja3loSkJYVFJyc1RxZEZuOApCUWpZYVhBZTZEQ3o1eXg3S3ZFSmp1K1h1a01xTXV1ajBUSnpITFkySHVzK3FkSnJQVG9VMDNSS3JHV2hBa0JFCnB3aXI3Vk5pYy9jMFN2MnVLcWNZWWM1a2ViMnB1R0I3VUs1Q0lvaWdGakZzNmFJRDYyZXJwVVJ3S0V6RlFNbUgKNjQ5Y0ZXemhMVlA0aU1iZFREVHJBa0FFMTZXU1A3WXBWOHV1eFVGMGV0L3lFR3dURVpVU2R1OEppSTBHN0tqagpqcVR6RjQ3YkJZc0pIYTRYcWpVb2E3TXgwcS9FSUtRWkJ2NGFvQm42bGFOQwotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==\", + \"monitoring\": { + \"contact\": \"1234\", + \"statuspageID\": \"1234\" + }, + \"variables\": { + \"project\": \"W3sibmFtZSI6IkxBR09PTl9TWVNURU1fUk9VVEVSX1BBVFRFUk4iLCJ2YWx1ZSI6IiR7ZW52aXJvbm1lbnR9LiR7cHJvamVjdH0uZXhhbXBsZS5jb20iLCJzY29wZSI6ImludGVybmFsX3N5c3RlbSJ9XQ==\", + \"environment\": \"W10=\" + } + }, + \"branch\": { + \"name\": \"main\" + } + } + }", + "payload_encoding":"string" +} \ No newline at end of file diff --git a/test-resources/dynamic-secret-in-task-project1.yaml 
b/test/e2e/testdata/lagoon-task-1m5zypx.yaml
similarity index 83%
rename from test-resources/dynamic-secret-in-task-project1.yaml
rename to test/e2e/testdata/lagoon-task-1m5zypx.yaml
index e79cfa0f..20d1c5f1 100644
--- a/test-resources/dynamic-secret-in-task-project1.yaml
+++ b/test/e2e/testdata/lagoon-task-1m5zypx.yaml
@@ -1,12 +1,13 @@
 apiVersion: crd.lagoon.sh/v1beta2
 kind: LagoonTask
 metadata:
-  name: "lagoon-advanced-task-example-task-project-1"
+  name: lagoon-task-1m5zypx
+  namespace: nginx-example-main
   labels:
-    lagoon.sh/controller: lagoon
     lagoon.sh/taskStatus: Pending
     lagoon.sh/taskType: advanced
     crd.lagoon.sh/version: v1beta2
+    lagoon.sh/controller: remote-controller-system
 spec:
   advancedTask:
     JSONPayload: e30=
diff --git a/test/e2e/testdata/remove-environment.json b/test/e2e/testdata/remove-environment.json
new file mode 100644
index 00000000..d455b3b3
--- /dev/null
+++ b/test/e2e/testdata/remove-environment.json
@@ -0,0 +1,10 @@
+{"properties":{"delivery_mode":2},"routing_key":"ci-local-controller-kubernetes:remove",
+ "payload":"{
+    \"projectName\": \"nginx-example\",
+    \"type\":\"branch\",
+    \"forceDeleteProductionEnvironment\":true,
+    \"branch\":\"main\",
+    \"openshiftProjectName\":\"nginx-example-main\"
+  }",
+"payload_encoding":"string"
+}
\ No newline at end of file
diff --git a/test-resources/results/k8upv1.yaml b/test/e2e/testdata/results/k8up-v1.yaml
similarity index 95%
rename from test-resources/results/k8upv1.yaml
rename to test/e2e/testdata/results/k8up-v1.yaml
index db25b92a..3033c1ce 100644
--- a/test-resources/results/k8upv1.yaml
+++ b/test/e2e/testdata/results/k8up-v1.yaml
@@ -10,4 +10,4 @@ spec:
       name: baas-repo-pw
     s3:
       bucket: baas-nginx-example
-  snapshot: bf072a09e17726da54adc79936ec8745521993599d41211dfc9466dfd5bc32a5
+  snapshot: bf072a09e17726da54adc79936ec8745521993599d41211dfc9466dfd5bc32a5
\ No newline at end of file
diff --git a/test-resources/results/k8upv1alpha1.yaml b/test/e2e/testdata/results/k8up-v1alpha1.yaml
similarity 
index 96% rename from test-resources/results/k8upv1alpha1.yaml rename to test/e2e/testdata/results/k8up-v1alpha1.yaml index 6daaefe6..8c7f10c1 100644 --- a/test-resources/results/k8upv1alpha1.yaml +++ b/test/e2e/testdata/results/k8up-v1alpha1.yaml @@ -10,4 +10,4 @@ spec: name: baas-repo-pw s3: bucket: baas-nginx-example - snapshot: bf072a09e17726da54adc79936ec8745521993599d41211dfc9466dfd5bc32a5 + snapshot: bf072a09e17726da54adc79936ec8745521993599d41211dfc9466dfd5bc32a5 \ No newline at end of file diff --git a/test/e2e/testdata/results/restore.tpl b/test/e2e/testdata/results/restore.tpl new file mode 100644 index 00000000..0ba706cc --- /dev/null +++ b/test/e2e/testdata/results/restore.tpl @@ -0,0 +1,13 @@ +apiVersion: {{.apiVersion}} +kind: {{.kind}} +metadata: + name: {{.metadata.name}} + namespace: {{.metadata.namespace}} +spec: + backend: + repoPasswordSecretRef: + key: {{.spec.backend.repoPasswordSecretRef.key}} + name: {{.spec.backend.repoPasswordSecretRef.name}} + s3: + bucket: {{.spec.backend.s3.bucket}} + snapshot: {{.spec.snapshot}} \ No newline at end of file diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 00000000..9a7a7a33 --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,108 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "os" + "os/exec" + "strings" + + . 
"github.com/onsi/ginkgo/v2" //nolint:golint,revive
+)
+
+func warnError(err error) {
+	fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
+}
+
+// StartLocalServices starts local services
+func StartLocalServices() error {
+	cmd := exec.Command("docker", "compose", "up", "-d")
+	_, err := Run(cmd)
+	return err
+}
+
+// StopLocalServices stops local services
+func StopLocalServices() {
+	cmd := exec.Command("docker", "compose", "down")
+	if _, err := Run(cmd); err != nil {
+		warnError(err)
+	}
+}
+
+// InstallBulkStorage installs the bulk storage class.
+func InstallBulkStorage() error {
+	cmd := exec.Command("kubectl", "apply", "-f", "test/e2e/testdata/bulk-storageclass.yaml")
+	_, err := Run(cmd)
+	return err
+}
+
+// Run executes the provided command within this context
+func Run(cmd *exec.Cmd) ([]byte, error) {
+	dir, _ := GetProjectDir()
+	cmd.Dir = dir
+
+	if err := os.Chdir(cmd.Dir); err != nil {
+		fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err)
+	}
+
+	cmd.Env = append(os.Environ(), "GO111MODULE=on")
+	command := strings.Join(cmd.Args, " ")
+	fmt.Fprintf(GinkgoWriter, "running: %s\n", command)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output))
+	}
+
+	return output, nil
+}
+
+// LoadImageToKindClusterWithName loads a local docker image to the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := "remote-controller"
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	cmd := exec.Command("kind", kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines converts given command output string into individual objects
+// according to line breakers, and ignores the empty elements in it. 
+func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, err + } + wd = strings.Replace(wd, "/test/e2e", "", -1) + return wd, nil +}