From f1a63caea7b4b7e1f42b9e54a84aaba563f24478 Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Tue, 6 Aug 2024 10:43:19 +0200 Subject: [PATCH 1/3] Use new configuration in e2e workflows Signed-off-by: Alexandr Demicev --- .github/workflows/e2e-long.yaml | 12 ++++++------ .github/workflows/e2e-short-test.yaml | 6 +++++- .github/workflows/e2e-short.yaml | 6 +++++- .github/workflows/run-e2e-suite.yaml | 16 ++++++++++------ 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/.github/workflows/e2e-long.yaml b/.github/workflows/e2e-long.yaml index f6026380..8db7bfb1 100644 --- a/.github/workflows/e2e-long.yaml +++ b/.github/workflows/e2e-long.yaml @@ -15,7 +15,7 @@ jobs: test_name: Import via GitOps run_azure_janitor: false artifact_name: artifacts_import_gitops - use_eks: true + management_cluster_infrastructure: eks secrets: inherit e2e_import_gitops_v3: uses: ./.github/workflows/run-e2e-suite.yaml @@ -24,7 +24,7 @@ jobs: test_name: Import via GitOps [v3] run_azure_janitor: false artifact_name: artifacts_import_gitops_v3 - use_eks: true + management_cluster_infrastructure: eks secrets: inherit e2e_v2prov: uses: ./.github/workflows/run-e2e-suite.yaml @@ -33,7 +33,7 @@ jobs: test_name: v2 provisioning run_azure_janitor: true artifact_name: artifacts_v2prov - use_eks: true + management_cluster_infrastructure: eks secrets: inherit e2e_update_labels: uses: ./.github/workflows/run-e2e-suite.yaml @@ -42,7 +42,7 @@ jobs: test_name: Update labels run_azure_janitor: true artifact_name: artifacts_update_labels - use_eks: true + management_cluster_infrastructure: eks secrets: inherit e2e_embedded_capi_disabled: uses: ./.github/workflows/run-e2e-suite.yaml @@ -51,7 +51,7 @@ jobs: test_name: Embedded CAPI disabled run_azure_janitor: false artifact_name: artifacts_embedded_capi - use_eks: true + management_cluster_infrastructure: eks secrets: inherit e2e_embedded_capi_disabled_v3: uses: ./.github/workflows/run-e2e-suite.yaml @@ -60,5 +60,5 @@ jobs: test_name: 
Embedded CAPI disabled [v3] run_azure_janitor: false artifact_name: artifacts_embedded_capi_v3 - use_eks: true + management_cluster_infrastructure: eks secrets: inherit diff --git a/.github/workflows/e2e-short-test.yaml b/.github/workflows/e2e-short-test.yaml index 49867f9d..73749855 100644 --- a/.github/workflows/e2e-short-test.yaml +++ b/.github/workflows/e2e-short-test.yaml @@ -3,6 +3,10 @@ name: Run short e2e tests (with runner) on: workflow_dispatch: +env: + MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" + GINKGO_LABEL_FILTER: "short" + jobs: e2e: runs-on: org--rancher--amd64-containers @@ -17,7 +21,7 @@ jobs: with: go-version: "=1.22.0" - name: Run e2e tests - run: ISOLATED_MODE=true USE_EKS=false GINKGO_LABEL_FILTER=short make test-e2e + run: make test-e2e - name: Collect run artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/.github/workflows/e2e-short.yaml b/.github/workflows/e2e-short.yaml index 71f287db..a9d73c4c 100644 --- a/.github/workflows/e2e-short.yaml +++ b/.github/workflows/e2e-short.yaml @@ -4,6 +4,10 @@ on: pull_request: types: [opened, edited, synchronize, reopened, labeled, unlabeled] +env: + MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" + GINKGO_LABEL_FILTER: "short" + jobs: e2e: runs-on: ubuntu-latest @@ -34,7 +38,7 @@ jobs: with: go-version: "=1.22.0" - name: Run e2e tests - run: ISOLATED_MODE=true USE_EKS=false GINKGO_LABEL_FILTER=short make test-e2e + run: make test-e2e - name: Collect run artifacts if: always() uses: actions/upload-artifact@v4 diff --git a/.github/workflows/run-e2e-suite.yaml b/.github/workflows/run-e2e-suite.yaml index f6ffc187..14464b6a 100644 --- a/.github/workflows/run-e2e-suite.yaml +++ b/.github/workflows/run-e2e-suite.yaml @@ -1,6 +1,11 @@ on: workflow_call: inputs: + management_cluster_infrastructure: + description: "The infrastructure to use for the management cluster: eks, kind or isolated-kind" + type: string + required: false + default: "eks" test_suite: description: "The test suite
to run (i.e. path to it)" required: true @@ -18,10 +23,6 @@ on: required: false default: false type: boolean - use_eks: - description: "Use EKS for the management cluster" - required: true - type: boolean permissions: contents: read @@ -42,6 +43,9 @@ env: AWS_REGION: eu-west-2 AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + MANAGEMENT_CLUSTER_INFRASTRUCTURE: ${{ inputs.management_cluster_infrastructure }} + GINKGO_LABEL_FILTER: full + GINKGO_TESTS: ${{ inputs.test_suite }} jobs: run_e2e_tests: @@ -80,10 +84,10 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: build and push e2e image - if: ${{ inputs.use_eks }} + if: ${{ inputs.management_cluster_infrastructure == 'eks' }} run: make e2e-image-push - name: Run e2e tests - run: GINKGO_LABEL_FILTER=full USE_EKS=${{ inputs.use_eks }} GINKGO_TESTS=${{ inputs.test_suite }} make test-e2e + run: make test-e2e - name: Collect run artifacts if: always() uses: actions/upload-artifact@v4 From 99a043d12327b77633caa7282f81be4f3dd327b5 Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Tue, 6 Aug 2024 10:43:40 +0200 Subject: [PATCH 2/3] Replace flags with configuration variables in Makefile --- Makefile | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 816091dd..c255944e 100644 --- a/Makefile +++ b/Makefile @@ -75,13 +75,14 @@ E2E_CONF_FILE ?= $(ROOT_DIR)/$(TEST_DIR)/e2e/config/operator.yaml GINKGO_ARGS ?= SKIP_RESOURCE_CLEANUP ?= false USE_EXISTING_CLUSTER ?= false -USE_EKS ?= true -ISOLATED_MODE ?= false GITEA_CUSTOM_INGRESS ?= false GINKGO_NOCOLOR ?= false GINKGO_LABEL_FILTER ?= short GINKGO_TESTS ?= $(ROOT_DIR)/$(TEST_DIR)/e2e/suites/... 
+MANAGEMENT_CLUSTER_INFRASTRUCTURE ?= eks +E2ECONFIG_VARS ?= MANAGEMENT_CLUSTER_INFRASTRUCTURE=$(MANAGEMENT_CLUSTER_INFRASTRUCTURE) + # to set multiple ginkgo skip flags, if any ifneq ($(strip $(GINKGO_SKIP)),) _SKIP_ARGS := $(foreach arg,$(strip $(GINKGO_SKIP)),-skip="$(arg)") @@ -529,7 +530,7 @@ release-chart: $(HELM) $(NOTES) build-chart verify-gen .PHONY: test-e2e test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-end tests - $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \ + $(E2ECONFIG_VARS) $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \ -poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" --label-filter="$(GINKGO_LABEL_FILTER)" \ $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \ --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) $(GINKGO_TESTS) -- \ @@ -540,8 +541,6 @@ test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-en -e2e.chart-path=$(ROOT_DIR)/$(CHART_RELEASE_DIR) \ -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \ - -e2e.isolated-mode=$(ISOLATED_MODE) \ - -e2e.use-eks=$(USE_EKS) \ -e2e.gitea-custom-ingress=$(GITEA_CUSTOM_INGRESS) .PHONY: e2e-image From cba272a52ffdbef7ef1c6e44e8d29f8e03b8f273 Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Tue, 6 Aug 2024 10:44:05 +0200 Subject: [PATCH 3/3] Add different hooks to make the setup easier Signed-off-by: Alexandr Demicev --- test/e2e/config/operator.yaml | 1 + test/e2e/const.go | 10 ++ test/e2e/flags.go | 9 -- .../embedded-capi-disabled-v3/suite_test.go | 132 +++++---------- .../embedded-capi-disabled/suite_test.go | 132 +++++---------- .../e2e/suites/import-gitops-v3/suite_test.go | 130 +++++---------- test/e2e/suites/import-gitops/suite_test.go | 150 +++++------------- test/e2e/suites/migrate-gitops/suite_test.go | 101 
+++++------- test/e2e/suites/update-labels/suite_test.go | 87 +++------- test/e2e/suites/v2prov/suite_test.go | 85 +++------- test/testenv/gitea.go | 20 ++- test/testenv/rancher.go | 61 +++++++ test/testenv/setupcluster.go | 36 ++++- test/testenv/turtles.go | 40 ++++- 14 files changed, 393 insertions(+), 601 deletions(-) diff --git a/test/e2e/config/operator.yaml b/test/e2e/config/operator.yaml index a4b851c7..ef9a8101 100644 --- a/test/e2e/config/operator.yaml +++ b/test/e2e/config/operator.yaml @@ -27,6 +27,7 @@ intervals: default/wait-turtles-uninstall: ["10m", "30s"] variables: + MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" # supported options are eks, isolated-kind, kind RANCHER_VERSION: "v2.8.1" KUBERNETES_VERSION: "v1.28.6" KUBERNETES_MANAGEMENT_VERSION: "v1.27.0" diff --git a/test/e2e/const.go b/test/e2e/const.go index 9a723ffd..e94e4fbd 100644 --- a/test/e2e/const.go +++ b/test/e2e/const.go @@ -95,7 +95,17 @@ const ( NginxIngressDeployment = "ingress-nginx-controller" ) +type ManagementClusterInfrastuctureType string + const ( + ManagementClusterInfrastuctureEKS ManagementClusterInfrastuctureType = "eks" + ManagementClusterInfrastuctureIsolatedKind ManagementClusterInfrastuctureType = "isolated-kind" + ManagementClusterInfrastuctureKind ManagementClusterInfrastuctureType = "kind" +) + +const ( + ManagementClusterInfrastucture = "MANAGEMENT_CLUSTER_INFRASTRUCTURE" + KubernetesManagementVersionVar = "KUBERNETES_MANAGEMENT_VERSION" KubernetesVersionVar = "KUBERNETES_VERSION" diff --git a/test/e2e/flags.go b/test/e2e/flags.go index 249007b5..3be038b8 100644 --- a/test/e2e/flags.go +++ b/test/e2e/flags.go @@ -30,9 +30,6 @@ type FlagValues struct { // UseExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply). UseExistingCluster bool - // UseEKS instructs the test to create an EKS cluster instead of using kind. - UseEKS bool - // ArtifactFolder is the folder to store e2e test artifacts. 
ArtifactFolder string @@ -48,10 +45,6 @@ type FlagValues struct { // ChartPath is the path to the operator chart. ChartPath string - // IsolatedMode instructs the test to run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD - // or other providers that run in the same network as the bootstrap cluster. - IsolatedMode bool - // ClusterctlBinaryPath is the path to the clusterctl binary to use. ClusterctlBinaryPath string @@ -65,11 +58,9 @@ func InitFlags(values *FlagValues) { flag.StringVar(&values.ArtifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifact should be stored") flag.BoolVar(&values.SkipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&values.UseExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") - flag.BoolVar(&values.UseEKS, "e2e.use-eks", true, "if true, the test uses EKS for the management cluster") flag.StringVar(&values.HelmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary") flag.StringVar(&values.HelmExtraValuesDir, "e2e.helm-extra-values-path", "/tmp", "path to the extra values file") flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary") flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart") - flag.BoolVar(&values.IsolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and exposing the cluster to the internet. 
This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.") flag.BoolVar(&values.GiteaCustomIngress, "e2e.gitea-custom-ingress", false, "if true, the test will use a custom ingress for Gitea") } diff --git a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go index 4e0fa034..93cf3c4c 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go @@ -31,9 +31,7 @@ import ( . "github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" @@ -84,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a 
clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -115,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -137,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - 
DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. // and the deploy Rancher Turtles. rancherInput := testenv.DeployRancherInput{ @@ -177,7 +129,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: "embedded-cluster-api=false", @@ -186,14 +137,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -201,7 +156,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -210,14 +165,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.embedded-capi.disabled": "false", }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) // NOTE: there are no short or local tests in this suite @@ -233,7 +183,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -248,21 +198,7 @@ var _ = BeforeSuite(func() { }, }) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -270,16 +206,22 @@ var _ = BeforeSuite(func() { ChartName: 
e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/embedded-capi-disabled/suite_test.go b/test/e2e/suites/embedded-capi-disabled/suite_test.go index b29c9c77..4f38d3a7 100644 --- a/test/e2e/suites/embedded-capi-disabled/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled/suite_test.go @@ -29,14 +29,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -85,26 +83,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -116,16 +95,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: 
preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -138,32 +116,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. // and the deploy Rancher Turtles. 
rancherInput := testenv.DeployRancherInput{ @@ -178,7 +130,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: "embedded-cluster-api=false", @@ -187,14 +138,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -202,7 +157,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -212,14 +167,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.managementv3-cluster.enabled": "false", }, } - if flagVals.UseEKS { - 
rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) // NOTE: there are no short or local tests in this suite @@ -235,7 +185,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -250,21 +200,7 @@ var _ = BeforeSuite(func() { }, }) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -272,16 +208,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/import-gitops-v3/suite_test.go b/test/e2e/suites/import-gitops-v3/suite_test.go index 1db18d20..9733b23e 100644 --- a/test/e2e/suites/import-gitops-v3/suite_test.go +++ b/test/e2e/suites/import-gitops-v3/suite_test.go @@ -30,10 +30,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - "github.com/rancher/turtles/test/framework" turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" @@ -84,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -115,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, 
}) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -137,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -175,7 +127,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: 
e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -183,14 +134,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -206,14 +161,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.addon-provider-fleet.enabled": "true", }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. 
- rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) if !shortTestOnly() && !localTestOnly() { @@ -229,7 +179,7 @@ var _ = BeforeSuite(func() { }, CAPIProvidersYAML: e2e.FullProviders, TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "AWSEncodedCredentials": awsCreds, }, WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), WaitForDeployments: []testenv.NamespaceName{ @@ -245,21 +195,7 @@ var _ = BeforeSuite(func() { }) } - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -267,16 +203,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: 
e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index db86bf14..f429bc53 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -30,14 +30,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - opframework "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" + "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - "sigs.k8s.io/cluster-api/test/framework" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -87,26 +85,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -118,16 +97,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: 
e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -140,32 +118,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - opframework.CreateDockerRegistrySecret(ctx, opframework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -178,7 +130,6 @@ var _ = BeforeSuite(func() { 
RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -186,14 +137,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) if shortTestOnly() { @@ -202,7 +157,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, @@ -219,7 +174,7 @@ var _ = BeforeSuite(func() { upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: 
framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -227,21 +182,14 @@ var _ = BeforeSuite(func() { PostUpgradeSteps: []func(){}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { By("Waiting for CAAPF deployment to be available") - framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "caapf-controller-manager", @@ -257,20 +205,14 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), 
AdditionalValues: map[string]string{}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller testenv.DeployRancherTurtles(ctx, rtInput) @@ -322,21 +264,7 @@ var _ = BeforeSuite(func() { }) } - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -344,16 +272,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - 
Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { @@ -366,7 +300,7 @@ var _ = AfterSuite(func() { testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go index c514e143..08bfcbfe 100644 --- a/test/e2e/suites/migrate-gitops/suite_test.go +++ b/test/e2e/suites/migrate-gitops/suite_test.go @@ -30,14 +30,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/rancher/turtles/test/e2e" - turtlesframework "github.com/rancher/turtles/test/framework" + "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - "sigs.k8s.io/cluster-api/test/framework" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -90,19 +89,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ UseExistingCluster: flagVals.UseExistingCluster, @@ -111,16 +98,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, 
HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -133,10 +119,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -157,11 +139,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -169,11 +158,12 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } + testenv.DeployRancherTurtles(ctx, rtInput) testenv.DeployChartMuseum(ctx, 
testenv.DeployChartMuseumInput{ @@ -186,22 +176,21 @@ var _ = BeforeSuite(func() { upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: rtInput.AdditionalValues, } - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller + testenv.PreRancherTurtlesUpgradelHook(&upgradeInput, e2eConfig) + upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { By("Waiting for CAAPF deployment to be available") - framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "caapf-controller-manager", @@ -212,21 +201,7 @@ var _ = BeforeSuite(func() { testenv.UpgradeRancherTurtles(ctx, upgradeInput) - giteaValues := map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - } - - giteaServiceType := corev1.ServiceTypeNodePort - if flagVals.UseEKS { - giteaServiceType = 
corev1.ServiceTypeLoadBalancer - } - - if flagVals.GiteaCustomIngress { - giteaServiceType = corev1.ServiceTypeClusterIP - } - - giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), @@ -234,16 +209,22 @@ var _ = BeforeSuite(func() { ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), ValuesFilePath: "../../data/gitea/values.yaml", - Values: giteaValues, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - ServiceType: giteaServiceType, - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - }) + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult = testenv.DeployGitea(ctx, giteaInput) }) var _ = AfterSuite(func() { @@ -256,7 +237,7 @@ var _ = AfterSuite(func() { 
testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) diff --git a/test/e2e/suites/update-labels/suite_test.go b/test/e2e/suites/update-labels/suite_test.go index 5afa860b..8384ddf9 100644 --- a/test/e2e/suites/update-labels/suite_test.go +++ b/test/e2e/suites/update-labels/suite_test.go @@ -35,7 +35,6 @@ import ( "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" - turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -83,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", 
flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -114,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -136,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - 
DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -174,7 +127,6 @@ var _ = BeforeSuite(func() { RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherPatches: [][]byte{e2e.RancherSettingPatch}, @@ -182,14 +134,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -197,7 +153,7 @@ var _ = BeforeSuite(func() { HelmBinaryPath: flagVals.HelmBinaryPath, ChartPath: flagVals.ChartPath, CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), Tag: "v0.0.1", WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), @@ -206,14 
+162,9 @@ var _ = BeforeSuite(func() { "rancherTurtles.features.rancher-kubeconfigs.label": "true", // force to be true even if the default in teh chart changes }, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) testenv.RestartRancher(ctx, testenv.RestartRancherInput{ diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go index 71f05cd8..e9022a89 100644 --- a/test/e2e/suites/v2prov/suite_test.go +++ b/test/e2e/suites/v2prov/suite_test.go @@ -34,7 +34,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "github.com/rancher/turtles/test/e2e" - "github.com/rancher/turtles/test/framework" turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" ) @@ -83,26 +82,7 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - ingressType := testenv.NgrokIngress - dockerUsername := "" - dockerPassword := "" - var customClusterProvider testenv.CustomClusterProvider - - if flagVals.UseEKS { - Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") - dockerUsername = os.Getenv("GITHUB_USERNAME") - Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") - dockerPassword = os.Getenv("GITHUB_TOKEN") - Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") - customClusterProvider = testenv.EKSBootsrapCluster - 
Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") - ingressType = testenv.EKSNginxIngress - } - - if flagVals.IsolatedMode { - ingressType = testenv.CustomIngress - } + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) @@ -114,16 +94,15 @@ var _ = BeforeSuite(func() { Scheme: e2e.InitScheme(), ArtifactFolder: flagVals.ArtifactFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, HelmBinaryPath: flagVals.HelmBinaryPath, - CustomClusterProvider: customClusterProvider, + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IngressType: ingressType, + IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, CustomIngressDeployment: e2e.NginxIngressDeployment, @@ -136,32 +115,6 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - - if flagVals.UseEKS { - By("Getting ingress hostname") - svcRes := &testenv.WaitForServiceIngressHostnameResult{} - testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - ServiceName: "ingress-nginx-controller", - ServiceNamespace: "ingress-nginx", - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), 
"wait-rancher"), - }, svcRes) - hostName = svcRes.Hostname - - By("Deploying ghcr details") - framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{ - Name: "regcred", - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - Namespace: "rancher-turtles-system", - DockerServer: "https://ghcr.io", - DockerUsername: dockerUsername, - DockerPassword: dockerPassword, - }) - } - rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, @@ -175,7 +128,6 @@ var _ = BeforeSuite(func() { RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), Development: true, - RancherHost: hostName, RancherNamespace: e2e.RancherNamespace, RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar), @@ -184,14 +136,18 @@ var _ = BeforeSuite(func() { ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } - if !flagVals.IsolatedMode && !flagVals.UseEKS { - // i.e. 
we are using ngrok locally - rancherInput.RancherIngressConfig = e2e.IngressConfig - rancherInput.RancherServicePatch = e2e.RancherServicePatch - } - if flagVals.UseEKS { - rancherInput.RancherIngressClassName = "nginx" - } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + hostName = rancherHookResult.HostName + testenv.DeployRancher(ctx, rancherInput) rtInput := testenv.DeployRancherTurtlesInput{ @@ -205,14 +161,9 @@ var _ = BeforeSuite(func() { WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } - if flagVals.UseEKS { - rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" - } else { - // NOTE: this was the default previously in the chart locally and ok as - // we where loading the image into kind manually. - rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" - } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + testenv.DeployRancherTurtles(ctx, rtInput) testenv.RestartRancher(ctx, testenv.RestartRancherInput{ diff --git a/test/testenv/gitea.go b/test/testenv/gitea.go index 263d7280..5b77aee1 100644 --- a/test/testenv/gitea.go +++ b/test/testenv/gitea.go @@ -25,13 +25,14 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/rancher/turtles/test/e2e" + turtlesframework "github.com/rancher/turtles/test/framework" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" - - turtlesframework "github.com/rancher/turtles/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) type DeployGiteaInput struct { @@ -236,3 +237,18 @@ func UninstallGitea(ctx context.Context, input UninstallGiteaInput) { _, err := removeChart.Run(nil) Expect(err).ToNot(HaveOccurred()) } + +func PreGiteaInstallHook(giteaInput *DeployGiteaInput, e2eConfig *clusterctl.E2EConfig) { + infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) + + switch infrastructureType { + case e2e.ManagementClusterInfrastuctureEKS: + giteaInput.ServiceType = corev1.ServiceTypeLoadBalancer + case e2e.ManagementClusterInfrastuctureIsolatedKind: + giteaInput.ServiceType = corev1.ServiceTypeNodePort + case e2e.ManagementClusterInfrastuctureKind: + giteaInput.ServiceType = corev1.ServiceTypeClusterIP + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType)) + } +} diff --git a/test/testenv/rancher.go b/test/testenv/rancher.go index 9c7a1930..10a64bf3 100644 --- a/test/testenv/rancher.go +++ b/test/testenv/rancher.go @@ -18,6 +18,7 @@ package testenv import ( "context" + "fmt" "io/ioutil" "os" @@ -32,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest/komega" "sigs.k8s.io/yaml" @@ -414,3 +416,62 @@ func deployNgrokIngress(ctx context.Context, input RancherDeployIngressInput) { 
By("Setting up default ingress class") Expect(input.BootstrapClusterProxy.Apply(ctx, input.DefaultIngressClassPatch, "--server-side")).To(Succeed()) } + +type PreRancherInstallHookInput struct { + Ctx context.Context + RancherInput *DeployRancherInput + PreSetupOutput PreManagementClusterSetupResult + SetupClusterResult *SetupTestClusterResult + E2EConfig *clusterctl.E2EConfig +} + +type PreRancherInstallHookResult struct { + HostName string +} + +// PreRancherInstallHook is a hook that can be used to perform actions before Rancher is installed. +func PreRancherInstallHook(input *PreRancherInstallHookInput) PreRancherInstallHookResult { + hostName := "" + + switch e2e.ManagementClusterInfrastuctureType(input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture)) { + case e2e.ManagementClusterInfrastuctureEKS: + By("Getting ingress hostname") + svcRes := &WaitForServiceIngressHostnameResult{} + WaitForServiceIngressHostname(input.Ctx, WaitForServiceIngressHostnameInput{ + BootstrapClusterProxy: input.SetupClusterResult.BootstrapClusterProxy, + ServiceName: "ingress-nginx-controller", + ServiceNamespace: "ingress-nginx", + IngressWaitInterval: input.E2EConfig.GetIntervals(input.SetupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + }, svcRes) + + hostName = svcRes.Hostname + input.RancherInput.RancherHost = hostName + + By("Deploying ghcr details") + turtlesframework.CreateDockerRegistrySecret(input.Ctx, turtlesframework.CreateDockerRegistrySecretInput{ + Name: "regcred", + BootstrapClusterProxy: input.SetupClusterResult.BootstrapClusterProxy, + Namespace: "rancher-turtles-system", + DockerServer: "https://ghcr.io", + DockerUsername: input.PreSetupOutput.DockerUsername, + DockerPassword: input.PreSetupOutput.DockerPassword, + }) + + input.RancherInput.RancherIngressClassName = "nginx" + case e2e.ManagementClusterInfrastuctureIsolatedKind: + hostName = input.SetupClusterResult.IsolatedHostName + input.RancherInput.RancherHost = hostName + case 
e2e.ManagementClusterInfrastuctureKind: + // i.e. we are using ngrok locally + input.RancherInput.RancherIngressConfig = e2e.IngressConfig + input.RancherInput.RancherServicePatch = e2e.RancherServicePatch + hostName = input.E2EConfig.GetVariable(e2e.RancherHostnameVar) + input.RancherInput.RancherHost = hostName + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture))) + } + + return PreRancherInstallHookResult{ + HostName: hostName, + } +} diff --git a/test/testenv/setupcluster.go b/test/testenv/setupcluster.go index e1814ecc..cb4a2043 100644 --- a/test/testenv/setupcluster.go +++ b/test/testenv/setupcluster.go @@ -25,6 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/rancher/turtles/test/e2e" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/test/framework" @@ -43,7 +44,6 @@ type SetupTestClusterInput struct { ArtifactFolder string // Hostname string KubernetesVersion string - IsolatedMode bool HelmBinaryPath string CustomClusterProvider CustomClusterProvider } @@ -145,3 +145,37 @@ func getInternalClusterHostname(ctx context.Context, clusterProxy framework.Clus func createClusterName(baseName string) string { return fmt.Sprintf("%s-%s", baseName, util.RandomString(6)) } + +// PreManagementClusterSetupResult is the output of the preManagementClusterSetupHook. +type PreManagementClusterSetupResult struct { + IngressType IngressType + DockerUsername string + DockerPassword string + CustomClusterProvider CustomClusterProvider +} + +// PreManagementClusterSetupHook is a hook that can be used to perform actions before the management cluster is setup. 
+func PreManagementClusterSetupHook(e2eConfig *clusterctl.E2EConfig) PreManagementClusterSetupResult { + output := PreManagementClusterSetupResult{} + + infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) + + switch infrastructureType { + case e2e.ManagementClusterInfrastuctureEKS: + output.DockerUsername = os.Getenv("GITHUB_USERNAME") + Expect(output.DockerUsername).NotTo(BeEmpty(), "Github username is required") + output.DockerPassword = os.Getenv("GITHUB_TOKEN") + Expect(output.DockerPassword).NotTo(BeEmpty(), "Github token is required") + output.CustomClusterProvider = EKSBootsrapCluster + Expect(output.CustomClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") + output.IngressType = EKSNginxIngress + case e2e.ManagementClusterInfrastuctureIsolatedKind: + output.IngressType = CustomIngress + case e2e.ManagementClusterInfrastuctureKind: + output.IngressType = NgrokIngress + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType)) + } + + return output +} diff --git a/test/testenv/turtles.go b/test/testenv/turtles.go index 8a552589..6ad05c86 100644 --- a/test/testenv/turtles.go +++ b/test/testenv/turtles.go @@ -26,12 +26,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/rancher/turtles/test/e2e" + turtlesframework "github.com/rancher/turtles/test/framework" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" - - turtlesframework "github.com/rancher/turtles/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) type DeployRancherTurtlesInput struct { @@ -292,3 +293,38 @@ func UninstallRancherTurtles(ctx context.Context, input UninstallRancherTurtlesI _, err := removeChart.Run(nil) Expect(err).ToNot(HaveOccurred()) } + +// PreRancherTurtlesInstallHook is a hook that can be used to perform actions before Rancher Turtles is installed. +func PreRancherTurtlesInstallHook(rtInput *DeployRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) { + infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) + + switch infrastructureType { + case e2e.ManagementClusterInfrastuctureEKS: + rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" + rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" + case e2e.ManagementClusterInfrastuctureIsolatedKind: + // NOTE: rancher turtles image is loaded into kind manually, so we can set the imagePullPolicy to Never + rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" + case e2e.ManagementClusterInfrastuctureKind: + rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType)) + } +} + +// PreRancherTurtlesUpgradelHook is a hook that can be used to perform actions before Rancher Turtles is upgraded. 
+func PreRancherTurtlesUpgradelHook(rtUpgradeInput *UpgradeRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) { + infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) + switch infrastructureType { + case e2e.ManagementClusterInfrastuctureEKS: + rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}" + rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent" + case e2e.ManagementClusterInfrastuctureIsolatedKind: + // NOTE: rancher turtles image is loaded into kind manually, so we can set the imagePullPolicy to Never + rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" + case e2e.ManagementClusterInfrastuctureKind: + rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never" + default: + Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType)) + } +}