Merge pull request #145 from richardcase/aws_e2e
feat: add CAPA e2e test
alexander-demicev authored Sep 26, 2023
2 parents 80726e2 + 63160a1 commit d36af93
Showing 12 changed files with 219 additions and 28 deletions.
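In short: this change gates spec selection and provider installation on Ginkgo v2 labels (short vs. full), installs the AWS provider (CAPA) only for full runs, adds an EKS managed-machine-pool cluster template, and plumbs per-spec wait intervals through CreateUsingGitOpsSpec.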
1 change: 1 addition & 0 deletions .github/workflows/e2e-long.yaml
@@ -12,6 +12,7 @@ env:
   NGROK_API_KEY: ${{ secrets.NGROK_API_KEY }}
   RANCHER_HOSTNAME: ${{ secrets.NGROK_DOMAIN }}
   RANCHER_PASSWORD: ${{ secrets.RANCHER_PASSWORD }}
+  CAPA_ENCODED_CREDS: ${{ secrets.CAPA_ENCODED_CREDS }}

jobs:
  e2e:
2 changes: 1 addition & 1 deletion .github/workflows/e2e-short.yaml
@@ -16,7 +16,7 @@ jobs:
        with:
          go-version: '=1.20.7'
      - name: Run e2e tests
-       run: ISOLATED_MODE=true make test-e2e
+       run: ISOLATED_MODE=true GINKGO_LABEL_FILTER=short make test-e2e
      - name: Collect run artifacts
        if: always()
        uses: actions/upload-artifact@v3
3 changes: 2 additions & 1 deletion Makefile
Expand Up @@ -60,6 +60,7 @@ SKIP_RESOURCE_CLEANUP ?= false
USE_EXISTING_CLUSTER ?= false
ISOLATED_MODE ?= false
GINKGO_NOCOLOR ?= false
GINKGO_LABEL_FILTER ?= "short || full"

# to set multiple ginkgo skip flags, if any
ifneq ($(strip $(GINKGO_SKIP)),)
@@ -465,7 +466,7 @@ release-chart: $(HELM) $(NOTES) build-chart verify-gen
test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-end tests
	RANCHER_HOSTNAME=$(RANCHER_HOSTNAME) \
	$(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \
-	-poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" \
+	-poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" --label-filter="$(GINKGO_LABEL_FILTER)" \
	$(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \
	--output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) $(ROOT_DIR)/$(TEST_DIR)/e2e -- \
	-e2e.artifacts-folder="$(ARTIFACTS)" \
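The new GINKGO_LABEL_FILTER knob drives Ginkgo v2's label-filter expression language: by default both short- and full-labeled specs run, while the short CI workflow narrows the filter to short. A minimal, self-contained sketch of the mechanism follows (the package and test names are illustrative, not part of this repo):

package demo_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

// Run with: ginkgo --label-filter="short || full" (the Makefile default),
// or --label-filter="short" (what e2e-short.yaml now requests).
func TestDemo(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Demo Suite")
}

var _ = Describe("fast path", Label("short"), func() {
    It("runs under both filters", func() { Expect(true).To(BeTrue()) })
})

var _ = Describe("slow path", Label("full"), func() {
    It("runs only when the filter matches full", func() { Expect(true).To(BeTrue()) })
})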
2 changes: 2 additions & 0 deletions test/e2e/config/operator.yaml
@@ -8,9 +8,11 @@ images:
intervals:
  default/wait-controllers: ["3m", "10s"]
  default/wait-rancher: ["15m", "30s"]
+  default/wait-capa-create-cluster: ["30m", "30s"]
  default/wait-gitea: ["3m", "10s"]
  default/wait-consistently: ["30s", "5s"]
  default/wait-getservice: ["60s", "5s"]
+  default/wait-eks-delete: ["20m", "30s"]

variables:
  RANCHER_VERSION: "v2.7.6"
1 change: 1 addition & 0 deletions test/e2e/data/capi-operator/capi-providers-secret.yaml
@@ -7,4 +7,5 @@ type: Opaque
stringData:
  CLUSTER_TOPOLOGY: "true"
  EXP_CLUSTER_RESOURCE_SET: "true"
+  EXP_MACHINE_POOL: "true"
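For reference, EXP_MACHINE_POOL turns on the experimental MachinePool feature gate in the core CAPI controllers; the EKS template added below relies on MachinePool and AWSManagedMachinePool resources, which would otherwise be ignored.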

14 changes: 14 additions & 0 deletions test/e2e/data/capi-operator/full-providers.yaml
@@ -0,0 +1,14 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: capa-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha1
kind: InfrastructureProvider
metadata:
  name: aws
  namespace: capa-system
spec:
  secretName: full-variables
  secretNamespace: default
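Applying this manifest asks the CAPI operator to install the AWS infrastructure provider (CAPA) into the capa-system namespace, sourcing its configuration variables from the full-variables secret referenced in spec.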
11 changes: 11 additions & 0 deletions test/e2e/data/capi-operator/full-variables.yaml
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: full-variables
  namespace: default
type: Opaque
stringData:
  AWS_B64ENCODED_CREDENTIALS: "{{ .AWSEncodedCredentials }}"
  EXP_MACHINE_POOL: "true"
  CAPA_LOGLEVEL: "4"
  EXP_EXTERNAL_RESOURCE_GC: "true"
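Note that {{ .AWSEncodedCredentials }} is not Kubernetes syntax: this manifest is rendered as a Go text/template by the getFullProviderVariables helper added to helpers_test.go below, which substitutes the value of the CAPA_ENCODED_CREDS variable before the secret is applied.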
62 changes: 62 additions & 0 deletions test/e2e/data/cluster-templates/aws-eks-mmp.yaml
@@ -0,0 +1,62 @@
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: "${CLUSTER_NAME}"
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ["192.168.0.0/16"]
  infrastructureRef:
    kind: AWSManagedCluster
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
    name: "${CLUSTER_NAME}"
  controlPlaneRef:
    kind: AWSManagedControlPlane
    apiVersion: controlplane.cluster.x-k8s.io/v1beta2
    name: "${CLUSTER_NAME}-control-plane"
---
kind: AWSManagedCluster
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
metadata:
  name: "${CLUSTER_NAME}"
  annotations:
    "helm.sh/resource-policy": keep
spec: {}
---
kind: AWSManagedControlPlane
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
metadata:
  name: "${CLUSTER_NAME}-control-plane"
  annotations:
    "helm.sh/resource-policy": keep
spec:
  region: "eu-west-1"
  version: "${KUBERNETES_VERSION}"
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachinePool
metadata:
  name: "${CLUSTER_NAME}-pool-0"
  annotations:
    "helm.sh/resource-policy": keep
spec:
  clusterName: "${CLUSTER_NAME}"
  replicas: ${WORKER_MACHINE_COUNT}
  template:
    spec:
      clusterName: "${CLUSTER_NAME}"
      bootstrap:
        dataSecretName: ""
      infrastructureRef:
        name: "${CLUSTER_NAME}-pool-0"
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
        kind: AWSManagedMachinePool
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: AWSManagedMachinePool
metadata:
  name: "${CLUSTER_NAME}-pool-0"
  annotations:
    "helm.sh/resource-policy": keep
spec: {}
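The ${CLUSTER_NAME}, ${KUBERNETES_VERSION} and ${WORKER_MACHINE_COUNT} placeholders are substituted when the test renders this template for the workload cluster. The empty dataSecretName and the empty AWSManagedMachinePool spec are deliberate: with EKS managed node groups, AWS handles node bootstrapping, so no CAPI bootstrap provider configuration is needed.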
21 changes: 21 additions & 0 deletions test/e2e/e2e_suite_test.go
@@ -78,6 +78,8 @@ const (
	giteaUserName     = "GITEA_USER_NAME"
	giteaUserPassword = "GITEA_USER_PWD"

+	capaEncodedCredentials = "CAPA_ENCODED_CREDS"
+
	authSecretName = "basic-auth-secret"

	shortTestLabel = "short"
@@ -357,6 +359,25 @@ func initRancherTurtles(clusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig) {
			Namespace: "capd-system",
		}},
	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
+
+	if !Label(fullTestLabel).MatchesLabelFilter(GinkgoLabelFilter()) {
+		By("Running fast tests, skipping additional infrastructure providers")
+		return
+	}
+
+	By("Adding CAPI infrastructure providers for full test")
+	providerVars := getFullProviderVariables(config, string(fullProvidersSecret))
+	Expect(clusterProxy.Apply(ctx, providerVars)).To(Succeed(), "Failed to apply secret for infra providers")
+	Expect(clusterProxy.Apply(ctx, fullProviders)).To(Succeed(), "Failed to apply infra providers for full test run")
+
+	By("Waiting for CAPI AWS provider deployment to be available")
+	framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
+		Getter: bootstrapClusterProxy.GetClient(),
+		Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{
+			Name:      "capa-controller-manager",
+			Namespace: "capa-system",
+		}},
+	}, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
}

func initRancher(clusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig) {
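The effect is that provider installation is gated on the same label filter that selects the specs: a short run never installs CAPA, while a full run installs it, and waits for capa-controller-manager, before any AWS spec executes.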
32 changes: 32 additions & 0 deletions test/e2e/helpers_test.go
@@ -20,14 +20,19 @@ limitations under the License.
package e2e

import (
	"bytes"
	"context"
	_ "embed"
	"fmt"
	"path/filepath"
	"text/template"

	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"

	turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework"
@@ -42,6 +47,12 @@ var (
	//go:embed data/capi-operator/capi-providers.yaml
	capiProviders []byte

+	//go:embed data/capi-operator/full-variables.yaml
+	fullProvidersSecret []byte
+
+	//go:embed data/capi-operator/full-providers.yaml
+	fullProviders []byte
+
	//go:embed data/rancher/ingress.yaml
	ingressConfig []byte
@@ -116,3 +127,24 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
	}
	cancelWatches()
}

func getFullProviderVariables(config *clusterctl.E2EConfig, varsTemplate string) []byte {
	capaCreds := config.GetVariable(capaEncodedCredentials)
	Expect(capaCreds).ToNot(BeEmpty(), "Invalid input. You must supply encoded CAPA credentials")

	providerVars := struct {
		AWSEncodedCredentials string
	}{
		AWSEncodedCredentials: capaCreds,
	}

	t := template.New("providers-variables")
	t, err := t.Parse(varsTemplate)
	Expect(err).ShouldNot(HaveOccurred(), "Failed to parse full infra variables template")

	var renderedTemplate bytes.Buffer
	err = t.Execute(&renderedTemplate, providerVars)
	Expect(err).NotTo(HaveOccurred(), "Failed to execute template")

	return renderedTemplate.Bytes()
}
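As a self-contained illustration of the rendering getFullProviderVariables performs (the credential value below is a made-up placeholder; the real one comes from the E2E config):

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

func main() {
    // Same pattern as getFullProviderVariables: parse the manifest as a
    // Go text/template, then substitute the encoded credentials.
    varsTemplate := `AWS_B64ENCODED_CREDENTIALS: "{{ .AWSEncodedCredentials }}"`

    t, err := template.New("providers-variables").Parse(varsTemplate)
    if err != nil {
        panic(err)
    }

    providerVars := struct{ AWSEncodedCredentials string }{"ZmFrZS1jcmVkcw=="} // dummy base64

    var rendered bytes.Buffer
    if err := t.Execute(&rendered, providerVars); err != nil {
        panic(err)
    }

    fmt.Println(rendered.String())
    // Output: AWS_B64ENCODED_CREDENTIALS: "ZmFrZS1jcmVkcw=="
}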
34 changes: 24 additions & 10 deletions test/e2e/import_gitops.go
@@ -52,6 +52,9 @@ type CreateUsingGitOpsSpecInput struct {
	ClusterTemplatePath string
	ClusterName         string

+	CAPIClusterCreateWaitName string
+	DeleteClusterWaitName     string
+
	// ControlPlaneMachineCount defines the number of control plane machines to be added to the workload cluster.
	// If not specified, 1 will be used.
	ControlPlaneMachineCount *int
@@ -73,14 +76,16 @@
// automatically imports into Rancher Manager.
func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGitOpsSpecInput) {
	var (
-		specName          = "creategitops"
-		input             CreateUsingGitOpsSpecInput
-		namespace         *corev1.Namespace
-		repoName          string
-		cancelWatches     context.CancelFunc
-		capiCluster       *types.NamespacedName
-		rancherKubeconfig *turtlesframework.RancherGetClusterKubeconfigResult
-		rancherConnectRes *turtlesframework.RunCommandResult
+		specName              = "creategitops"
+		input                 CreateUsingGitOpsSpecInput
+		namespace             *corev1.Namespace
+		repoName              string
+		cancelWatches         context.CancelFunc
+		capiCluster           *types.NamespacedName
+		rancherKubeconfig     *turtlesframework.RancherGetClusterKubeconfigResult
+		rancherConnectRes     *turtlesframework.RunCommandResult
+		capiClusterCreateWait []interface{}
+		deleteClusterWait     []interface{}
	)

	BeforeEach(func() {
@@ -95,6 +100,12 @@
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
		repoName = createRepoName(specName)

+		capiClusterCreateWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.CAPIClusterCreateWaitName)
+		Expect(capiClusterCreateWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.CAPIClusterCreateWaitName)
+
+		deleteClusterWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.DeleteClusterWaitName)
+		Expect(deleteClusterWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.DeleteClusterWaitName)
+
		capiCluster = &types.NamespacedName{
			Namespace: namespace.Name,
			Name:      input.ClusterName,
@@ -195,13 +206,16 @@
			input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).
			Should(Succeed(), "Failed to apply CAPI cluster definition to cluster via Fleet")

+		By("Waiting for cluster control plane to be Ready")
+		Eventually(komega.Object(capiCluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue()))
+
		By("Waiting for the CAPI cluster to be connectable")
		Eventually(func() error {
			remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, capiCluster.Namespace, capiCluster.Name).GetClient()
			namespaces := &corev1.NamespaceList{}

			return remoteClient.List(ctx, namespaces)
-		}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed(), "Failed to connect to workload cluster using CAPI kubeconfig")
+		}, capiClusterCreateWait...).Should(Succeed(), "Failed to connect to workload cluster using CAPI kubeconfig")

		By("Waiting for the rancher cluster record to appear")
		rancherCluster := &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{
@@ -246,7 +260,7 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGitOpsSpecInput) {
		})

		By("Waiting for the rancher cluster record to be removed")
-		Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted")
+		Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted")

	})

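These two wait names resolve against the intervals in test/e2e/config/operator.yaml: the Docker spec keeps the existing wait-rancher and wait-controllers timings, while the EKS spec picks up the new wait-capa-create-cluster (30m) and wait-eks-delete (20m) intervals, reflecting how much slower EKS provisioning and teardown are.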
64 changes: 48 additions & 16 deletions test/e2e/import_gitops_test.go
@@ -25,7 +25,7 @@ import (
. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(shortTestLabel), func() {
var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(shortTestLabel, fullTestLabel), func() {

	BeforeEach(func() {
		SetClient(bootstrapClusterProxy.GetClient())
@@ -34,21 +34,53 @@

	CreateUsingGitOpsSpec(ctx, func() CreateUsingGitOpsSpecInput {
		return CreateUsingGitOpsSpecInput{
-			E2EConfig:                e2eConfig,
-			BootstrapClusterProxy:    bootstrapClusterProxy,
-			ClusterctlConfigPath:     clusterctlConfigPath,
-			ClusterctlBinaryPath:     clusterctlBinaryPath,
-			ArtifactFolder:           artifactFolder,
-			ClusterTemplatePath:      "./data/cluster-templates/docker-kubeadm.yaml",
-			ClusterName:              "cluster1",
-			ControlPlaneMachineCount: ptr.To[int](1),
-			WorkerMachineCount:       ptr.To[int](1),
-			GitAddr:                  gitAddress,
-			GitAuthSecretName:        authSecretName,
-			SkipCleanup:              false,
-			SkipDeletionTest:         false,
-			LabelNamespace:           true,
-			RancherServerURL:         hostName,
+			E2EConfig:                 e2eConfig,
+			BootstrapClusterProxy:     bootstrapClusterProxy,
+			ClusterctlConfigPath:      clusterctlConfigPath,
+			ClusterctlBinaryPath:      clusterctlBinaryPath,
+			ArtifactFolder:            artifactFolder,
+			ClusterTemplatePath:       "./data/cluster-templates/docker-kubeadm.yaml",
+			ClusterName:               "cluster1",
+			ControlPlaneMachineCount:  ptr.To[int](1),
+			WorkerMachineCount:        ptr.To[int](1),
+			GitAddr:                   gitAddress,
+			GitAuthSecretName:         authSecretName,
+			SkipCleanup:               false,
+			SkipDeletionTest:          false,
+			LabelNamespace:            true,
+			RancherServerURL:          hostName,
+			CAPIClusterCreateWaitName: "wait-rancher",
+			DeleteClusterWaitName:     "wait-controllers",
		}
	})
})

var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(fullTestLabel), func() {

BeforeEach(func() {
SetClient(bootstrapClusterProxy.GetClient())
SetContext(ctx)
})

CreateUsingGitOpsSpec(ctx, func() CreateUsingGitOpsSpecInput {
return CreateUsingGitOpsSpecInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: bootstrapClusterProxy,
ClusterctlConfigPath: clusterctlConfigPath,
ClusterctlBinaryPath: clusterctlBinaryPath,
ArtifactFolder: artifactFolder,
ClusterTemplatePath: "./data/cluster-templates/aws-eks-mmp.yaml",
ClusterName: "cluster2",
ControlPlaneMachineCount: ptr.To[int](1),
WorkerMachineCount: ptr.To[int](1),
GitAddr: gitAddress,
GitAuthSecretName: authSecretName,
SkipCleanup: false,
SkipDeletionTest: false,
LabelNamespace: true,
RancherServerURL: hostName,
CAPIClusterCreateWaitName: "wait-capa-create-cluster",
DeleteClusterWaitName: "wait-eks-delete",
}
})
})
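Because this new spec carries only the full label and cluster creation needs real AWS credentials, it is exercised only by the long-running workflow, which now passes CAPA_ENCODED_CREDS through from repository secrets; the short workflow's GINKGO_LABEL_FILTER=short keeps it out of quick PR runs.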
