test: add e2e test to ensure label patcher works
This adds a new test suite that ensures the Rancher label patcher
works and that a v2prov cluster can be provisioned with CAPI 1.5.x.

In the future we could use this test in place of the existing one to
ensure we don't break v2prov, although the existing test runs against
the HEAD version of Rancher, which is valuable.

Signed-off-by: Richard Case <[email protected]>
richardcase committed Oct 9, 2023
1 parent 74e00a4 commit 65f64db
Showing 9 changed files with 359 additions and 5 deletions.
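
For context, the label patcher this suite exercises reconciles the kubeconfig secrets that Rancher v2prov creates and adds the owner label that CAPI 1.5.x expects. A minimal sketch of that idea follows; the function name and the exact label key are assumptions for illustration (the real reconciler is in internal/controllers/patch_kcfg_controller.go):

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchKubeconfigSecret sketches the patching step: it adds the CAPI
// cluster-name label to a v2prov kubeconfig secret when it is missing.
// The label key here is an assumption based on CAPI conventions.
func patchKubeconfigSecret(ctx context.Context, c client.Client, secret *corev1.Secret, clusterName string) error {
	const ownerLabel = "cluster.x-k8s.io/cluster-name"

	if secret.Labels[ownerLabel] == clusterName {
		return nil // already labelled, nothing to do
	}

	patched := secret.DeepCopy()
	if patched.Labels == nil {
		patched.Labels = map[string]string{}
	}
	patched.Labels[ownerLabel] = clusterName

	// Patch with a merge-from base so concurrent changes are not clobbered.
	return c.Patch(ctx, patched, client.MergeFrom(secret))
}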
2 changes: 1 addition & 1 deletion charts/rancher-turtles/templates/deployment.yaml
@@ -26,7 +26,7 @@ spec:
containers:
- args:
- --leader-elect
- - --feature-gates=rancher-kube-secret-patch={{ .Values.rancherTurtles.rancher-kubeconfigs.label }}
+ - --feature-gates=rancher-kube-secret-patch={{ index .Values "rancherTurtles" "features" "rancher-kubeconfigs" "label"}}
{{- range .Values.rancherTurtles.managerArguments }}
- {{ . }}
{{- end }}
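
The template fix above is needed because Go templates (which Helm uses) cannot address map keys containing dashes with dot notation, so index walks the keys explicitly; the corrected line also adds the missing "features" level in the path. A standalone sketch of the difference, with a hypothetical values tree mirroring the chart:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Hypothetical values tree mirroring the chart's values.yaml.
	values := map[string]any{
		"rancherTurtles": map[string]any{
			"features": map[string]any{
				"rancher-kubeconfigs": map[string]any{"label": true},
			},
		},
	}

	// Dot notation such as {{ .rancherTurtles.features.rancher-kubeconfigs.label }}
	// fails to parse because '-' is not a legal field-name character;
	// index walks the nested maps key by key instead.
	tmpl := template.Must(template.New("gate").Parse(
		`--feature-gates=rancher-kube-secret-patch={{ index .rancherTurtles "features" "rancher-kubeconfigs" "label" }}`))

	if err := tmpl.Execute(os.Stdout, values); err != nil {
		panic(err)
	}
	// Prints: --feature-gates=rancher-kube-secret-patch=true
}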
12 changes: 12 additions & 0 deletions charts/rancher-turtles/templates/rancher-turtles-components.yaml
@@ -87,6 +87,18 @@ rules:
- patch
- update
- watch
+ - apiGroups:
+   - ""
+   resources:
+   - events
+   - secrets
+   verbs:
+   - create
+   - get
+   - list
+   - patch
+   - update
+   - watch
- apiGroups:
- ""
resources:
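
These added chart rules mirror the +kubebuilder:rbac marker for groups="",resources=secrets;events (with the same verbs) on the kubeconfig-patch controller shown further down; the chart manifest is regenerated from those markers.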
2 changes: 1 addition & 1 deletion charts/rancher-turtles/values.yaml
@@ -3,7 +3,7 @@ rancherTurtles:
tag: v0.0.0
imagePullPolicy: Never
namespace: rancher-turtles-system
- managerArguments: {}
+ managerArguments: []
imagePullSecrets: []
features:
embedded-capi:
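
The deployment template ranges over managerArguments to emit extra container args, so an empty list is the matching default type rather than an empty map.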
1 change: 0 additions & 1 deletion internal/controllers/patch_kcfg_controller.go
@@ -87,7 +87,6 @@ func (r *RancherKubeconfigSecretReconciler) SetupWithManager(ctx context.Context

// +kubebuilder:rbac:groups="",resources=secrets;events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update
- // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=provisioning.cattle.io,resources=clusters;clusters/status,verbs=get;list;watch

// Reconcile will patch v2prov-created kubeconfig secrets to add the required owner label if it's missing.
2 changes: 2 additions & 0 deletions main.go
@@ -113,6 +113,8 @@ func initFlags(fs *pflag.FlagSet) {

fs.BoolVar(&insecureSkipVerify, "insecure-skip-verify", false,
"Skip TLS certificate verification when connecting to Rancher. Only used for development and testing purposes. Use at your own risk.")

+ feature.MutableGates.AddFlag(fs)
}

func main() {
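
MutableGates.AddFlag registers the --feature-gates flag that the chart change above populates. A sketch of how such a gate is typically declared with k8s.io/component-base/featuregate; the identifiers here are illustrative, not necessarily the project's actual feature package:

package feature

import (
	"k8s.io/component-base/featuregate"
)

// RancherKubeSecretPatch guards the kubeconfig-secret label patcher.
const RancherKubeSecretPatch featuregate.Feature = "rancher-kube-secret-patch"

// MutableGates can be toggled at start-up via --feature-gates.
var MutableGates featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

func init() {
	// Default off; the chart value rancher-kubeconfigs.label turns it on.
	if err := MutableGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		RancherKubeSecretPatch: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
}

A controller would then check MutableGates.Enabled(RancherKubeSecretPatch) before doing any patching.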
1 change: 1 addition & 0 deletions test/e2e/config/operator.yaml
@@ -16,6 +16,7 @@ intervals:
default/wait-getservice: ["60s", "5s"]
default/wait-eks-delete: ["20m", "30s"]
default/wait-aks-delete: ["20m", "30s"]
+ default/wait-azure-delete: ["15m", "30s"]
default/wait-azure: ["30m", "30s"]

variables:
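
Each interval is a [timeout, polling-interval] pair that GetIntervals feeds into Gomega's Eventually; the new wait-azure-delete entry bounds the cluster-deletion wait at the end of the new suite.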
165 changes: 165 additions & 0 deletions test/e2e/suites/update-labels/suite_test.go
@@ -0,0 +1,165 @@
//go:build e2e
// +build e2e

/*
Copyright 2023 SUSE.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package update_labels

import (
"context"
"fmt"
"os"
"path/filepath"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"k8s.io/klog/v2"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher-sandbox/rancher-turtles/test/e2e"
turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework"
"github.com/rancher-sandbox/rancher-turtles/test/testenv"
)

// Test suite flags.
var (
flagVals *e2e.FlagValues
)

// Test suite global vars.
var (
// e2eConfig to be used for this test, read from configPath.
e2eConfig *clusterctl.E2EConfig

// clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository
// with the providers specified in the configPath.
clusterctlConfigPath string

// hostName is the host name for the Rancher Manager server.
hostName string

ctx = context.Background()

setupClusterResult *testenv.SetupTestClusterResult
)

func init() {
flagVals = &e2e.FlagValues{}
e2e.InitFlags(flagVals)
}

func TestE2E(t *testing.T) {
RegisterFailHandler(Fail)

ctrl.SetLogger(klog.Background())

RunSpecs(t, "rancher-turtles-e2e-update-labels")
}

var _ = BeforeSuite(func() {
Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder)
Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.")
Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.")

By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)

By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))

hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)

setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
UseExistingCluster: flagVals.UseExistingCluster,
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
Scheme: e2e.InitScheme(),
ArtifactFolder: flagVals.ArtifactFolder,
Hostname: hostName,
KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesVersionVar),
IsolatedMode: flagVals.IsolatedMode,
HelmBinaryPath: flagVals.HelmBinaryPath,
})

if flagVals.IsolatedMode {
hostName = setupClusterResult.IsolatedHostName
}

testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
ChartPath: flagVals.ChartPath,
CAPIProvidersSecretYAML: e2e.CapiProvidersSecret,
CAPIProvidersYAML: e2e.CapiProviders,
Namespace: turtlesframework.DefaultRancherTurtlesNamespace,
Image: "ghcr.io/rancher-sandbox/rancher-turtles-amd64",
Tag: "v0.0.1",
WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
AdditionalValues: map[string]string{
"cluster-api-operator.cluster-api.version": "v1.5.2",
"rancherTurtles.features.embedded-capi.disabled": "false",
"rancherTurtles.features.rancher-webhook.cleanup": "false",
"rancherTurtles.features.rancher-kubeconfigs.label": "true", // force to be true even if the default in teh chart changes
},
})

testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
IsolatedMode: flagVals.IsolatedMode,
NginxIngress: e2e.NginxIngress,
NginxIngressNamespace: e2e.NginxIngressNamespace,
IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar),
NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar),
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar),
DefaultIngressClassPatch: e2e.IngressClassPatch,
})

testenv.DeployRancher(ctx, testenv.DeployRancherInput{
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
HelmBinaryPath: flagVals.HelmBinaryPath,
RancherChartRepoName: "rancher-latest",
RancherChartURL: "https://releases.rancher.com/server-charts/latest",
RancherChartPath: "rancher-latest/rancher",
RancherVersion: "2.7.7",
RancherHost: hostName,
RancherNamespace: e2e.RancherNamespace,
RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar),
RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar),
RancherSettingsPatch: e2e.RancherSettingPatch,
RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
IsolatedMode: flagVals.IsolatedMode,
RancherIngressConfig: e2e.IngressConfig,
RancherServicePatch: e2e.RancherServicePatch,
})
})

var _ = AfterSuite(func() {
testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{
SetupTestClusterResult: *setupClusterResult,
SkipCleanup: flagVals.SkipCleanup,
ArtifactFolder: flagVals.ArtifactFolder,
})
})
167 changes: 167 additions & 0 deletions test/e2e/suites/update-labels/update_labels_test.go
@@ -0,0 +1,167 @@
//go:build e2e
// +build e2e

/*
Copyright 2023 SUSE.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package update_labels

import (
"fmt"
"os"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/drone/envsubst/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/envtest/komega"

provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1"
"github.com/rancher-sandbox/rancher-turtles/test/e2e"
turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework"
)

var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still work with CAPI 1.5.x and label renaming", Label(e2e.FullTestLabel), func() {

BeforeEach(func() {
komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
komega.SetContext(ctx)
})

It("Should create a RKE2 cluster in Azure", func() {
azSubId := e2eConfig.GetVariable(e2e.AzureSubIDVar)
Expect(azSubId).ToNot(BeEmpty(), "Azure Subscription ID is required")
azClientId := e2eConfig.GetVariable(e2e.AzureClientIDVar)
Expect(azClientId).ToNot(BeEmpty(), "Azure Client ID is required")
azClientSecret := e2eConfig.GetVariable(e2e.AzureClientSecretVar)
Expect(azClientSecret).ToNot(BeEmpty(), "Azure Client Secret is required")

rke2Version := e2eConfig.GetVariable(e2e.RKE2VersionVar)
Expect(rke2Version).ToNot(BeEmpty(), "RKE2 version is required")

credsSecretName := "cc-test99"
credsName := "az-ecm"
poolName := "az-test-pool"
clusterName := "az-cluster1"

lookupResult := &turtlesframework.RancherLookupUserResult{}
turtlesframework.RancherLookupUser(ctx, turtlesframework.RancherLookupUserInput{
Username: "admin",
ClusterProxy: setupClusterResult.BootstrapClusterProxy,
}, lookupResult)

turtlesframework.CreateSecret(ctx, turtlesframework.CreateSecretInput{
Creator: setupClusterResult.BootstrapClusterProxy.GetClient(),
Name: credsSecretName,
Namespace: "cattle-global-data",
Type: corev1.SecretTypeOpaque,
Data: map[string]string{
"azurecredentialConfig-clientId": azClientId,
"azurecredentialConfig-clientSecret": azClientSecret,
"azurecredentialConfig-environment": "AzurePublicCloud",
"azurecredentialConfig-subscriptionId": azSubId,
"azurecredentialConfig-tenantId": "",
},
Annotations: map[string]string{
"field.cattle.io/name": credsName,
"provisioning.cattle.io/driver": "azure",
"field.cattle.io/creatorId": lookupResult.User,
},
Labels: map[string]string{
"cattle.io/creator": "norman",
},
})

rkeConfig, err := envsubst.Eval(string(e2e.V2ProvAzureRkeConfig), func(s string) string {
switch s {
case "POOL_NAME":
return poolName
case "USER":
return lookupResult.User
default:
return os.Getenv(s)
}
})
Expect(err).ToNot(HaveOccurred())
Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed to apply Azure RKE config")

cluster, err := envsubst.Eval(string(e2e.V2ProvAzureCluster), func(s string) string {
switch s {
case "CLUSTER_NAME":
return clusterName
case "USER":
return lookupResult.User
case "CREDENTIAL_SECRET":
return fmt.Sprintf("cattle-global-data:%s", credsSecretName)
case "KUBERNETES_VERSION":
return rke2Version
case "AZ_CONFIG_NAME":
return poolName
default:
return os.Getenv(s)
}
})
Expect(err).ToNot(HaveOccurred())
Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed to apply Azure cluster config")

By("Waiting for the rancher cluster record to appear")
rancherCluster := &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{
Namespace: "fleet-default",
Name: clusterName,
}}
Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())

By("Waiting for the rancher cluster to have a deployed agent")
Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue()))

By("Waiting for the rancher cluster to be ready")
Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue()))

By("Getting kubeconfig from Rancher for new cluster")
rancherKubeconfig := &turtlesframework.RancherGetClusterKubeconfigResult{}
turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
Getter: setupClusterResult.BootstrapClusterProxy.GetClient(),
SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name),
Namespace: rancherCluster.Namespace,
RancherServerURL: hostName,
WriteToTempFile: true,
}, rancherKubeconfig)

By("Using kubeconfig to query new cluster")
rancherConnectRes := &turtlesframework.RunCommandResult{}
turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
Command: "kubectl",
Args: []string{
"--kubeconfig",
rancherKubeconfig.TempFilePath,
"get",
"nodes",
"--insecure-skip-tls-verify",
},
}, rancherConnectRes)
Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes returned a non-zero exit code")

By("Deleting cluster from Rancher")
err = setupClusterResult.BootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster)
Expect(err).NotTo(HaveOccurred(), "Failed to delete rancher cluster")

By("Waiting for the rancher cluster record to be removed")
Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted")
})
})