Skip to content

Commit

Permalink
test: add e2e test to ensure label patcher works
Browse files Browse the repository at this point in the history
This adds a new test suite that ensures that the Rancher label patcher
works and that a v2prov cluster can be provisioned with CAPI 1.5.x.

In the future we can use this test, instead of the existing one, to ensure
we don't break v2prov — although the existing test, which runs against the
HEAD version of Rancher, is still valuable.

Signed-off-by: Richard Case <[email protected]>
  • Loading branch information
richardcase committed Oct 10, 2023
1 parent 74e00a4 commit 2b2ae5a
Show file tree
Hide file tree
Showing 10 changed files with 362 additions and 19 deletions.
2 changes: 1 addition & 1 deletion charts/rancher-turtles/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ spec:
containers:
- args:
- --leader-elect
- --feature-gates=rancher-kube-secret-patch={{ .Values.rancherTurtles.rancher-kubeconfigs.label }}
- --feature-gates=rancher-kube-secret-patch={{ index .Values "rancherTurtles" "features" "rancher-kubeconfigs" "label"}}
{{- range .Values.rancherTurtles.managerArguments }}
- {{ . }}
{{- end }}
Expand Down
12 changes: 12 additions & 0 deletions charts/rancher-turtles/templates/rancher-turtles-components.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,18 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
- secrets
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
Expand Down
2 changes: 1 addition & 1 deletion charts/rancher-turtles/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ rancherTurtles:
tag: v0.0.0
imagePullPolicy: Never
namespace: rancher-turtles-system
managerArguments: {}
managerArguments: []
imagePullSecrets: []
features:
embedded-capi:
Expand Down
12 changes: 2 additions & 10 deletions internal/controllers/patch_kcfg_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/external"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/util/predicates"

provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1"
Expand All @@ -44,24 +43,18 @@ import (
// part of provisioning v2. Its job is to add the label required by Cluster API v1.5.0 and higher.
type RancherKubeconfigSecretReconciler struct {
Client client.Client
RancherClient client.Client
recorder record.EventRecorder
WatchFilterValue string
Scheme *runtime.Scheme

controller controller.Controller
externalTracker external.ObjectTracker
remoteClientGetter remote.ClusterClientGetter
controller controller.Controller
externalTracker external.ObjectTracker
}

// SetupWithManager will setup the controller.
func (r *RancherKubeconfigSecretReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
log := log.FromContext(ctx)

if r.remoteClientGetter == nil {
r.remoteClientGetter = remote.NewClusterClient
}

capiPredicates := predicates.All(log,
turtlespredicates.V2ProvClusterOwned(log),
turtlespredicates.NameHasSuffix(log, "-kubeconfig"),
Expand All @@ -87,7 +80,6 @@ func (r *RancherKubeconfigSecretReconciler) SetupWithManager(ctx context.Context

// +kubebuilder:rbac:groups="",resources=secrets;events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=provisioning.cattle.io,resources=clusters;clusters/status,verbs=get;list;watch

// Reconcile will patch v2prov created kubeconfig secrets to add the required owner label if its missing.
Expand Down
5 changes: 1 addition & 4 deletions internal/controllers/patch_kcfg_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/util/secret"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
Expand All @@ -44,9 +43,7 @@ var _ = Describe("Patch Rancher v2Prov Kubeconfig secrets", func() {

BeforeEach(func() {
r = &RancherKubeconfigSecretReconciler{
Client: cl,
RancherClient: cl,
remoteClientGetter: remote.NewClusterClient,
Client: cl,
}
clusterName = "test1"

Expand Down
3 changes: 2 additions & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,8 @@ func initFlags(fs *pflag.FlagSet) {

fs.BoolVar(&insecureSkipVerify, "insecure-skip-verify", false,
"Skip TLS certificate verification when connecting to Rancher. Only used for development and testing purposes. Use at your own risk.")

feature.MutableGates.AddFlag(fs)
}

func main() {
Expand Down Expand Up @@ -192,7 +194,6 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {

if err := (&controllers.RancherKubeconfigSecretReconciler{
Client: mgr.GetClient(),
RancherClient: rancherClient,
WatchFilterValue: watchFilterValue,
}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: concurrencyNumber}); err != nil {
setupLog.Error(err, "unable to create Rancher kubeconfig secret controller")
Expand Down
1 change: 1 addition & 0 deletions test/e2e/config/operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ intervals:
default/wait-getservice: ["60s", "5s"]
default/wait-eks-delete: ["20m", "30s"]
default/wait-aks-delete: ["20m", "30s"]
default/wait-azure-delete: ["15m", "30s"]
default/wait-azure: ["30m", "30s"]

variables:
Expand Down
165 changes: 165 additions & 0 deletions test/e2e/suites/update-labels/suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
//go:build e2e
// +build e2e

/*
Copyright 2023 SUSE.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package update_labels

import (
"context"
"fmt"
"os"
"path/filepath"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"k8s.io/klog/v2"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher-sandbox/rancher-turtles/test/e2e"
turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework"
"github.com/rancher-sandbox/rancher-turtles/test/testenv"
)

// Test suite flags.
var (
	// flagVals holds the parsed command-line flag values for this suite.
	flagVals *e2e.FlagValues
)

// Test suite global vars.
var (
	// e2eConfig to be used for this test, read from configPath.
	e2eConfig *clusterctl.E2EConfig

	// clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository
	// with the providers specified in the configPath.
	clusterctlConfigPath string

	// hostName is the host name for the Rancher Manager server.
	hostName string

	// ctx is the root context shared by suite setup and teardown.
	ctx = context.Background()

	// setupClusterResult captures the bootstrap cluster created in BeforeSuite
	// so that AfterSuite can tear it down.
	setupClusterResult *testenv.SetupTestClusterResult
)

func init() {
flagVals = &e2e.FlagValues{}
e2e.InitFlags(flagVals)
}

// TestE2E is the entry point for the update-labels e2e suite. It wires
// Gomega failures into Ginkgo, routes controller-runtime logging through
// klog, and then runs the specs.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)

	ctrl.SetLogger(klog.Background())

	// Name the suite after this package; the previous name
	// ("rancher-turtles-e2e-import-gitops") was copied from the
	// import-gitops suite and made test reports ambiguous.
	RunSpecs(t, "rancher-turtles-e2e-update-labels")
}

// BeforeSuite provisions the full test environment: it validates the suite
// flags, loads the e2e configuration, creates a local clusterctl repository,
// stands up the bootstrap cluster, and installs rancher-turtles, the ingress
// stack and Rancher itself.
var _ = BeforeSuite(func() {
	// Fail fast on invalid flag values before doing any expensive setup.
	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
	Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder)
	Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.")
	Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.")

	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)

	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))

	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)

	// Create (or attach to) the bootstrap cluster used for the whole suite.
	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
		UseExistingCluster:   flagVals.UseExistingCluster,
		E2EConfig:            e2eConfig,
		ClusterctlConfigPath: clusterctlConfigPath,
		Scheme:               e2e.InitScheme(),
		ArtifactFolder:       flagVals.ArtifactFolder,
		Hostname:             hostName,
		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesVersionVar),
		IsolatedMode:         flagVals.IsolatedMode,
		HelmBinaryPath:       flagVals.HelmBinaryPath,
	})

	// In isolated mode the cluster is reachable only via the host name the
	// test environment generated, so prefer that over the configured one.
	if flagVals.IsolatedMode {
		hostName = setupClusterResult.IsolatedHostName
	}

	// Install the rancher-turtles chart. CAPI is pinned to 1.5.x because this
	// suite specifically exercises the kubeconfig label patcher that CAPI
	// v1.5.0+ requires.
	testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{
		BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
		HelmBinaryPath:               flagVals.HelmBinaryPath,
		ChartPath:                    flagVals.ChartPath,
		CAPIProvidersSecretYAML:      e2e.CapiProvidersSecret,
		CAPIProvidersYAML:            e2e.CapiProviders,
		Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
		Image:                        "ghcr.io/rancher-sandbox/rancher-turtles-amd64",
		Tag:                          "v0.0.1",
		WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
		AdditionalValues: map[string]string{
			"cluster-api-operator.cluster-api.version":           "v1.5.2",
			"rancherTurtles.features.embedded-capi.disabled":     "false",
			"rancherTurtles.features.rancher-webhook.cleanup":    "false",
			"rancherTurtles.features.rancher-kubeconfigs.label":  "true", // force to be true even if the default in the chart changes
		},
	})

	// Expose Rancher: nginx ingress in isolated mode, ngrok otherwise.
	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
		BootstrapClusterProxy:    setupClusterResult.BootstrapClusterProxy,
		HelmBinaryPath:           flagVals.HelmBinaryPath,
		IsolatedMode:             flagVals.IsolatedMode,
		NginxIngress:             e2e.NginxIngress,
		NginxIngressNamespace:    e2e.NginxIngressNamespace,
		IngressWaitInterval:      e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
		NgrokApiKey:              e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
		NgrokAuthToken:           e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
		NgrokPath:                e2eConfig.GetVariable(e2e.NgrokPathVar),
		NgrokRepoName:            e2eConfig.GetVariable(e2e.NgrokRepoNameVar),
		NgrokRepoURL:             e2eConfig.GetVariable(e2e.NgrokUrlVar),
		DefaultIngressClassPatch: e2e.IngressClassPatch,
	})

	// Install Rancher itself; version 2.7.7 is pinned for this suite.
	testenv.DeployRancher(ctx, testenv.DeployRancherInput{
		BootstrapClusterProxy:  setupClusterResult.BootstrapClusterProxy,
		HelmBinaryPath:         flagVals.HelmBinaryPath,
		RancherChartRepoName:   "rancher-latest",
		RancherChartURL:        "https://releases.rancher.com/server-charts/latest",
		RancherChartPath:       "rancher-latest/rancher",
		RancherVersion:         "2.7.7",
		RancherHost:            hostName,
		RancherNamespace:       e2e.RancherNamespace,
		RancherPassword:        e2eConfig.GetVariable(e2e.RancherPasswordVar),
		RancherFeatures:        e2eConfig.GetVariable(e2e.RancherFeaturesVar),
		RancherSettingsPatch:   e2e.RancherSettingPatch,
		RancherWaitInterval:    e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
		ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
		IsolatedMode:           flagVals.IsolatedMode,
		RancherIngressConfig:   e2e.IngressConfig,
		RancherServicePatch:    e2e.RancherServicePatch,
	})
})

// AfterSuite tears down the bootstrap test cluster created in BeforeSuite,
// honouring the skip-cleanup flag and preserving collected artifacts.
var _ = AfterSuite(func() {
	testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{
		SetupTestClusterResult: *setupClusterResult,
		SkipCleanup:            flagVals.SkipCleanup,
		ArtifactFolder:         flagVals.ArtifactFolder,
	})
})
Loading

0 comments on commit 2b2ae5a

Please sign in to comment.