Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: E2E cloud env configuration to support volumesnapshot #56

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 33 additions & 1 deletion .github/workflows/continuous-delivery.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,11 @@ env:
GOLANG_VERSION: "1.20.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.20.0"
ROOK_VERSION: "v1.10.6"
EXTERNAL_SNAPSHOTTER_VERSION: "v6.2.2"
CNPG_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"


defaults:
run:
# default failure handling for shell scripts in 'run' steps
Expand Down Expand Up @@ -539,6 +542,7 @@ jobs:
BUILD_IMAGE: "false"
CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }}
E2E_DEFAULT_STORAGE_CLASS: standard
E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: ""
LOG_DIR: ${{ github.workspace }}/kind-logs/
DOCKER_REGISTRY_MIRROR: https://mirror.gcr.io
TEST_CLOUD_VENDOR: "local"
Expand Down Expand Up @@ -770,6 +774,7 @@ jobs:
BUILD_IMAGE: "false"
CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }}
E2E_DEFAULT_STORAGE_CLASS: rook-ceph-block
E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-rbdplugin-snapclass
TEST_CLOUD_VENDOR: "aks"
steps:
-
Expand Down Expand Up @@ -877,6 +882,8 @@ jobs:
echo "Waiting for Rook OSDs to be available"
kubectl wait deploy -n rook-ceph --for condition=available --timeout 480s -l app=rook-ceph-osd
kubectl apply -f ${ROOK_BASE_URL}/csi/rbd/storageclass.yaml
kubectl apply -f ${ROOK_BASE_URL}/csi/rbd/snapshotclass.yaml
kubectl annotate storageclass ${{env.E2E_DEFAULT_STORAGE_CLASS}} storage.kubernetes.io/default-snapshot-class=${{env.E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}} --overwrite
-
name: Prepare patch for customization
env:
Expand Down Expand Up @@ -1089,6 +1096,7 @@ jobs:
BUILD_IMAGE: "false"
CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }}
E2E_DEFAULT_STORAGE_CLASS: gp3
E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: ebs-csi-snapclass

AWS_REGION: eu-central-1
AWS_EKS_ADMIN_IAM_ROLES: ${{ secrets.AWS_EKS_ADMIN_IAM_ROLES }}
Expand Down Expand Up @@ -1165,9 +1173,24 @@ jobs:
# Updating .kubeconfig to use the correct version of client.authentication.k8s.io API
aws eks update-kubeconfig --name ${CLUSTER_NAME} --region ${AWS_REGION}

# Install the CRDs needed to support VolumeSnapshot
SNAPSHOTTER_BASE_URL=https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${{env.EXTERNAL_SNAPSHOTTER_VERSION}}
kubectl apply -f ${SNAPSHOTTER_BASE_URL}/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
kubectl apply -f ${SNAPSHOTTER_BASE_URL}/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
kubectl apply -f ${SNAPSHOTTER_BASE_URL}/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml

## Controller
kubectl apply -f ${SNAPSHOTTER_BASE_URL}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml
kubectl apply -f ${SNAPSHOTTER_BASE_URL}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml

# Install volume snapshot class
kubectl apply -f hack/e2e/volumesnapshotclass-ebs-csi.yaml
kubectl get volumesnapshotclass

# Change to use gp3 as the default storage class
kubectl annotate storageclass gp2 storageclass.kubernetes.io/is-default-class=false --overwrite
kubectl apply -f hack/e2e/storage-class-gp3.yaml
kubectl annotate storageclass ${{env.E2E_DEFAULT_STORAGE_CLASS}} storage.kubernetes.io/default-snapshot-class=${{env.E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}} --overwrite
kubectl get storageclass
-
name: Setup Velero
Expand Down Expand Up @@ -1448,7 +1471,8 @@ jobs:
DEBUG: "true"
BUILD_IMAGE: "false"
CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }}
E2E_DEFAULT_STORAGE_CLASS: standard
E2E_DEFAULT_STORAGE_CLASS: standard-rwo
E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: pd-csi-snapclass

ZONE: europe-west3-a
TEST_CLOUD_VENDOR: "gke"
Expand Down Expand Up @@ -1529,6 +1553,14 @@ jobs:
USE_GKE_GCLOUD_AUTH_PLUGIN: "True"
run: |
gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --zone ${{ env.ZONE }} --project ${{ secrets.GCP_PROJECT_ID }}
-
name: Configure Storage
run: |
# Install volume snapshot class
kubectl apply -f hack/e2e/volumesnapshotclass-pd-csi.yaml
# Change to use standard-rwo as the default storage class
kubectl annotate storageclass ${{env.E2E_DEFAULT_STORAGE_CLASS}} storage.kubernetes.io/default-snapshot-class=${{env.E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}} --overwrite
kubectl get storageclass
-
name: Prepare patch for customization
env:
Expand Down
8 changes: 8 additions & 0 deletions hack/e2e/volumesnapshotclass-ebs-csi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
name: ebs-csi-snapclass
annotations:
snapshot.storage.kubernetes.io/is-default-class: "true"
driver: ebs.csi.aws.com
deletionPolicy: Delete
8 changes: 8 additions & 0 deletions hack/e2e/volumesnapshotclass-pd-csi.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
name: pd-csi-snapclass
annotations:
snapshot.storage.kubernetes.io/is-default-class: "true"
driver: pd.csi.storage.gke.io
deletionPolicy: Delete
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: volume-snapshot
spec:
instances: 3

bootstrap:
initdb:
database: app
owner: app

# Persistent storage configuration
storage:
storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
size: 1Gi
walStorage:
storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
size: 1Gi
84 changes: 84 additions & 0 deletions tests/e2e/volume_snapshot_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
/*
Copyright The CloudNativePG Contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"fmt"
"os"

"github.com/cloudnative-pg/cloudnative-pg/tests"
"github.com/cloudnative-pg/cloudnative-pg/tests/utils"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// Test case for validating volume snapshots
// with different storage providers in different k8s environments
var _ = Describe("Verify volume snapshot", Label(tests.LabelBackupRestore, tests.LabelStorage), func() {
const (
sampleFile = fixturesDir + "/volume_snapshot/cluster_volume_snapshot.yaml.template"
clusterName = "volume-snapshot"
level = tests.Medium
)
BeforeEach(func() {
if testLevelEnv.Depth < int(level) {
Skip("Test depth is lower than the amount requested for this test")
}
// This need to be removed later
if IsLocal() {
Skip("This test is only run on AKS, EKS and GKE clusters for now")
}
})
// Initializing a global namespace variable to be used in each test case
var namespace, namespacePrefix string
// Gathering the default volumeSnapshot class for the current environment
volumeSnapshotClassName := os.Getenv("E2E_DEFAULT_VOLUMESNAPSHOT_CLASS")

Context("Can create a Volume Snapshot", Ordered, func() {
BeforeAll(func() {
var err error
// Initializing namespace variable to be used in test case
namespacePrefix = "volume-snapshot"
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
}
return env.DeleteNamespace(namespace)
})
// Creating a cluster with three nodes
AssertCreateCluster(namespace, clusterName, sampleFile, env)
})

It("Using the kubectl cnp plugin", func() {
err := utils.CreateVolumeSnapshotBackup(volumeSnapshotClassName, namespace, clusterName)
Expect(err).ToNot(HaveOccurred())

out, _, err := utils.Run(fmt.Sprintf("kubectl get volumesnapshot -n %v", namespace))
Expect(err).ToNot(HaveOccurred())
GinkgoWriter.Print("output of current volumesnapshot")
GinkgoWriter.Print(out)

out, _, err = utils.Run(fmt.Sprintf("kubectl get volumesnapshotcontent -n %v", namespace))
Expect(err).ToNot(HaveOccurred())
GinkgoWriter.Print("output of current volumesnapshotcontent")
GinkgoWriter.Print(out)
})
})
})
20 changes: 20 additions & 0 deletions tests/utils/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -492,3 +492,23 @@ func GetConditionsInClusterStatus(

return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType)
}

// CreateVolumeSnapshotBackup use kubectl plugin to create volumesnapshot backup
func CreateVolumeSnapshotBackup(
volumeSnapshotClass,
namespace,
clusterName string,
) error {
var err error
if volumeSnapshotClass == "" {
_, _, err = Run(fmt.Sprintf("kubectl cnp snapshot %v -n %v",
clusterName, namespace))
} else {
_, _, err = Run(fmt.Sprintf("kubectl cnp snapshot %v -c %v -n %v",
clusterName, volumeSnapshotClass, namespace))
}
if err != nil {
return err
}
return nil
}
Loading