From 46657933f4894588191b62076ca1d3f408a9711f Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 22 Oct 2024 15:58:02 +0200
Subject: [PATCH 01/10] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to 8d61352 (main) (#5865)

https://redirect.github.com/cloudnative-pg/cnpg-i `7e24b2e` -> `8d61352`
google.golang.org/protobuf `v1.34.2` -> `v1.35.1`
---
 go.mod | 4 ++--
 go.sum | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/go.mod b/go.mod
index d0ca1763c6..b519ff235d 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/cheynewallace/tabby v1.1.1
 	github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c
-	github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50
+	github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6
 	github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/evanphx/json-patch/v5 v5.9.0
@@ -114,7 +114,7 @@ require (
 	golang.org/x/tools v0.25.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 19a21dfdaa..9be832577f 100644
--- a/go.sum
+++ b/go.sum
@@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr
 github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
 github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c h1:JQK5GOXSukWTInG5GzgmlTwY/rs5yO446+xy09NqbLg=
 github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs=
-github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6 h1:QokKbYfQ0sRWMHDB0sVUL1H/kGQki+AXBfBRp7J+9Og=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc=
 github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg=
 github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -262,8 +262,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
 google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

From ccfa3be896dd5ab24a0c00a703ec73fd953dde4d Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 22 Oct 2024 23:04:19 +0200
Subject: [PATCH 02/10] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.77.2 (main) (#5919)

---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index b519ff235d..f6aa11ec03 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
 	github.com/mitchellh/go-ps v1.0.0
 	github.com/onsi/ginkgo/v2 v2.20.2
 	github.com/onsi/gomega v1.34.2
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2
 	github.com/prometheus/client_golang v1.20.5
 	github.com/robfig/cron v1.2.0
 	github.com/sethvargo/go-password v0.3.1
diff --git a/go.sum b/go.sum
index 9be832577f..1c1d7bd228 100644
--- a/go.sum
+++ b/go.sum
@@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 h1:XGoEXT6WTTihO+MD8MAao+YaQIH905HbK0WK2lyo28k=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 h1:F/MALZ518KfI1zEg+Kg8/uTzoXKDyqw+LNC/5irJlJE=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA=
 github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
 github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=

From 17a84eda078811f556ec119c035b3cc4f6592534 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 23 Oct 2024 10:26:51 +0200
Subject: [PATCH 03/10] chore(tests): Refactored backup and restore tests by separating backend-specific logic (#5735)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, all backup and restore tests for different backends (MinIO,
Azurite, Azure) were combined in a single file, making it difficult to
manage and maintain.

This change separates the tests, creating individual files for each
backend, with dedicated functions tailored to each backend's requirements.
This improves readability and organization of the code.

Closes #5632

Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Francesco Canovai Signed-off-by: Niccolò Fei Co-authored-by: Francesco Canovai Co-authored-by: Niccolò Fei --- hack/setup-cluster.sh | 2 +- tests/e2e/asserts_test.go | 389 +------ tests/e2e/backup_restore_azure_test.go | 486 +++++++++ tests/e2e/backup_restore_azurite_test.go | 353 ++++++ tests/e2e/backup_restore_minio_test.go | 800 ++++++++++++++ tests/e2e/backup_restore_test.go | 1250 ---------------------- tests/e2e/replica_mode_cluster_test.go | 33 +- tests/e2e/suite_test.go | 5 +- tests/e2e/tablespaces_test.go | 38 +- tests/e2e/upgrade_test.go | 27 +- tests/e2e/volume_snapshot_test.go | 30 +- tests/e2e/wal_restore_parallel_test.go | 57 +- tests/utils/azurite.go | 299 +++++- tests/utils/backup.go | 243 +---- tests/utils/{ => minio}/minio.go | 145 +-- tests/utils/secrets.go | 28 + 16 files changed, 2241 insertions(+), 1944 deletions(-) create mode 100644 tests/e2e/backup_restore_azure_test.go create mode 100644 tests/e2e/backup_restore_azurite_test.go create mode 100644 tests/e2e/backup_restore_minio_test.go delete mode 100644 tests/e2e/backup_restore_test.go rename tests/utils/{ => minio}/minio.go (76%) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 9b24a33022..a5444c392f 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -83,7 +83,7 @@ registry_name=registry.dev POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} PGBOUNCER_IMG=${PGBOUNCER_IMG:-$(grep 'DefaultPgbouncerImage.*=' "${ROOT_DIR}/pkg/specs/pgbouncer/deployments.go" | cut -f 2 -d \")} -MINIO_IMG=${MINIO_IMG:-$(grep 'minioImage.*=' "${ROOT_DIR}/tests/utils/minio.go" | cut -f 2 -d \")} +MINIO_IMG=${MINIO_IMG:-$(grep 'minioImage.*=' "${ROOT_DIR}/tests/utils/minio/minio.go" | cut -f 2 -d \")} APACHE_IMG=${APACHE_IMG:-"httpd"} HELPER_IMGS=("$POSTGRES_IMG" "$E2E_PRE_ROLLING_UPDATE_IMG" "$PGBOUNCER_IMG" "$MINIO_IMG" "$APACHE_IMG") diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 9fc3c9dba6..685630691f 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -43,6 +43,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -173,7 +174,8 @@ func AssertSwitchoverWithHistory( } numHistory := len(strings.Split(strings.TrimSpace(out), "\n")) - GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, numHistory) + GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, + numHistory) count++ if numHistory > 0 { continue @@ -291,8 +293,11 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env }) } -func AssertClusterDefault(namespace string, clusterName string, - isExpectedToDefault bool, env *testsUtils.TestingEnvironment, +func AssertClusterDefault( + namespace string, + clusterName string, + isExpectedToDefault bool, + env *testsUtils.TestingEnvironment, ) { By("having a Cluster object populated with default values", func() { // Eventually the number of ready instances should be equal to the @@ -334,8 +339,14 @@ func AssertWebhookEnabled(env *testsUtils.TestingEnvironment, mutating, validati } // Update the secrets and verify cluster reference the updated resource version of secrets -func AssertUpdateSecret(field string, value string, secretName string, namespace string, - clusterName string, timeout int, env *testsUtils.TestingEnvironment, +func AssertUpdateSecret( + field string, + value string, + secretName string, + namespace string, + clusterName string, + timeout int, + env *testsUtils.TestingEnvironment, ) { var secret corev1.Secret Eventually(func(g Gomega) { @@ -376,8 +387,14 @@ func AssertUpdateSecret(field string, value string, secretName string, namespace // AssertConnection is used if a connection from a pod to a postgresql // database works -func AssertConnection(host string, user string, dbname string, - password string, queryingPod *corev1.Pod, timeout int, env *testsUtils.TestingEnvironment, +func AssertConnection( + host string, + user string, + dbname string, + password string, + queryingPod *corev1.Pod, + timeout int, + env *testsUtils.TestingEnvironment, ) { By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() { Eventually(func() string { @@ -746,25 +763,6 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { }) } -func AssertStorageCredentialsAreCreated(namespace string, name string, id string, key string) { - Eventually(func() error { - _, _, err := testsUtils.Run(fmt.Sprintf("kubectl create secret generic %v -n %v "+ - "--from-literal='ID=%v' "+ - "--from-literal='KEY=%v'", - name, namespace, id, key)) - return err - }, 60, 5).Should(BeNil()) -} - -// minioPath gets the MinIO file string for WAL/backup objects in a configured bucket -func minioPath(serverName, fileName string) string { - // the * regexes enable matching these typical paths: - // minio/backups/serverName/base/20220618T140300/data.tar - // minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz - // minio/backups/serverName/wals/00000002.history.gz - return filepath.Join("*", serverName, "*", fileName) -} - // CheckPointAndSwitchWalOnPrimary trigger a checkpoint and switch wal on primary pod and returns the latest WAL file func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string { var latestWAL string @@ -786,13 +784,13 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) { Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL := switchWalAndGetLatestArchive(namespace, primary) - latestWALPath = minioPath(serverName, latestWAL+".gz") + latestWALPath = 
minio.GetFilePath(serverName, latestWAL+".gz") }) By(fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath), func() { Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testsUtils.CountFilesOnMinio(minioEnv, latestWALPath) + return minio.CountFiles(minioEnv, latestWALPath) }, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1)) }) } @@ -1380,9 +1378,11 @@ func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, clu podName := pod.GetName() out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), + Expect(strings.Contains(out, + fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), + Expect(strings.Contains(out, + fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_test_rows{datname="%v"} 1`, targetSecret))).Should(BeTrue(), @@ -1453,54 +1453,6 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str }) } -func AssertCreateSASTokenCredentials(namespace string, id string, key string) { - // Adding 24 hours to the current time - date := time.Now().UTC().Add(time.Hour * 24) - // Creating date time format for az command - expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", - date.Year(), - date.Month(), - date.Day(), - date.Hour(), - date.Minute()) - - out, _, err := testsUtils.Run(fmt.Sprintf( - // SAS Token at Blob Container level does not currently work in Barman Cloud - // https://github.com/EnterpriseDB/barman/issues/388 - // we will use SAS Token at Storage Account level - // ( "az storage container generate-sas --account-name %v "+ - // "--name %v "+ - // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ - // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", - // id, blobContainerName ) - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions cdlruwap --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRW := strings.TrimRight(out, "\n") - - out, _, err = testsUtils.Run(fmt.Sprintf( - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions lr --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRO := strings.TrimRight(out, "\n") - - AssertROSASTokenUnableToWrite("restore-cluster-sas", id, SASTokenRO) - - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds-sas", id, SASTokenRW) - AssertStorageCredentialsAreCreated(namespace, "restore-storage-creds-sas", id, SASTokenRO) -} - -func AssertROSASTokenUnableToWrite(containerName string, id string, key string) { - _, _, err := testsUtils.RunUnchecked(fmt.Sprintf("az storage container create "+ - "--name %v --account-name %v "+ - "--sas-token %v", containerName, id, key)) - Expect(err).To(HaveOccurred()) -} - func AssertClusterAsyncReplica(namespace, sourceClusterFile, 
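
The SAS-token helpers deleted just above now live in the shared test utilities: the new Azure test file later in this patch calls testUtils.CreateSASTokenCredentials instead. As a rough sketch of what that helper has to do, based on the az invocations being removed here — the function name, the injected run callback, and the error handling are illustrative, not the actual signature in tests/utils:

```go
// A sketch of read-write / read-only SAS token generation for the backup
// tests; it mirrors the az CLI calls of the removed helper, but this is an
// illustrative shape, not the real CreateSASTokenCredentials implementation.
package utils

import (
	"fmt"
	"strings"
	"time"
)

// generateSASTokens returns a read-write and a read-only SAS token for the
// given storage account, both valid for 24 hours. The run callback is assumed
// to behave like testsUtils.Run (execute a shell command, return stdout,
// stderr, and an error).
func generateSASTokens(
	storageAccount, storageKey string,
	run func(string) (string, string, error),
) (rw string, ro string, err error) {
	// Tokens are generated at the storage-account level because SAS tokens
	// at the blob-container level do not currently work with Barman Cloud.
	expiry := time.Now().UTC().Add(24 * time.Hour).Format("2006-01-02T15:04Z")

	// Read-write token (create/delete/list/read/update/write/add/process),
	// used by the cluster that archives WALs and takes backups.
	out, _, err := run(fmt.Sprintf(
		"az storage account generate-sas --account-name %v "+
			"--https-only --permissions cdlruwap --account-key %v "+
			"--resource-types co --services b --expiry %v -o tsv",
		storageAccount, storageKey, expiry))
	if err != nil {
		return "", "", err
	}
	rw = strings.TrimRight(out, "\n")

	// Read-only token (list/read), used by the restore cluster and to verify
	// that a read-only token cannot write to the object store.
	out, _, err = run(fmt.Sprintf(
		"az storage account generate-sas --account-name %v "+
			"--https-only --permissions lr --account-key %v "+
			"--resource-types co --services b --expiry %v -o tsv",
		storageAccount, storageKey, expiry))
	if err != nil {
		return "", "", err
	}
	ro = strings.TrimRight(out, "\n")
	return rw, ro, nil
}
```
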
restoreClusterFile, tableName string) { By("Async Replication into external cluster", func() { restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) @@ -1864,7 +1816,8 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta }) // Gather credentials - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, + env) Expect(err).ToNot(HaveOccurred()) primaryPod, err := env.GetClusterPrimary(namespace, clusterName) @@ -1945,38 +1898,6 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) { }) } -func AssertArchiveWalOnAzurite(namespace, clusterName string) { - // Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time - By("archiving WALs and verifying they exist", func() { - primary := clusterName + "-1" - latestWAL := switchWalAndGetLatestArchive(namespace, primary) - // verifying on blob storage using az - // Define what file we are looking for in Azurite. - // Escapes are required since az expects forward slashes to be escaped - path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL) - // verifying on blob storage using az - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) - }, 60).Should(BeEquivalentTo(1)) - }) -} - -func AssertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testsUtils.AzureConfiguration) { - // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time - By("archiving WALs and verifying they exist", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) - // Define what file we are looking for in Azure. 
- // Escapes are required since az expects forward slashes to be escaped - path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) - // Verifying on blob storage using az - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path) - }, 60).Should(BeEquivalentTo(1)) - }) -} - // switchWalAndGetLatestArchive trigger a new wal and get the name of latest wal file func switchWalAndGetLatestArchive(namespace, podName string) string { _, _, err := env.ExecQueryInInstancePod( @@ -2000,236 +1921,6 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { return strings.TrimSpace(out) } -func prepareClusterForPITROnMinio( - namespace, - clusterName, - backupSampleFile string, - expectedVal int, - currentTimestamp *string, -) { - const tableNamePitr = "for_restore" - - By("backing up a cluster and verifying it exists on minio", func() { - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testsUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeNumerically(">=", expectedVal), - fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, - expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: tableNamePitr, - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - - insertRecordIntoTable(tableNamePitr, 3, conn) - }) - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzureBlob( - namespace string, - clusterName string, - backupSampleFile string, - azureConfig testsUtils.AzureConfiguration, - expectedVal int, - currentTimestamp *string, -) { - const tableNamePitr = "for_restore" - By("backing up a cluster and verifying it exists on Azure Blob", func() { - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") - }, 30).Should(BeEquivalentTo(expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - 
DatabaseName: testsUtils.AppDBName, - TableName: tableNamePitr, - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - insertRecordIntoTable(tableNamePitr, 3, conn) - }) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { - By("creating the Azurite storage credentials", func() { - err := testsUtils.CreateStorageCredentialsOnAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up Azurite to hold the backups", func() { - // Deploying azurite for blob storage - err := testsUtils.InstallAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up az-cli", func() { - // This is required as we have a service of Azurite running locally. - // In order to connect, we need az cli inside the namespace - err := testsUtils.InstallAzCli(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - // Creating cluster - AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) - - AssertArchiveConditionMet(namespace, clusterName, "5m") -} - -func prepareClusterBackupOnAzurite( - namespace, - clusterName, - clusterSampleFile, - backupFile, - tableName string, -) { - // Setting up Azurite and az cli along with Postgresql cluster - prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzurite(namespace, clusterName) - - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzurite( - namespace, - clusterName, - backupSampleFile string, - currentTimestamp *string, -) { - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, 
clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: "for_restore", - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - insertRecordIntoTable("for_restore", 3, conn) - }) - AssertArchiveWalOnAzurite(namespace, clusterName) -} - func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) { CreateResourceFromFile(namespace, poolerYamlFilePath) Eventually(func() (int32, error) { @@ -2925,19 +2616,6 @@ func AssertBackupConditionTimestampChangedInClusterStatus( }) } -func AssertBackupConditionInClusterStatus(namespace, clusterName string) { - By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { - Eventually(func() (string, error) { - getBackupCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionBackup) - if err != nil { - return "", err - } - return string(getBackupCondition.Status), nil - }, 300, 5).Should(BeEquivalentTo("True")) - }) -} - func AssertClusterReadinessStatusIsReached( namespace, clusterName string, @@ -3081,7 +2759,8 @@ func AssertClusterHAReplicationSlots(namespace, clusterName string) { podList, err := env.GetClusterPodList(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), + env) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go new file mode 100644 index 0000000000..65c3f8a2ed --- /dev/null +++ b/tests/e2e/backup_restore_azure_test.go @@ -0,0 +1,486 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only run on AKS clusters") + } + }) + + Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { + // We must be careful here. All the clusters use the same remote storage + // and that means that we must use different cluster names otherwise + // we risk mixing WALs and backups + const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" + const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" + backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azure-blob" + var err error + clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. 
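
The BeforeAll below creates the object-storage credentials with testUtils.CreateObjectStorageSecret, which replaces the kubectl-based AssertStorageCredentialsAreCreated removed from asserts_test.go earlier in this patch (the diffstat shows tests/utils/secrets.go growing by 28 lines). A rough sketch of what such a helper amounts to; the env.Client and env.Ctx fields and the return values are assumptions, not the verified implementation:

```go
// A sketch of the secret-creation helper the refactored tests call in place
// of the old kubectl-based assertion. The ID/KEY keys match the removed
// --from-literal flags; everything else is illustrative.
package utils

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// CreateObjectStorageSecret stores object-storage credentials in a Secret
// using the same ID/KEY keys the previous kubectl invocation created.
func CreateObjectStorageSecret(
	namespace, name, id, key string,
	env *TestingEnvironment,
) (*corev1.Secret, error) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		StringData: map[string]string{
			"ID":  id,
			"KEY": key,
		},
	}
	// Assumes the testing environment exposes a controller-runtime client
	// and a context, as the other helpers in this package do.
	if err := env.Client.Create(env.Ctx, secret); err != nil {
		return nil, err
	}
	return secret, nil
}
```
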
+ // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) + }) + + // We back up and restore a cluster, and verify some expected data to + // be there + It("backs up and restore a cluster", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + By("uploading a backup", func() { + // We create a backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Restore backup in a new cluster + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + By("deleting the restored cluster", func() { + err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
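
The backup-condition wait used above, testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName), replaces the version deleted from asserts_test.go earlier in this patch. A minimal sketch of the relocated helper, assuming it keeps the removed body and only gains the explicit env parameter; the real code in tests/utils may differ in detail:

```go
// A sketch of the relocated backup-condition assertion, based on the body
// removed from asserts_test.go; treat the exact placement and signature as
// an assumption.
package utils

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// AssertBackupConditionInClusterStatus waits until the cluster reports a
// Backup condition with status "True", polling every 5 seconds for up to
// 300 seconds, exactly like the deleted asserts_test.go version.
func AssertBackupConditionInClusterStatus(env *TestingEnvironment, namespace, clusterName string) {
	By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() {
		Eventually(func() (string, error) {
			backupCondition, err := GetConditionsInClusterStatus(
				namespace, clusterName, env, apiv1.ConditionBackup)
			if err != nil {
				return "", err
			}
			return string(backupCondition.Status), nil
		}, 300, 5).Should(BeEquivalentTo("True"))
	})
}
```
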
We expect the backup to be available + It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) + + // Only one data.tar files should be present + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + clusterName, "data.tar") + }, 30).Should(BeNumerically("==", 2)) + }) + + It("backs up and restore a cluster with PITR", func() { + restoredClusterName := "restore-cluster-azure-pitr" + currentTimestamp := new(string) + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + backupFile, + env.AzureConfiguration, + 2, + currentTimestamp, + ) + + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + namespace, + restoredClusterName, + backupFile, + *currentTimestamp, + env, + ) + Expect(err).ToNot(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") + By("deleting the restored cluster", func() { + Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + }) + }) + + // We create a cluster, create a scheduled backup, patch it to suspend its + // execution. We verify that the number of backups does not increase. + // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) + + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + clusterName, "data.tar") + }, 60).Should(BeNumerically(">=", 2)) + }) + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + }) +}) + +var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" + clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" + externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" + sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" + tableName = "to_restore" + clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" + clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" + sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" + sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" + level = tests.High + ) + + currentTimestamp 
:= new(string) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + }) + + // Restore cluster using a recovery object store, that is a backup of another cluster, + // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section + + Context("using azure blobs as object storage", func() { + Context("storage account access authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + const namespacePrefix = "recovery-barman-object-azure" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. + // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) + }) + + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // Create the backup + testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileAzure, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITR, + env.AzureConfiguration, + 1, + currentTimestamp, + ) + + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.BlobContainer, + env) + Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + 
namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + }) + }) + }) + + Context("storage account SAS Token authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + const namespacePrefix = "cluster-backup-azure-blob-sas" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests, + // we get the credentials from the environment variables as we can't create + // a fixture for them + By("creating the Azure Blob Container SAS Token credentials", func() { + err = testUtils.CreateSASTokenCredentials( + namespace, + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the Cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) + }) + + It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + // Create a WAL on the primary and check if it arrives in the + // Azure Blob Storage within a short time + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITRSAS, + env.AzureConfiguration, + 1, + currentTimestamp, + ) + + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds-sas", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.BlobContainer, + env) + Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, 
restoredCluster)).To(Succeed()) + }) + }) + }) + }) +}) + +func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testUtils.AzureConfiguration) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time + By("archiving WALs and verifying they exist", func() { + primary, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) + // Define what file we are looking for in Azure. + // Escapes are required since az expects forward slashes to be escaped + path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) + // Verifying on blob storage using az + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path) + }, 60).Should(BeEquivalentTo(1)) + }) +} + +func prepareClusterForPITROnAzureBlob( + namespace string, + clusterName string, + backupSampleFile string, + azureConfig testUtils.AzureConfiguration, + expectedVal int, + currentTimestamp *string, +) { + const tableNamePitr = "for_restore" + By("backing up a cluster and verifying it exists on Azure Blob", func() { + testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") + }, 30).Should(BeEquivalentTo(expectedVal)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + AssertArchiveConditionMet(namespace, clusterName, "5m") + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go new file mode 100644 index 0000000000..cb3254c5a3 --- /dev/null +++ b/tests/e2e/backup_restore_azurite_test.go @@ -0,0 +1,353 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + + if !(IsLocal() || IsGKE() || IsOpenshift()) { + Skip("This test is only executed on gke, openshift and local") + } + }) + + Context("using Azurite blobs as object storage", Ordered, func() { + // This is a set of tests using an Azurite server deployed in the same + // namespace as the cluster. Since each cluster is installed in its + // own namespace, they can share the configuration file + const ( + clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" + scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" + scheduledBackupImmediateSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" + backupFile = fixturesDir + "/backup/azurite/backup.yaml" + azuriteCaSecName = "azurite-ca-secret" + azuriteTLSSecName = "azurite-tls-secret" + ) + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azurite" + var err error + clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // Create and assert ca and tls certificate secrets on Azurite + By("creating ca and tls certificate secrets", func() { + err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, + azuriteCaSecName, azuriteTLSSecName, env) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with Postgresql cluster + prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) + }) + + It("restores a backed up cluster", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
+ // We expect the backup to be available + It("immediately starts a backup using ScheduledBackups immediate option", func() { + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) + + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically("==", 2)) + }) + + It("backs up and restore a cluster with PITR Azurite", func() { + const ( + restoredClusterName = "restore-cluster-pitr-azurite" + backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" + ) + currentTimestamp := new(string) + + prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) + + cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + namespace, + restoredClusterName, + backupFilePITR, + *currentTimestamp, + env, + ) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") + + By("deleting the restored cluster", func() { + Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + }) + }) + + // We create a cluster, create a scheduled backup, patch it to suspend its + // execution. We verify that the number of backups does not increase. + // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 60).Should(BeNumerically(">=", 3)) + }) + + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + }) +}) + +var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" + backupFileAzurite = fixturesBackupDir + "backup-azurite-02.yaml" + externalClusterFileAzurite = fixturesBackupDir + "external-clusters-azurite-03.yaml.template" + + azuriteCaSecName = "azurite-ca-secret" + azuriteTLSSecName = "azurite-tls-secret" + tableName = "to_restore" + ) + Context("using Azurite blobs as object storage", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + if IsAKS() { + Skip("This test is not run on AKS") + } + const namespacePrefix = "recovery-barman-object-azurite" + var err error + clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // Create and assert ca and tls certificate secrets on Azurite + By("creating ca 
and tls certificate secrets", func() { + err := testUtils.CreateCertificateSecretsOnAzurite( + namespace, + clusterName, + azuriteCaSecName, + azuriteTLSSecName, + env) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with PostgreSQL cluster + prepareClusterBackupOnAzurite( + namespace, + clusterName, + azuriteBlobSampleFile, + backupFileAzurite, + tableName, + ) + }) + + It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ + " option in 'externalClusters' section", func() { + const ( + externalClusterRestoreName = "restore-external-cluster-pitr" + backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" + ) + currentTimestamp := new(string) + prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) + + // Create a cluster from a particular time using external backup. + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( + namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) + Expect(err).NotTo(HaveOccurred()) + + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterRestoreName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + }) + }) + }) +}) + +func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { + By("creating the Azurite storage credentials", func() { + err := testUtils.CreateStorageCredentialsOnAzurite(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up Azurite to hold the backups", func() { + // Deploying azurite for blob storage + err := testUtils.InstallAzurite(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up az-cli", func() { + // This is required as we have a service of Azurite running locally. 
+ // In order to connect, we need az cli inside the namespace + err := testUtils.InstallAzCli(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + // Creating cluster + AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) + + AssertArchiveConditionMet(namespace, clusterName, "5m") +} + +func prepareClusterBackupOnAzurite( + namespace, + clusterName, + clusterSampleFile, + backupFile, + tableName string, +) { + // Setting up Azurite and az cli along with Postgresql cluster + prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzurite(namespace, clusterName) + + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} + +func prepareClusterForPITROnAzurite( + namespace, + clusterName, + backupSampleFile string, + currentTimestamp *string, +) { + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: "for_restore", + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable("for_restore", 3, conn) + }) + assertArchiveWalOnAzurite(namespace, clusterName) +} + +func assertArchiveWalOnAzurite(namespace, clusterName string) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time + By("archiving WALs and verifying they exist", func() { + primary := clusterName + "-1" + latestWAL := 
switchWalAndGetLatestArchive(namespace, primary) + // verifying on blob storage using az + // Define what file we are looking for in Azurite. + // Escapes are required since az expects forward slashes to be escaped + path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL) + // verifying on blob storage using az + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) + }, 60).Should(BeEquivalentTo(1)) + }) +} diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go new file mode 100644 index 0000000000..41ada349f0 --- /dev/null +++ b/tests/e2e/backup_restore_minio_test.go @@ -0,0 +1,800 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "path/filepath" + + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + barmanCloudBackupLogEntry = "Starting barman-cloud-backup" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + Context("using minio as object storage for backup", Ordered, func() { + // This is a set of tests using a minio server deployed in the same + // namespace as the cluster. 
Since each cluster is installed in its + // own namespace, they can share the configuration file + var namespace, clusterName string + const ( + backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" + customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" + ) + + clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" + + BeforeAll(func() { + if !IsLocal() { + Skip("This test is only run on local clusters") + } + const namespacePrefix = "cluster-backup-minio" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + By("create the certificates for MinIO", func() { + err := minioEnv.CreateCaSecret(env, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create ConfigMap and secrets to verify metrics for target database after backup restore + AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) + + By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + if err != nil { + return false, err + } + return connectionStatus, nil + }, 60).Should(BeTrue()) + }) + }) + + // We back up and restore a cluster, and verify some expected data to + // be there + It("backs up and restores a cluster using minio", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" + ) + var backup *apiv1.Backup + restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + backupName, err := env.GetResourceNameFromYAML(backupFile) + Expect(err).ToNot(HaveOccurred()) + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + latestTar := minio.GetFilePath(clusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), + func() { + backup = testUtils.ExecuteBackup(namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], env) + 
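+				// The assertions below check that the base backup reached the object
+				// store (exactly one data.tar is expected) and that the cluster status
+				// now reports a first recoverability point and a last successful
+				// backup, with no failed backup recorded.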
testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastSuccessfulBackup, err + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastFailedBackup, err + }, 30).Should(BeEmpty()) + }) + + By("verifying the backup is using the expected barman-cloud-backup options", func() { + Expect(backup).ToNot(BeNil()) + Expect(backup.Status.InstanceID).ToNot(BeNil()) + logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) + Expect(err).ToNot(HaveOccurred()) + expectedBaseBackupOptions := []string{ + "--immediate-checkpoint", + "--min-chunk-size=5MB", + "--read-timeout=59", + } + result, err := testUtils.CheckOptionsForBarmanCommand( + logEntries, + barmanCloudBackupLogEntry, + backup.Name, + backup.Status.InstanceID.PodName, + expectedBaseBackupOptions, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(BeTrue()) + }) + + By("executing a second backup and verifying the number of backups on minio", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + + // delete the first backup and create a second backup + backup := &apiv1.Backup{} + err := env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + err = env.Client.Delete(env.Ctx, backup) + Expect(err).ToNot(HaveOccurred()) + // create a second backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + latestTar = minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(2)) + }) + + By("verifying the backupName is properly set in the status of the backup", func() { + backup := &apiv1.Backup{} + err := env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + // We know that our current images always contain the latest barman version + if cluster.ShouldForceLegacyBackup() { + Expect(backup.Status.BackupName).To(BeEmpty()) + } else { + Expect(backup.Status.BackupName).To(HavePrefix("backup-")) + } + }) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + cluster, err := env.GetCluster(namespace, restoredClusterName) + Expect(err).ToNot(HaveOccurred()) + AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) + + previous := 0 + latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") + By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", + latestGZ), func() { + previous, err = minio.CountFiles(minioEnv, latestGZ) + 
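+				// A switchover promotes a replica onto a new timeline, and the new
+				// timeline's .history file is then archived, so the count captured
+				// here is expected to grow once AssertSwitchover has run.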
Expect(err).ToNot(HaveOccurred()) + }) + + AssertSwitchover(namespace, clusterName, env) + + By("checking the number of .history after switchover", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestGZ) + }, 60).Should(BeNumerically(">", previous)) + }) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up and restore a cluster from standby", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" + backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" + ) + + targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", + latestTar), func() { + testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up a cluster from standby with backup target defined in backup", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" + backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" + ) + + targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) 
+ AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ + " backup path is %v", latestTar), func() { + testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], + env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + By("deleting the cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Test that the restore works if the source cluster has a custom + // backup.barmanObjectStore.serverName that is different from the cluster name + It("backs up and restores a cluster with custom backup serverName", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" + // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName + clusterWithMinioCustomSampleFile = fixturesDir + + "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" + backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" + clusterServerName = "pg-backup-minio-Custom-Name" + ) + + customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: customClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) + + // There should be a backup resource and + By("backing up a cluster and verifying it exists on minio", func() { + testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, customClusterName) + latestBaseTar := 
minio.GetFilePath(clusterServerName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestBaseTar)
+				}, 60).Should(BeEquivalentTo(1),
+					fmt.Sprintf("verify the number of backups %v equals 1", latestBaseTar))
+				// this is the second backup we take on the bucket
+				Eventually(func() (string, error) {
+					cluster, err := env.GetCluster(namespace, customClusterName)
+					return cluster.Status.FirstRecoverabilityPoint, err
+				}, 30).ShouldNot(BeEmpty())
+			})
+
+			// Restore backup in a new cluster
+			AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName)
+
+			By("deleting the primary cluster", func() {
+				err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile)
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			By("deleting the restored cluster", func() {
+				err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile)
+				Expect(err).ToNot(HaveOccurred())
+			})
+		})
+
+		// Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available
+		It("immediately starts a backup using ScheduledBackups 'immediate' option", func() {
+			const scheduledBackupSampleFile = fixturesDir +
+				"/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml"
+			scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+			Expect(err).ToNot(HaveOccurred())
+
+			AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName)
+			latestBaseTar := minio.GetFilePath(clusterName, "data.tar")
+			// AssertScheduledBackupsImmediate creates at least two backups, we should find
+			// their base backups
+			Eventually(func() (int, error) {
+				return minio.CountFiles(minioEnv, latestBaseTar)
+			}, 60).Should(BeNumerically(">=", 2),
+				fmt.Sprintf("verify the number of backups %v is >= 2", latestBaseTar))
+		})
+
+		It("backs up and restores a cluster with PITR on MinIO", func() {
+			const (
+				restoredClusterName = "restore-cluster-pitr-minio"
+				backupFilePITR      = fixturesDir + "/backup/minio/backup-minio-pitr.yaml"
+			)
+			currentTimestamp := new(string)
+			prepareClusterForPITROnMinio(
+				namespace,
+				clusterName,
+				backupFilePITR,
+				3,
+				currentTimestamp,
+			)
+
+			cluster, err := testUtils.CreateClusterFromBackupUsingPITR(
+				namespace,
+				restoredClusterName,
+				backupFilePITR,
+				*currentTimestamp,
+				env,
+			)
+			Expect(err).NotTo(HaveOccurred())
+			AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env)
+
+			// Restore backup in a new cluster, also covering the case where no application database is configured
+			AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003")
+
+			By("deleting the restored cluster", func() {
+				Expect(testUtils.DeleteObject(env, cluster)).To(Succeed())
+			})
+		})
+
+		// We create a cluster and a scheduled backup, then patch the backup to suspend its
+		// execution. We verify that the number of backups does not increase.
+ // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml" + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) + latestTar := minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeNumerically(">=", 2), + fmt.Sprintf("verify the number of backup %v is great than 2", latestTar)) + }) + + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + + It("verify tags in backed files", func() { + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + tags, err := minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*1.gz")) + Expect(err).ToNot(HaveOccurred()) + Expect(tags.Tags).ToNot(BeEmpty()) + + currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + oldPrimary := currentPrimary.GetName() + // Force-delete the primary + quickDelete := &ctrlclient.DeleteOptions{ + GracePeriodSeconds: &quickDeletionPeriod, + } + err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + Expect(err).ToNot(HaveOccurred()) + + AssertNewPrimary(namespace, clusterName, oldPrimary) + + tags, err = minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*.history.gz")) + Expect(err).ToNot(HaveOccurred()) + Expect(tags.Tags).ToNot(BeEmpty()) + }) + }) +}) + +var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template" + clusterSourceFileMinio = fixturesBackupDir + "source-cluster-minio-01.yaml.template" + externalClusterFileMinio = fixturesBackupDir + "external-clusters-minio-03.yaml.template" + sourceTakeFirstBackupFileMinio = fixturesBackupDir + "backup-minio-02.yaml" + sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml" + sourceTakeThirdBackupFileMinio = fixturesBackupDir + "backup-minio-04.yaml" + tableName = "to_restore" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + // Restore cluster using a recovery object store, that is a backup of another cluster, + // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section + Context("using minio as object storage", Ordered, func() { + var namespace, clusterName string + + BeforeAll(func() { + if !IsLocal() { + Skip("This test is only executed on local") + } + const namespacePrefix = "recovery-barman-object-minio" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio) + Expect(err).ToNot(HaveOccurred()) + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + 
"minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + By("create the certificates for MinIO", func() { + err := minioEnv.CreateCaSecret(env, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) + + By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + if err != nil { + return false, err + } + return connectionStatus, nil + }, 60).Should(BeTrue()) + }) + }) + + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) + Expect(err).ToNot(HaveOccurred()) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + + // There should be a backup resource and + By("backing up a cluster and verifying it exists on minio", func() { + testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, + testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + + // TODO: this is to force a CHECKPOINT when we run the backup on standby. + // This should be better handled inside ExecuteBackup + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + + latestTar := minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1), + fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileMinio, tableName) + + // verify test data on restored external cluster + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: externalClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ + " option in 'externalClusters' section", func() { + externalClusterRestoreName := "restore-external-cluster-pitr" + + currentTimestamp := new(string) + // We have already written 2 rows in test table 'to_restore' in above test now we will take current + // timestamp. 
It will be used to restore the cluster from the source using PITR
+			By("getting currentTimestamp", func() {
+				ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+				*currentTimestamp = ts
+				Expect(err).ToNot(HaveOccurred())
+			})
+			By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() {
+				forward, conn, err := testUtils.ForwardPSQLConnection(
+					env,
+					namespace,
+					clusterName,
+					testUtils.AppDBName,
+					apiv1.ApplicationUserSecretSuffix,
+				)
+				defer func() {
+					_ = conn.Close()
+					forward.Close()
+				}()
+				Expect(err).ToNot(HaveOccurred())
+				// insert 2 more rows (entries 3 and 4) into the "app" database
+				insertRecordIntoTable(tableName, 3, conn)
+				insertRecordIntoTable(tableName, 4, conn)
+			})
+			By("creating second backup and verifying it exists on minio", func() {
+				testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false,
+					testTimeouts[testUtils.BackupIsReady], env)
+				testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(2),
+					fmt.Sprintf("verify the number of backups %v equals 2", latestTar))
+			})
+			var restoredCluster *apiv1.Cluster
+			By("create a cluster from backup with PITR", func() {
+				var err error
+				restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio(
+					namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env)
+				Expect(err).NotTo(HaveOccurred())
+			})
+			AssertClusterWasRestoredWithPITRAndApplicationDB(
+				namespace,
+				externalClusterRestoreName,
+				tableName,
+				"00000002",
+			)
+			By("deleting the restored cluster", func() {
+				Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+			})
+		})
+
+		It("restores a cluster from barman object using the replica option in the spec", func() {
+			// Write a table and some data on the "app" database
+			tableLocator := TableLocator{
+				Namespace:    namespace,
+				ClusterName:  clusterName,
+				DatabaseName: testUtils.AppDBName,
+				TableName:    "for_restore_repl",
+			}
+			AssertCreateTestData(env, tableLocator)
+
+			AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+
+			By("backing up a cluster and verifying it exists on minio", func() {
+				testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false,
+					testTimeouts[testUtils.BackupIsReady], env)
+				testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(3),
+					fmt.Sprintf("verify the number of backups %v equals 3", latestTar))
+			})
+
+			// Replicating a cluster with asynchronous replication
+			AssertClusterAsyncReplica(
+				namespace,
+				clusterSourceFileMinio,
+				externalClusterFileMinioReplica,
+				"for_restore_repl",
+			)
+		})
+	})
+})
+
+func prepareClusterForPITROnMinio(
+	namespace,
+	clusterName,
+	backupSampleFile string,
+	expectedVal int,
+	currentTimestamp *string,
+) {
+	const tableNamePitr = "for_restore"
+
+	By("backing up a cluster and verifying it exists on minio", func() {
+		testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env)
+		latestTar := minio.GetFilePath(clusterName, "data.tar")
+		Eventually(func() (int, error) {
+			return minio.CountFiles(minioEnv, latestTar)
+		}, 60).Should(BeNumerically(">=", expectedVal),
+			fmt.Sprintf("verify the number of backups %v is greater 
than or equal to %v", latestTar, + expectedVal)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + AssertArchiveConditionMet(namespace, clusterName, "5m") + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go deleted file mode 100644 index a452eaee94..0000000000 --- a/tests/e2e/backup_restore_test.go +++ /dev/null @@ -1,1250 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "fmt" - "path/filepath" - - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { - const ( - level = tests.High - - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - - tableName = "to_restore" - - barmanCloudBackupLogEntry = "Starting barman-cloud-backup" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - Context("using minio as object storage for backup", Ordered, func() { - // This is a set of tests using a minio server deployed in the same - // namespace as the cluster. 
Since each cluster is installed in its - // own namespace, they can share the configuration file - var namespace, clusterName string - const ( - backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" - customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" - ) - - clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only run on local clusters") - } - const namespacePrefix = "cluster-backup-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - }) - - // Create ConfigMap and secrets to verify metrics for target database after backup restore - AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restores a cluster using minio", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" - ) - var backup *apiv1.Backup - restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - backupName, err := env.GetResourceNameFromYAML(backupFile) - Expect(err).ToNot(HaveOccurred()) - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - latestTar := minioPath(clusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), func() { - backup = testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - Eventually(func() (int, error) { - return 
testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastSuccessfulBackup, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastFailedBackup, err - }, 30).Should(BeEmpty()) - }) - - By("verifying the backup is using the expected barman-cloud-backup options", func() { - Expect(backup).ToNot(BeNil()) - Expect(backup.Status.InstanceID).ToNot(BeNil()) - logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) - Expect(err).ToNot(HaveOccurred()) - expectedBaseBackupOptions := []string{ - "--immediate-checkpoint", - "--min-chunk-size=5MB", - "--read-timeout=59", - } - result, err := testUtils.CheckOptionsForBarmanCommand( - logEntries, - barmanCloudBackupLogEntry, - backup.Name, - backup.Status.InstanceID.PodName, - expectedBaseBackupOptions, - ) - Expect(err).ToNot(HaveOccurred()) - Expect(result).To(BeTrue()) - }) - - By("executing a second backup and verifying the number of backups on minio", func() { - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - - // delete the first backup and create a second backup - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - err = env.Client.Delete(env.Ctx, backup) - Expect(err).ToNot(HaveOccurred()) - // create a second backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - latestTar = minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2)) - }) - - By("verifying the backupName is properly set in the status of the backup", func() { - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - // We know that our current images always contain the latest barman version - if cluster.ShouldForceLegacyBackup() { - Expect(backup.Status.BackupName).To(BeEmpty()) - } else { - Expect(backup.Status.BackupName).To(HavePrefix("backup-")) - } - }) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - cluster, err := env.GetCluster(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) - AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) - - previous := 0 - latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") - By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", - latestGZ), func() { - previous, err = testUtils.CountFilesOnMinio(minioEnv, latestGZ) - Expect(err).ToNot(HaveOccurred()) - }) - - AssertSwitchover(namespace, clusterName, 
env) - - By("checking the number of .history after switchover", func() { - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestGZ) - }, 60).Should(BeNumerically(">", previous)) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up and restore a cluster from standby", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" - backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", - latestTar), func() { - testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up a cluster from standby with backup target defined in backup", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" - backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, 
testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ - " backup path is %v", latestTar), func() { - testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - By("deleting the cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Test that the restore works if the source cluster has a custom - // backup.barmanObjectStore.serverName that is different than the cluster name - It("backs up and restores a cluster with custom backup serverName", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" - // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName - clusterWithMinioCustomSampleFile = fixturesDir + - "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" - backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" - clusterServerName = "pg-backup-minio-Custom-Name" - ) - - customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: customClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, customClusterName) - latestBaseTar := minioPath(clusterServerName, "data.tar") - Eventually(func() (int, error) { - return 
testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) - // this is the second backup we take on the bucket - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, customClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - By("deleting the primary cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - latestBaseTar := minioPath(clusterName, "data.tar") - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar)) - }) - - It("backs up and restore a cluster with PITR MinIO", func() { - const ( - restoredClusterName = "restore-cluster-pitr-minio" - backupFilePITR = fixturesDir + "/backup/minio/backup-minio-pitr.yaml" - ) - - prepareClusterForPITROnMinio( - namespace, - clusterName, - backupFilePITR, - 3, - currentTimestamp, - ) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster and a scheduled backup, then it is patched to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is great than 2", latestTar)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - - It("verify tags in backed files", func() { - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - tags, err := testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*1.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - oldPrimary := currentPrimary.GetName() - // Force-delete the primary - quickDelete := &ctrlclient.DeleteOptions{ - GracePeriodSeconds: &quickDeletionPeriod, - } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) - Expect(err).ToNot(HaveOccurred()) - - AssertNewPrimary(namespace, clusterName, oldPrimary) - - tags, err = testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*.history.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - }) - }) - - Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { - // We must be careful here. All the clusters use the same remote storage - // and that means that we must use different cluster names otherwise - // we risk mixing WALs and backups - const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" - const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" - backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" - var namespace, clusterName string - - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only run on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob" - var err error - clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. 
- // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated( - namespace, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - ) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restore a cluster", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - By("uploading a backup", func() { - // We create a backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - By("deleting the restored cluster", func() { - err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - - // Only one data.tar files should be present - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR", func() { - restoredClusterName := "restore-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - backupFile, - env.AzureConfiguration, - 2, - currentTimestamp, - ) - - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFile, - *currentTimestamp, - env, - ) - Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 2)) - }) - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - // This is a set of tests using an Azurite server deployed in the same - // namespace as the cluster. Since each cluster is installed in its - // own namespace, they can share the configuration file - const ( - clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" - scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" - scheduledBackupImmediateSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" - backupFile = fixturesDir + "/backup/azurite/backup.yaml" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - var namespace, clusterName string - - BeforeAll(func() { - if !(IsLocal() || IsGKE() || IsOpenshift()) { - Skip("This test is only executed on gke, openshift and local") - } - const namespacePrefix = "cluster-backup-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, - azuriteCaSecName, azuriteTLSSecName, env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with Postgresql cluster - prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) - }) - - It("restores a backed up cluster", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) - }) - - // Create a scheduled backup with the 'immediate' option enabled. 
- // We expect the backup to be available - It("immediately starts a backup using ScheduledBackups immediate option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR Azurite", func() { - const ( - restoredClusterName = "restore-cluster-pitr-azurite" - backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. - // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 3)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) -}) - -var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { - const ( - fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - externalClusterFileMinio = fixturesBackupDir + "external-clusters-minio-03.yaml.template" - externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template" - sourceTakeFirstBackupFileMinio = fixturesBackupDir + "backup-minio-02.yaml" - sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml" - sourceTakeThirdBackupFileMinio = fixturesBackupDir + "backup-minio-04.yaml" - clusterSourceFileMinio = fixturesBackupDir + "source-cluster-minio-01.yaml.template" - sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" - clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" - externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" - sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" - externalClusterFileAzurite = fixturesBackupDir + 
"external-clusters-azurite-03.yaml.template" - backupFileAzurite = fixturesBackupDir + "backup-azurite-02.yaml" - tableName = "to_restore" - clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" - clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" - sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" - sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" - level = tests.High - minioCaSecName = "minio-server-ca-secret" - minioTLSSecName = "minio-server-tls-secret" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - // Restore cluster using a recovery object store, that is a backup of another cluster, - // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section - Context("using minio as object storage", Ordered, func() { - var namespace, clusterName string - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only executed on local") - } - const namespacePrefix = "recovery-barman-object-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
- // This should be better handled inside ExecuteBackup - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileMinio, tableName) - - // verify test data on restored external cluster - tableLocator = TableLocator{ - Namespace: namespace, - ClusterName: externalClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertDataExpectedCount(env, tableLocator, 2) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - externalClusterRestoreName := "restore-external-cluster-pitr" - // We have already written 2 rows in test table 'to_restore' in above test now we will take current - // timestamp. It will use to restore cluster from source using PITR - - By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - // insert 2 more rows entries 3,4 on the "app" database - insertRecordIntoTable(tableName, 3, conn) - insertRecordIntoTable(tableName, 4, conn) - }) - By("creating second backup and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2), - fmt.Sprintf("verify the number of backup %v is equals to 2", latestTar)) - }) - var restoredCluster *apiv1.Cluster - By("create a cluster from backup with PITR", func() { - var err error - restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - }) - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - ) - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - - It("restore cluster from barman object using replica option in spec", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - 
DatabaseName: testUtils.AppDBName, - TableName: "for_restore_repl", - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(3), - fmt.Sprintf("verify the number of backup %v is great than 3", latestTar)) - }) - - // Replicating a cluster with asynchronous replication - AssertClusterAsyncReplica( - namespace, - clusterSourceFileMinio, - externalClusterFileMinioReplica, - "for_restore_repl", - ) - }) - }) - - Context("using azure blobs as object storage", func() { - Context("storage account access authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "recovery-barman-object-azure" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. - // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, env.AzureConfiguration.StorageKey) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // Create the backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileAzure, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITR, - env.AzureConfiguration, - 1, - currentTimestamp, - ) - - restoredCluster, err 
:= testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) - Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - - Context("storage account SAS Token authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob-sas" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test, - // we get the credentials from the environment variables as we can't create - // a fixture for them - By("creating the Azure Blob Container SAS Token credentials", func() { - AssertCreateSASTokenCredentials(namespace, env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey) - }) - - // Create the Cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) - }) - - It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - // Create a WAL on the primary and check if it arrives in the - // Azure Blob Storage within a short time - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // We create a Backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITRSAS, - env.AzureConfiguration, - 1, - currentTimestamp, - ) - - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds-sas", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) 
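The PITR cases above all follow the same write/capture/write/restore pattern: archive the first rows, capture a server-side timestamp, write more rows, then restore with that timestamp as spec.bootstrap.recovery.recoveryTarget.targetTime and expect only the earlier rows to survive. A minimal sketch of that pattern follows, assuming only database/sql with a registered PostgreSQL driver and a single-column numeric table; the capturePITRTarget helper is illustrative and is not part of the test utilities (the suite itself uses testUtils.GetCurrentTimestamp and ForwardPSQLConnection).

// capturePITRTarget condenses the point-in-time-recovery flow exercised by
// the tests above. Assumes db is an open *sql.DB on the "app" database and
// tableName refers to a single-column table that already holds rows 1 and 2.
func capturePITRTarget(db *sql.DB, tableName string) (string, error) {
	// Take the timestamp on the server, so it is consistent with the WAL
	// stream that barman-cloud has archived.
	var target string
	if err := db.QueryRow("SELECT current_timestamp::text").Scan(&target); err != nil {
		return "", err
	}
	// Rows written after the captured timestamp must not be present in the
	// cluster restored with targetTime = target.
	_, err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES (3), (4)", tableName))
	return target, err
}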
- Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if IsAKS() { - Skip("This test is not run on AKS") - } - const namespacePrefix = "recovery-barman-object-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite( - namespace, - clusterName, - azuriteCaSecName, - azuriteTLSSecName, - env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with PostgreSQL cluster - prepareClusterBackupOnAzurite( - namespace, - clusterName, - azuriteBlobSampleFile, - backupFileAzurite, - tableName, - ) - }) - - It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - const ( - externalClusterRestoreName = "restore-external-cluster-pitr" - backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) - - // Create a cluster from a particular time using external backup. - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) -}) diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 45998be4ae..a22f4b98ad 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -36,6 +36,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -255,8 +256,16 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) + By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(replicaNamespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + replicaNamespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -313,7 +322,14 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -522,11 +538,11 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() error { // Since we use multiple times the same cluster names for the same minio instance, we need to clean it up // between tests - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) if err != nil { return err } - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) if err != nil { return err } @@ -537,7 +553,14 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() { close(stopLoad) }) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index c70c87180c..fa637fffce 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -36,6 +36,7 @@ import ( cnpgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/sternmultitailer" . 
"github.com/onsi/ginkgo/v2" @@ -57,7 +58,7 @@ var ( operatorWasRestarted bool quickDeletionPeriod = int64(1) testTimeouts map[utils.Timeout]int - minioEnv = &utils.MinioEnv{ + minioEnv = &minio.Env{ Namespace: "minio", ServiceName: "minio-service.minio", CaSecretName: "minio-server-ca-secret", @@ -98,7 +99,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { Expect(err).ToNot(HaveOccurred()) }) minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation]) - minioClient, err := utils.MinioDeploy(minioEnv, env) + minioClient, err := minio.Deploy(minioEnv, env) Expect(err).ToNot(HaveOccurred()) caSecret := minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 72987be873..cad9524df3 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -39,6 +39,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -109,7 +110,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) // We create the MinIO credentials required to login into the system - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -168,8 +178,9 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], + env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("verifying the number of tars in minio", func() { @@ -270,7 +281,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, // This should be better handled inside ExecuteBackup AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("verifying the number of tars in the latest base backup", func() { @@ -368,7 +379,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) // We create the required credentials for MinIO - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -396,7 +416,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, testTimeouts[testUtils.VolumeSnapshotIsReady], env, ) 
- AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("checking that volumeSnapshots are properly labeled", func() { @@ -1214,7 +1234,7 @@ func latestBaseBackupContainsExpectedTars( // we list the backup.info files to get the listing of base backups // directories in minio backupInfoFiles := filepath.Join("*", clusterName, "base", "*", "*.info") - ls, err := testUtils.ListFilesOnMinio(minioEnv, backupInfoFiles) + ls, err := minio.ListFiles(minioEnv, backupInfoFiles) g.Expect(err).ShouldNot(HaveOccurred()) frags := strings.Split(ls, "\n") slices.Sort(frags) @@ -1222,10 +1242,10 @@ func latestBaseBackupContainsExpectedTars( g.Expect(frags).To(HaveLen(numBackups), report) latestBaseBackup := filepath.Dir(frags[numBackups-1]) tarsInLastBackup := strings.TrimPrefix(filepath.Join(latestBaseBackup, "*.tar"), "minio/") - listing, err := testUtils.ListFilesOnMinio(minioEnv, tarsInLastBackup) + listing, err := minio.ListFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) report += fmt.Sprintf("tar listing:\n%s\n", listing) - numTars, err := testUtils.CountFilesOnMinio(minioEnv, tarsInLastBackup) + numTars, err := minio.CountFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(numTars).To(Equal(expectedTars), report) }, 120).Should(Succeed()) diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index c389bf7a29..f698d29c6f 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -37,6 +37,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -145,11 +146,11 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // but a single scheduled backups during the check AssertScheduledBackupsAreScheduled := func(serverName string) { By("verifying scheduled backups are still happening", func() { - latestTar := minioPath(serverName, "data.tar.gz") - currentBackups, err := testsUtils.CountFilesOnMinio(minioEnv, latestTar) + latestTar := minio.GetFilePath(serverName, "data.tar.gz") + currentBackups, err := minio.CountFiles(minioEnv, latestTar) Expect(err).ToNot(HaveOccurred()) Eventually(func() (int, error) { - return testsUtils.CountFilesOnMinio(minioEnv, latestTar) + return minio.CountFiles(minioEnv, latestTar) }, 120).Should(BeNumerically(">", currentBackups)) }) } @@ -354,7 +355,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // assertExpectedMatchingPodUIDs checks that the UID of each pod of a Cluster matches with a given list of UIDs. 
// expectedMatches defines how many times, when comparing the elements of the 2 lists, you are expected to have // common values - assertExpectedMatchingPodUIDs := func(namespace, clusterName string, podUIDs []types.UID, expectedMatches int) error { + assertExpectedMatchingPodUIDs := func( + namespace, clusterName string, podUIDs []types.UID, expectedMatches int, + ) error { backoffCheckingPodRestarts := wait.Backoff{ Duration: 10 * time.Second, Steps: 30, @@ -397,11 +400,11 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath1); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath1); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath2); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath2); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } @@ -478,7 +481,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O CreateResourceFromFile(upgradeNamespace, pgSecrets) }) By("creating the cloud storage credentials", func() { - AssertStorageCredentialsAreCreated(upgradeNamespace, "aws-creds", "minio", "minio123") + _, err := testsUtils.CreateObjectStorageSecret( + upgradeNamespace, + "aws-creds", + "minio", + "minio123", + env, + ) + Expect(err).NotTo(HaveOccurred()) }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, upgradeNamespace) @@ -656,7 +666,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O By("restoring the backup taken from the first Cluster in a new cluster", func() { restoredClusterName := "cluster-restore" CreateResourceFromFile(upgradeNamespace, restoreFile) - AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], + env) // Test data should be present on restored primary primary := restoredClusterName + "-1" diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index ff0b016ec0..74f32e8bca 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -32,6 +32,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -167,7 +168,13 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env) + Expect(err).ToNot(HaveOccurred()) }) It("correctly executes PITR with a cold snapshot", func() { @@ -190,7 +197,7 @@ var _ = Describe("Verify Volume Snapshot", primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) if err != nil { return false, err @@ -405,7 +412,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -473,7 +480,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -540,7 +547,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -602,7 +609,16 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("creating the cluster to snapshot", func() { AssertCreateCluster(namespace, clusterToSnapshotName, clusterToSnapshot, env) @@ -612,7 +628,7 @@ var _ = Describe("Verify Volume Snapshot", primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) if err != nil { return false, err diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go index 64c371fc82..03906b7152 100644 --- a/tests/e2e/wal_restore_parallel_test.go +++ b/tests/e2e/wal_restore_parallel_test.go @@ -23,6 +23,7 @@ 
import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -69,7 +70,14 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -104,10 +112,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) - latestWALPath := minioPath(clusterName, latestWAL+".gz") + latestWALPath := minio.GetFilePath(clusterName, latestWAL+".gz") Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testUtils.CountFilesOnMinio(minioEnv, latestWALPath) + return minio.CountFiles(minioEnv, latestWALPath) }, RetryTimeout).Should(BeEquivalentTo(1), fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath)) }) @@ -118,15 +126,20 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun walFile3 = "0000000100000000000000F3" walFile4 = "0000000100000000000000F4" walFile5 = "0000000100000000000000F5" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile1)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile1)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile2)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile2)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile3)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile3)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile4)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile4)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile5)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile5)). ShouldNot(HaveOccurred()) }) @@ -167,7 +180,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). 
Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -193,7 +209,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -241,7 +260,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#5 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #6 wal is not present") @@ -250,7 +272,8 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Generate a new wal file; the archive also contains WAL #6. By("forging a new wal file, the #6 wal", func() { walFile6 = "0000000100000000000000F6" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile6)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile6)). ShouldNot(HaveOccurred()) }) @@ -273,7 +296,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wal files exist in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is still there") @@ -321,7 +347,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wals in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #7 and #8 wal is not present") diff --git a/tests/utils/azurite.go b/tests/utils/azurite.go index c7732104b1..7ea3ed7903 100644 --- a/tests/utils/azurite.go +++ b/tests/utils/azurite.go @@ -17,7 +17,11 @@ limitations under the License. 
package utils import ( + "encoding/json" + "fmt" "os" + "strings" + "time" apiv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -26,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" ) @@ -119,7 +124,7 @@ func InstallAzCli(namespace string, env *TestingEnvironment) error { return nil } -// getAzuriteClientPod get the cli client pod +// getAzuriteClientPod get the cli client pod/home/zeus/src/cloudnative-pg/pkg func getAzuriteClientPod(namespace string) corev1.Pod { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, @@ -346,3 +351,295 @@ func getStorageCredentials(namespace string) corev1.Secret { } return azuriteStorageSecrets } + +// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster +// backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + sourceClusterName, + targetTime, + storageCredentialsSecretName, + azStorageAccount, + azBlobContainer string, + env *TestingEnvironment, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", + azStorageAccount, azBlobContainer) + + restoreCluster := &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: destinationPath, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + StorageAccount: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "ID", + }, + StorageKey: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "KEY", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := CreateObject(env, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external +// cluster backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzurite( + namespace, + externalClusterName, + sourceClusterName, + targetTime string, + env *TestingEnvironment, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) + + restoreCluster := &v1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: DestinationPath, + EndpointCA: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite-ca-secret", + }, + Key: "ca.crt", + }, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + ConnectionString: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite", + }, + Key: "AZURE_CONNECTION_STRING", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := CreateObject(env, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite +func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { + return fmt.Sprintf("az storage blob list --container-name %v --query \"[?contains(@.name, \\`%v\\`)].name\" "+ + "--connection-string $AZURE_CONNECTION_STRING", + clusterName, path) +} + +// ComposeAzBlobListCmd builds the Azure storage blob list command +func ComposeAzBlobListCmd( + configuration AzureConfiguration, + clusterName, + path string, +) string { + return fmt.Sprintf("az storage blob list --account-name %v "+ + "--account-key %v "+ + "--container-name %v "+ + "--prefix %v/ "+ + "--query \"[?contains(@.name, \\`%v\\`)].name\"", + configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) +} + +// CountFilesOnAzureBlobStorage counts files on Azure Blob storage +func CountFilesOnAzureBlobStorage( + configuration AzureConfiguration, + clusterName, + path string, +) (int, error) { + azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) + out, _, err := RunUnchecked(azBlobListCmd) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage. 
using Azurite +func CountFilesOnAzuriteBlobStorage( + namespace, + clusterName, + path string, +) (int, error) { + azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) + out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ + "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// verifySASTokenWriteActivity returns true if the given token has RW permissions, +// otherwise it returns false +func verifySASTokenWriteActivity(containerName string, id string, key string) bool { + _, _, err := RunUnchecked(fmt.Sprintf("az storage container create "+ + "--name %v --account-name %v "+ + "--sas-token %v", containerName, id, key)) + + return err == nil +} + +// CreateSASTokenCredentials generates Secrets for the Azure Blob Storage +func CreateSASTokenCredentials(namespace string, id string, key string, env *TestingEnvironment) error { + // Adding 24 hours to the current time + date := time.Now().UTC().Add(time.Hour * 24) + // Creating date time format for az command + expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", + date.Year(), + date.Month(), + date.Day(), + date.Hour(), + date.Minute()) + + out, _, err := Run(fmt.Sprintf( + // SAS Token at Blob Container level does not currently work in Barman Cloud + // https://github.com/EnterpriseDB/barman/issues/388 + // we will use SAS Token at Storage Account level + // ( "az storage container generate-sas --account-name %v "+ + // "--name %v "+ + // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ + // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", + // id, blobContainerName ) + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions cdlruwap --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + SASTokenRW := strings.TrimRight(out, "\n") + + out, _, err = Run(fmt.Sprintf( + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions lr --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + + SASTokenRO := strings.TrimRight(out, "\n") + isReadWrite := verifySASTokenWriteActivity("restore-cluster-sas", id, SASTokenRO) + if isReadWrite { + return fmt.Errorf("expected token to be ready only") + } + + _, err = CreateObjectStorageSecret(namespace, "backup-storage-creds-sas", id, SASTokenRW, env) + if err != nil { + return err + } + + _, err = CreateObjectStorageSecret(namespace, "restore-storage-creds-sas", id, SASTokenRO, env) + if err != nil { + return err + } + + return nil +} diff --git a/tests/utils/backup.go b/tests/utils/backup.go index d88c0b5504..e07f20d2a5 100644 --- a/tests/utils/backup.go +++ b/tests/utils/backup.go @@ -17,7 +17,6 @@ limitations under the License. package utils import ( - "encoding/json" "fmt" "os" @@ -27,7 +26,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - . "github.com/onsi/gomega" // nolint + . "github.com/onsi/ginkgo/v2" // nolint + . 
"github.com/onsi/gomega" // nolint ) // ExecuteBackup performs a backup and checks the backup status @@ -159,93 +159,6 @@ func CreateClusterFromBackupUsingPITR( return cluster, nil } -// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster -// backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - sourceClusterName, - targetTime, - storageCredentialsSecretName, - azStorageAccount, - azBlobContainer string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", - azStorageAccount, azBlobContainer) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: destinationPath, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - StorageAccount: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "ID", - }, - StorageKey: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "KEY", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - // CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster // backup with PITR func CreateClusterFromExternalClusterBackupWithPITROnMinio( @@ -335,143 +248,6 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio( return cluster, nil } -// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external -// cluster backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, - externalClusterName, - sourceClusterName, - targetTime string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - 
Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: DestinationPath, - EndpointCA: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite-ca-secret", - }, - Key: "ca.crt", - }, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - ConnectionString: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite", - }, - Key: "AZURE_CONNECTION_STRING", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite -func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { - return fmt.Sprintf("az storage blob list --container-name %v --query \"[?contains(@.name, \\`%v\\`)].name\" "+ - "--connection-string $AZURE_CONNECTION_STRING", - clusterName, path) -} - -// ComposeAzBlobListCmd builds the Azure storage blob list command -func ComposeAzBlobListCmd( - configuration AzureConfiguration, - clusterName, - path string, -) string { - return fmt.Sprintf("az storage blob list --account-name %v "+ - "--account-key %v "+ - "--container-name %v "+ - "--prefix %v/ "+ - "--query \"[?contains(@.name, \\`%v\\`)].name\"", - configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) -} - -// CountFilesOnAzureBlobStorage counts files on Azure Blob storage -func CountFilesOnAzureBlobStorage( - configuration AzureConfiguration, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) - out, _, err := RunUnchecked(azBlobListCmd) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - -// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage. 
using Azurite -func CountFilesOnAzuriteBlobStorage( - namespace, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) - out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ - "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - // GetConditionsInClusterStatus get conditions values as given type from cluster object status func GetConditionsInClusterStatus( namespace, @@ -593,3 +369,18 @@ func (env TestingEnvironment) GetVolumeSnapshot( } return volumeSnapshot, nil } + +// AssertBackupConditionInClusterStatus check that the backup condition in the Cluster's Status +// eventually returns true +func AssertBackupConditionInClusterStatus(env *TestingEnvironment, namespace, clusterName string) { + By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { + Eventually(func() (string, error) { + getBackupCondition, err := GetConditionsInClusterStatus( + namespace, clusterName, env, apiv1.ConditionBackup) + if err != nil { + return "", err + } + return string(getBackupCondition.Status), nil + }, 300, 5).Should(BeEquivalentTo("True")) + }) +} diff --git a/tests/utils/minio.go b/tests/utils/minio/minio.go similarity index 76% rename from tests/utils/minio.go rename to tests/utils/minio/minio.go index d69821c6f3..a5f878ed85 100644 --- a/tests/utils/minio.go +++ b/tests/utils/minio/minio.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package minio contains all the require functions to setup a MinIO deployment and +// query this MinIO deployment using the MinIO API +package minio import ( "encoding/json" "fmt" "os" + "path/filepath" "strconv" "strings" "time" @@ -36,6 +39,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils" ) const ( @@ -43,9 +47,9 @@ const ( minioClientImage = "minio/mc:RELEASE.2022-06-11T21-10-36Z" ) -// MinioEnv contains all the information related or required by MinIO deployment and +// Env contains all the information related or required by MinIO deployment and // used by the functions on every test -type MinioEnv struct { +type Env struct { Client *corev1.Pod CaPair *certs.KeyPair CaSecretObj corev1.Secret @@ -56,9 +60,9 @@ type MinioEnv struct { Timeout uint } -// MinioSetup contains the resources needed for a working minio server deployment: +// Setup contains the resources needed for a working minio server deployment: // a PersistentVolumeClaim, a Deployment and a Service -type MinioSetup struct { +type Setup struct { PersistentVolumeClaim corev1.PersistentVolumeClaim Deployment appsv1.Deployment Service corev1.Service @@ -69,10 +73,10 @@ type TagSet struct { Tags map[string]string `json:"tagset"` } -// InstallMinio installs minio in a given namespace -func InstallMinio( - env *TestingEnvironment, - minioSetup MinioSetup, +// installMinio installs minio in a given namespace +func installMinio( + env *utils.TestingEnvironment, + minioSetup Setup, timeoutSeconds uint, ) error { if err := env.Client.Create(env.Ctx, &minioSetup.PersistentVolumeClaim); err != nil { @@ -110,15 +114,15 @@ func InstallMinio( return err } -// MinioDefaultSetup returns the definition for the default minio setup -func MinioDefaultSetup(namespace 
string) (MinioSetup, error) { - pvc, err := MinioDefaultPVC(namespace) +// defaultSetup returns the definition for the default minio setup +func defaultSetup(namespace string) (Setup, error) { + pvc, err := defaultPVC(namespace) if err != nil { - return MinioSetup{}, err + return Setup{}, err } - deployment := MinioDefaultDeployment(namespace, pvc) - service := MinioDefaultSVC(namespace) - setup := MinioSetup{ + deployment := defaultDeployment(namespace, pvc) + service := defaultSVC(namespace) + setup := Setup{ PersistentVolumeClaim: pvc, Deployment: deployment, Service: service, @@ -126,8 +130,8 @@ func MinioDefaultSetup(namespace string) (MinioSetup, error) { return setup, nil } -// MinioDefaultDeployment returns a default Deployment for minio -func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment { +// defaultDeployment returns a default Deployment for minio +func defaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, } @@ -222,8 +226,8 @@ func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeCl return minioDeployment } -// MinioDefaultSVC returns a default Service for minio -func MinioDefaultSVC(namespace string) corev1.Service { +// defaultSVC returns a default Service for minio +func defaultSVC(namespace string) corev1.Service { minioService := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "minio-service", @@ -245,8 +249,8 @@ func MinioDefaultSVC(namespace string) corev1.Service { return minioService } -// MinioDefaultPVC returns a default PVC for minio -func MinioDefaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { +// defaultPVC returns a default PVC for minio +func defaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { const claimName = "minio-pv-claim" storageClass, ok := os.LookupEnv("E2E_DEFAULT_STORAGE_CLASS") if !ok { @@ -273,11 +277,11 @@ func MinioDefaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { return minioPVC, nil } -// MinioSSLSetup returns the definition for a minio setup using SSL -func MinioSSLSetup(namespace string) (MinioSetup, error) { - setup, err := MinioDefaultSetup(namespace) +// sslSetup returns the definition for a minio setup using SSL +func sslSetup(namespace string) (Setup, error) { + setup, err := defaultSetup(namespace) if err != nil { - return MinioSetup{}, err + return Setup{}, err } const tlsVolumeName = "secret-volume" const tlsVolumeMountPath = "/etc/secrets/certs" @@ -341,8 +345,8 @@ func MinioSSLSetup(namespace string) (MinioSetup, error) { return setup, nil } -// MinioDefaultClient returns the default Pod definition for a minio client -func MinioDefaultClient(namespace string) corev1.Pod { +// defaultClient returns the default Pod definition for a minio client +func defaultClient(namespace string) corev1.Pod { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, } @@ -403,8 +407,8 @@ func MinioDefaultClient(namespace string) corev1.Pod { return minioClient } -// MinioSSLClient returns the Pod definition for a minio client using SSL -func MinioSSLClient(namespace string) corev1.Pod { +// sslClient returns the Pod definition for a minio client using SSL +func sslClient(namespace string) corev1.Pod { const ( configVolumeMountPath = "/mc/.mc" configVolumeName = "mc-config" @@ -414,7 +418,7 @@ func MinioSSLClient(namespace string) corev1.Pod { ) var secretMode 
int32 = 0o600 - minioClient := MinioDefaultClient(namespace) + minioClient := defaultClient(namespace) minioClient.Spec.Volumes = append(minioClient.Spec.Volumes, corev1.Volume{ Name: configVolumeName, @@ -448,8 +452,8 @@ func MinioSSLClient(namespace string) corev1.Pod { return minioClient } -// MinioDeploy will create a full MinIO deployment defined inthe minioEnv variable -func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, error) { +// Deploy will create a full MinIO deployment defined in the minioEnv variable +func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) { var err error minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio") if err != nil { @@ -457,7 +461,7 @@ func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, erro } minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) - if _, err = CreateObject(env, &minioEnv.CaSecretObj); err != nil { + if _, err = utils.CreateObject(env, &minioEnv.CaSecretObj); err != nil { return nil, err } @@ -474,20 +478,20 @@ func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, erro return nil, err } - setup, err := MinioSSLSetup(minioEnv.Namespace) + setup, err := sslSetup(minioEnv.Namespace) if err != nil { return nil, err } - if err = InstallMinio(env, setup, minioEnv.Timeout); err != nil { + if err = installMinio(env, setup, minioEnv.Timeout); err != nil { return nil, err } - minioClient := MinioSSLClient(minioEnv.Namespace) + minioClient := sslClient(minioEnv.Namespace) - return &minioClient, PodCreateAndWaitForReady(env, &minioClient, 240) + return &minioClient, utils.PodCreateAndWaitForReady(env, &minioClient, 240) } -func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*corev1.Secret, error) { +func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*corev1.Secret, error) { var certSecret corev1.Secret if err := env.Client.Get(env.Ctx, types.NamespacedName{ @@ -508,24 +512,24 @@ func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*core } // CreateCaSecret creates the certificates required to authenticate against the the MinIO service -func (m *MinioEnv) CreateCaSecret(env *TestingEnvironment, namespace string) error { +func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) error { caSecret, err := m.getCaSecret(env, namespace) if err != nil { return err } - _, err = CreateObject(env, caSecret) + _, err = utils.CreateObject(env, caSecret) return err } -// CountFilesOnMinio uses the minioClient in the given `namespace` to count the +// CountFiles uses the minioClient in the given `namespace` to count the // amount of files matching the given `path` -func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { +func CountFiles(minioEnv *Env, path string) (value int, err error) { var stdout string - stdout, _, err = RunUnchecked(fmt.Sprintf( + stdout, _, err = utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeFindMinioCmd(path, "minio"))) + composeFindCmd(path, "minio"))) if err != nil { return -1, err } @@ -533,41 +537,41 @@ func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { return value, err } -// ListFilesOnMinio uses the minioClient in the given `namespace` to list the +// ListFiles uses the minioClient in the given `namespace` to list the // paths matching the given `path` -func
ListFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) { +func ListFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := RunUnchecked(fmt.Sprintf( + stdout, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeListFilesMinio(path, "minio"))) + composeListFiles(path, "minio"))) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil } -// composeListFilesMinio builds the Minio command to list the filenames matching a given path -func composeListFilesMinio(path string, serviceName string) string { +// composeListFiles builds the Minio command to list the filenames matching a given path +func composeListFiles(path string, serviceName string) string { return fmt.Sprintf("sh -c 'mc find %v --path %v'", serviceName, path) } -// composeListFilesMinio builds the Minio command to list the filenames matching a given path -func composeCleanFilesMinio(path string) string { +// composeCleanFiles builds the Minio command to remove the files matching a given path +func composeCleanFiles(path string) string { return fmt.Sprintf("sh -c 'mc rm --force --recursive %v'", path) } -// composeFindMinioCmd builds the Minio find command -func composeFindMinioCmd(path string, serviceName string) string { +// composeFindCmd builds the Minio find command +func composeFindCmd(path string, serviceName string) string { return fmt.Sprintf("sh -c 'mc find %v --path %v | wc -l'", serviceName, path) } -// GetFileTagsOnMinio will use the minioClient to retrieve the tags in a specified path -func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { +// GetFileTags will use the minioClient to retrieve the tags in a specified path +func GetFileTags(minioEnv *Env, path string) (TagSet, error) { var output TagSet // Make sure we have a registered backup to access - out, _, err := RunUncheckedRetry(fmt.Sprintf( + out, _, err := utils.RunUncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc find minio --path %v | head -n1'", minioEnv.Namespace, minioEnv.Client.Name, @@ -578,7 +582,7 @@ func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { walFile := strings.Trim(out, "\n") - stdout, _, err := RunUncheckedRetry(fmt.Sprintf( + stdout, _, err := utils.RunUncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'", minioEnv.Namespace, minioEnv.Client.Name, @@ -594,8 +598,8 @@ func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { return output, nil } -// MinioTestConnectivityUsingBarmanCloudWalArchive returns true if test connection is successful else false -func MinioTestConnectivityUsingBarmanCloudWalArchive( +// TestConnectivityUsingBarmanCloudWalArchive returns true if test connection is successful else false +func TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterName, podName, @@ -609,7 +613,7 @@ func MinioTestConnectivityUsingBarmanCloudWalArchive( "barman-cloud-wal-archive --cloud-provider aws-s3 --endpoint-url https://%s:9000 s3://cluster-backups/ %s "+ "000000010000000000000000 --test", postgres.BarmanBackupEndpointCACertificateLocation, id, key, minioSvcName, clusterName) - _, _, err := RunUnchecked(fmt.Sprintf( + _, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -c postgres -- /bin/bash -c \"%v\"", namespace, podName, @@ -620,16 +624,25 @@ func MinioTestConnectivityUsingBarmanCloudWalArchive( return true, nil } -// CleanFilesOnMinio clean files on minio for a given path
-func CleanFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) { +// CleanFiles cleans files on minio for a given path +func CleanFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := RunUnchecked(fmt.Sprintf( + stdout, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeCleanFilesMinio(path))) + composeCleanFiles(path))) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil } + +// GetFilePath gets the MinIO file string for WAL/backup objects in a configured bucket +func GetFilePath(serverName, fileName string) string { + // the * regexes enable matching these typical paths: + // minio/backups/serverName/base/20220618T140300/data.tar + // minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz + // minio/backups/serverName/wals/00000002.history.gz + return filepath.Join("*", serverName, "*", fileName) +} diff --git a/tests/utils/secrets.go b/tests/utils/secrets.go index f45c2b1cb9..c6f01b3f10 100644 --- a/tests/utils/secrets.go +++ b/tests/utils/secrets.go @@ -21,6 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -101,3 +102,30 @@ func GetCredentials( password := string(secret.Data["password"]) return username, password, nil } + +// CreateObjectStorageSecret generates an Opaque Secret with a given ID and Key +func CreateObjectStorageSecret( + namespace string, + secretName string, + id string, + key string, + env *TestingEnvironment, +) (*corev1.Secret, error) { + targetSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + StringData: map[string]string{ + "ID": id, + "KEY": key, + }, + Type: corev1.SecretTypeOpaque, + } + obj, err := CreateObject(env, targetSecret) + if err != nil { + return nil, err + } + + return obj.(*corev1.Secret), nil +} From fdafa5546850c67a5207bf65bfa5091966a533c5 Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 23 Oct 2024 12:27:01 +0200 Subject: [PATCH 04/10] feat: Public Cloud K8S versions update (#5918) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 4c3493a7b5..873c7f6786 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,5 +1,5 @@ [ - "1.30.4", - "1.29.8", + "1.30.5", + "1.29.9", "1.28.9" ] From 4d2984d4c4278189f5cf9c78229c98e35a45aedd Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 23 Oct 2024 15:14:36 +0200 Subject: [PATCH 05/10] test: remove redundant log capture in tablespace tests (#5781) After #5790, we capture the logs for the whole cluster, so it is unnecessary to set up the log capture for the single tests. Closes #5847 Signed-off-by: Jonathan Gonzalez V. --- tests/e2e/tablespaces_test.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index cad9524df3..afbaa42c13 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -17,8 +17,6 @@ limitations under the License.
package e2e import ( - "bytes" - "context" "fmt" "os" "path" @@ -36,7 +34,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" @@ -79,19 +76,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) cluster, err = env.GetCluster(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - - clusterLogs := logs.ClusterStreamingRequest{ - Cluster: cluster, - Options: &corev1.PodLogOptions{ - Follow: true, - }, - } - var buffer bytes.Buffer - go func() { - defer GinkgoRecover() - err = clusterLogs.SingleStream(context.TODO(), &buffer) - Expect(err).ToNot(HaveOccurred()) - }() } Context("on a new cluster with tablespaces", Ordered, func() { From 8ff0929affe93535b701b65b577282c87c63bb3f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:28:45 +0200 Subject: [PATCH 06/10] test: Updated Postgres versions used in E2E tests (#5863) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 73e9d1a8d0..3f2a5d2f85 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "17": [ "17.0", - "17.0-15" + "17.0-20" ], "16": [ "16.4", From 1a70c90a59211d1198caf6e6d667318509d93bd3 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 23 Oct 2024 22:26:09 +0200 Subject: [PATCH 07/10] fix(plugin): handle multiple containers in `kubectl cnpg logs` (#5931) This patch fixes an issue in the `kubectl cnpg logs` command that leads to a failure if the instance pod has more than one container. 
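For illustration, a minimal sketch of the per-container pattern this fix adopts: every running container gets its own copy of the log options instead of sharing (and mutating) a single `PodLogOptions` value. The helper below is hypothetical, not part of the plugin, and only assumes the standard client-go API.

```go
// Hypothetical helper (not the operator's code): stream the logs of every
// running container in a pod, cloning the base options so that concurrent
// streams never share a mutated PodLogOptions struct.
package podlogs

import (
	"context"
	"fmt"
	"io"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

func streamAllContainers(
	ctx context.Context,
	client kubernetes.Interface,
	pod *corev1.Pod,
	base *corev1.PodLogOptions,
	out io.Writer,
) error {
	for _, status := range pod.Status.ContainerStatuses {
		if status.State.Running == nil {
			continue // skip containers that are not running
		}
		opts := base.DeepCopy() // one PodLogOptions per container
		if opts == nil {
			opts = &corev1.PodLogOptions{}
		}
		opts.Container = status.Name // select this container explicitly
		stream, err := client.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, opts).Stream(ctx)
		if err != nil {
			return err
		}
		fmt.Fprintf(out, "==> %s/%s <==\n", pod.Name, status.Name)
		if _, err := io.Copy(out, stream); err != nil {
			_ = stream.Close()
			return err
		}
		if err := stream.Close(); err != nil {
			return err
		}
	}
	return nil
}
```

The actual change applies the same idea by having `getLogOptions` return a deep copy of the caller-provided options for each container, as shown in the diff below.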
Closes #5905 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/utils/logs/cluster_logs.go | 24 +++++++++----- pkg/utils/logs/cluster_logs_test.go | 51 +++++++++++++++++++---------- 2 files changed, 50 insertions(+), 25 deletions(-) diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/utils/logs/cluster_logs.go index c0afc0a314..3e5b85e6c9 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/utils/logs/cluster_logs.go @@ -25,7 +25,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -46,7 +46,7 @@ const DefaultFollowWaiting time.Duration = 1 * time.Second // streaming type ClusterStreamingRequest struct { Cluster *apiv1.Cluster - Options *v1.PodLogOptions + Options *corev1.PodLogOptions Previous bool `json:"previous,omitempty"` FollowWaiting time.Duration // NOTE: the Client argument may be omitted, but it is good practice to pass it @@ -62,14 +62,17 @@ func (csr *ClusterStreamingRequest) getClusterNamespace() string { return csr.Cluster.Namespace } -func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *v1.PodLogOptions { +func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *corev1.PodLogOptions { if csr.Options == nil { - csr.Options = &v1.PodLogOptions{ + return &corev1.PodLogOptions{ Container: containerName, + Previous: csr.Previous, } } - csr.Options.Previous = csr.Previous - return csr.Options + options := csr.Options.DeepCopy() + options.Container = containerName + options.Previous = csr.Previous + return options } func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { @@ -135,6 +138,8 @@ func (as *activeSet) add(name string) { // has returns true if and only if name is active func (as *activeSet) has(name string) bool { + as.m.Lock() + defer as.m.Unlock() _, found := as.set[name] return found } @@ -149,6 +154,8 @@ func (as *activeSet) drop(name string) { // isZero checks if there are any active processes func (as *activeSet) isZero() bool { + as.m.Lock() + defer as.m.Unlock() return len(as.set) == 0 } @@ -169,7 +176,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. for { var ( - podList *v1.PodList + podList *corev1.PodList err error ) if isFirstScan || csr.Options.Follow { @@ -189,6 +196,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. return nil } + wrappedWriter := safeWriterFrom(writer) for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { if container.State.Running != nil { @@ -204,7 +212,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. container.Name, client, streamSet, - safeWriterFrom(writer), + wrappedWriter, ) } } diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/utils/logs/cluster_logs_test.go index 4fa7b6f6bc..b0561d8a22 100644 --- a/pkg/utils/logs/cluster_logs_test.go +++ b/pkg/utils/logs/cluster_logs_test.go @@ -22,7 +22,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -33,6 +33,23 @@ import ( . 
"github.com/onsi/gomega" ) +type syncBuffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +func (b *syncBuffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} + var _ = Describe("Cluster logging tests", func() { clusterNamespace := "cluster-test" clusterName := "myTestCluster" @@ -42,7 +59,7 @@ var _ = Describe("Cluster logging tests", func() { Name: clusterName, }, } - pod := &v1.Pod{ + pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, Name: clusterName + "-1", @@ -50,18 +67,18 @@ var _ = Describe("Cluster logging tests", func() { utils.ClusterLabelName: clusterName, }, }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { Name: "postgresql", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, }, }, } - podWithSidecars := &v1.Pod{ + podWithSidecars := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, Name: clusterName + "-1", @@ -69,18 +86,18 @@ var _ = Describe("Cluster logging tests", func() { utils.ClusterLabelName: clusterName, }, }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { Name: "postgresql", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, { Name: "sidecar", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, }, @@ -96,7 +113,7 @@ var _ = Describe("Cluster logging tests", func() { defer wait.Done() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: false, }, Client: client, @@ -119,7 +136,7 @@ var _ = Describe("Cluster logging tests", func() { defer wait.Done() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: false, }, Client: client, @@ -134,7 +151,7 @@ var _ = Describe("Cluster logging tests", func() { It("should catch extra logs if given the follow option", func(ctx context.Context) { client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer + var logBuffer syncBuffer // let's set a short follow-wait, and keep the cluster streaming for two // cycles followWaiting := 200 * time.Millisecond @@ -143,7 +160,7 @@ var _ = Describe("Cluster logging tests", func() { defer GinkgoRecover() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: true, }, FollowWaiting: followWaiting, From 919d2b52a78363e3a65d4a69322665d741049240 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:33:27 +0200 Subject: [PATCH 08/10] fix(deps): update kubernetes patches to v0.31.2 (main) (#5932) https://github.com/kubernetes/api `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/apiextensions-apiserver `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/apimachinery `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/cli-runtime `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/client-go 
`v0.31.1` -> `v0.31.2` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f6aa11ec03..3714fa4102 100644 --- a/go.mod +++ b/go.mod @@ -40,11 +40,11 @@ require ( golang.org/x/term v0.25.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.1 - k8s.io/apiextensions-apiserver v0.31.1 - k8s.io/apimachinery v0.31.1 - k8s.io/cli-runtime v0.31.1 - k8s.io/client-go v0.31.1 + k8s.io/api v0.31.2 + k8s.io/apiextensions-apiserver v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/cli-runtime v0.31.2 + k8s.io/client-go v0.31.2 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 diff --git a/go.sum b/go.sum index 1c1d7bd228..7b5f00c955 100644 --- a/go.sum +++ b/go.sum @@ -278,16 +278,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= -k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ= +k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= From 52b991f62a879acd3a3b192032b5dc9e594e93fa Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 24 Oct 2024 14:33:58 +0200 Subject: [PATCH 09/10] test: improve unit tests and add race condition detection (#5936) * Added `make test-race` target to the Makefile to enable running tests with the Go race detector. This helps catch potential concurrency issues. * Added the missing `RunSpec` invocation in the `internal/cmd/manager/instance/run` package. 
* Marked relevant test suites with the `Ordered` modifier to ensure proper execution order for dependent tests. * Moved shared variables initialization in a `BeforeEach` block. * Avoided disrupting the formatting of the test output by ensuring there was no output to stdout during the tests. * Renamed some imported packages to improve code clarity and readability. * Removed empty and unused test suites. * Improved test suite setup and teardown with `DeferCleanup()` to handle cleanup operations consistently and avoid manual `AfterSuite` calls. * Used `SpecContext` injected parameter instead of `context.TODO()` when a context is needed. * Applied various style fixes for consistency and code quality. Signed-off-by: Marco Nenciarini Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Jaime Silvela --- Makefile | 8 +++- api/v1/cluster_funcs_test.go | 6 ++- api/v1/scheduledbackup_funcs_test.go | 6 ++- .../run}/suite_test.go | 6 +-- internal/cmd/plugin/logs/cluster_logs_test.go | 47 +++++++++++-------- internal/cmd/plugin/logs/cluster_test.go | 14 ++++-- internal/cmd/plugin/logs/suite_test.go | 14 ------ internal/cmd/plugin/suite_test.go | 14 +++--- .../controller/cluster_controller_test.go | 10 ++-- internal/controller/cluster_create_test.go | 14 +++--- internal/controller/cluster_restore_test.go | 26 ++++------ internal/controller/cluster_scale_test.go | 4 +- internal/controller/pooler_update_test.go | 9 ++-- .../controller/roles/postgres_test.go | 8 ++-- .../controller/roles/reconciler_test.go | 10 ++-- .../slots/reconciler/replicationslot_test.go | 12 ++--- .../controller/slots/runner/runner_test.go | 5 +- pkg/certs/certs_test.go | 8 ++-- pkg/certs/k8s_test.go | 6 +-- pkg/certs/tls_test.go | 6 +-- pkg/management/postgres/configuration_test.go | 1 - .../postgres/logicalimport/database_test.go | 21 ++++----- .../postgres/logicalimport/role_test.go | 15 +++--- .../postgres/logpipe/logpipe_test.go | 21 ++++----- .../postgres/metrics/collector_test.go | 2 +- pkg/management/postgres/restore_test.go | 17 ++++--- .../postgres/webserver/suite_test.go | 29 ------------ .../persistentvolumeclaim/reconciler_test.go | 2 +- .../persistentvolumeclaim/resources_test.go | 6 +-- pkg/resources/retry_test.go | 8 +--- pkg/utils/discovery_test.go | 9 +++- pkg/utils/logs/logs_test.go | 3 +- 32 files changed, 160 insertions(+), 207 deletions(-) rename internal/cmd/manager/{walarchive => instance/run}/suite_test.go (88%) delete mode 100644 pkg/management/postgres/webserver/suite_test.go diff --git a/Makefile b/Makefile index 192d152fd0..50cba47855 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,13 @@ test: generate fmt vet manifests envtest ## Run tests. source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\ - go test -coverpkg=./... --count=1 -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils ; + go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils + +test-race: generate fmt vet manifests envtest ## Run tests enabling race detection. + mkdir -p ${ENVTEST_ASSETS_DIR} ;\ + source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ + go run github.com/onsi/ginkgo/v2/ginkgo -r -p --skip-package=e2e \ + --race --keep-going --fail-on-empty --randomize-all --randomize-suites e2e-test-kind: ## Run e2e tests locally using kind. 
hack/e2e/run-e2e-kind.sh diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 8d6f0950ac..c478c2b3ae 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -452,7 +452,7 @@ var _ = Describe("external cluster list", func() { }) }) -var _ = Describe("look up for secrets", func() { +var _ = Describe("look up for secrets", Ordered, func() { cluster := Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "clustername", @@ -479,15 +479,19 @@ var _ = Describe("look up for secrets", func() { It("retrieves client CA secret name", func() { Expect(cluster.GetClientCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves server CA secret name", func() { Expect(cluster.GetServerCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves all names needed to build a server CA certificate", func() { names := cluster.GetClusterAltDNSNames() Expect(names).To(HaveLen(12)) diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go index e68b20ba42..9ef98a3692 100644 --- a/api/v1/scheduledbackup_funcs_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -26,9 +26,13 @@ import ( ) var _ = Describe("Scheduled backup", func() { - scheduledBackup := &ScheduledBackup{} + var scheduledBackup *ScheduledBackup backupName := "test" + BeforeEach(func() { + scheduledBackup = &ScheduledBackup{} + }) + It("properly creates a backup with no annotations", func() { backup := scheduledBackup.CreateBackup("test") Expect(backup).ToNot(BeNil()) diff --git a/internal/cmd/manager/walarchive/suite_test.go b/internal/cmd/manager/instance/run/suite_test.go similarity index 88% rename from internal/cmd/manager/walarchive/suite_test.go rename to internal/cmd/manager/instance/run/suite_test.go index e8e0072475..e1d9122745 100644 --- a/internal/cmd/manager/walarchive/suite_test.go +++ b/internal/cmd/manager/instance/run/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package walarchive +package run import ( "testing" @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" ) -func TestUtils(t *testing.T) { +func TestSuite(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "walarchive test suite") + RunSpecs(t, "instance run test suite") } diff --git a/internal/cmd/plugin/logs/cluster_logs_test.go b/internal/cmd/plugin/logs/cluster_logs_test.go index 326ab2a313..bcd7a87a1f 100644 --- a/internal/cmd/plugin/logs/cluster_logs_test.go +++ b/internal/cmd/plugin/logs/cluster_logs_test.go @@ -17,12 +17,11 @@ limitations under the License. package logs import ( - "context" "path" - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" - fake2 "k8s.io/client-go/kubernetes/fake" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -34,18 +33,18 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("Get the logs", func() { +var _ = Describe("Get the logs", Ordered, func() { namespace := "default" clusterName := "test-cluster" - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } - client := fake2.NewSimpleClientset(pod) + client := fakeClient.NewSimpleClientset(pod) cluster := &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -54,20 +53,24 @@ var _ = Describe("Get the logs", func() { }, Spec: apiv1.ClusterSpec{}, } - cl := clusterLogs{ - ctx: context.TODO(), - clusterName: clusterName, - namespace: namespace, - follow: true, - timestamp: true, - tailLines: -1, - client: client, - } + var cl clusterLogs plugin.Client = fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster). Build() + BeforeEach(func(ctx SpecContext) { + cl = clusterLogs{ + ctx: ctx, + clusterName: clusterName, + namespace: namespace, + follow: true, + timestamp: true, + tailLines: -1, + client: client, + } + }) + It("should get a proper cluster", func() { cluster, err := getCluster(cl) Expect(err).ToNot(HaveOccurred()) @@ -95,18 +98,24 @@ var _ = Describe("Get the logs", func() { }) It("should get the proper stream for logs", func() { + PauseOutputInterception() err := followCluster(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should save the logs to file", func() { + tempDir := GinkgoT().TempDir() cl.outputFile = path.Join(tempDir, "test-file.logs") + PauseOutputInterception() err := saveClusterLogs(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should fail if can't write a file", func() { - cl.outputFile = "/this-does-not-exist/test-file.log" + tempDir := GinkgoT().TempDir() + cl.outputFile = path.Join(tempDir, "this-does-not-exist/test-file.log") err := saveClusterLogs(cl) Expect(err).To(HaveOccurred()) }) diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go index 1df2081fd4..d66206731a 100644 --- a/internal/cmd/plugin/logs/cluster_test.go +++ b/internal/cmd/plugin/logs/cluster_test.go @@ -17,8 +17,8 @@ limitations under the License. 
package logs import ( - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -35,14 +35,14 @@ var _ = Describe("Test the command", func() { clusterName := "test-cluster" namespace := "default" var cluster *apiv1.Cluster - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } cluster = &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -62,14 +62,18 @@ var _ = Describe("Test the command", func() { It("should not fail, with cluster name as argument", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("could follow the logs", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName, "-f"}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/internal/cmd/plugin/logs/suite_test.go b/internal/cmd/plugin/logs/suite_test.go index c5bd148d2d..476d2ff84b 100644 --- a/internal/cmd/plugin/logs/suite_test.go +++ b/internal/cmd/plugin/logs/suite_test.go @@ -17,27 +17,13 @@ limitations under the License. package logs import ( - "os" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -var tempDir string - func TestPgbench(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Logs Suite") } - -var _ = BeforeSuite(func() { - var err error - tempDir, err = os.MkdirTemp(os.TempDir(), "logs_") - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := os.RemoveAll(tempDir) - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/cmd/plugin/suite_test.go b/internal/cmd/plugin/suite_test.go index 4628a4efab..2f0816b08b 100644 --- a/internal/cmd/plugin/suite_test.go +++ b/internal/cmd/plugin/suite_test.go @@ -48,8 +48,6 @@ func TestPlugin(t *testing.T) { } var _ = BeforeSuite(func() { - By("bootstrapping test environment") - if os.Getenv("USE_EXISTING_CLUSTER") == "true" { By("using existing config for test environment") testEnv = &envtest.Environment{} @@ -65,6 +63,12 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) + DeferCleanup(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }) + err = apiv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) @@ -74,9 +78,3 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) }) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index ef62079db2..9439f0b86b 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package controller import ( - "context" "time" cnpgTypes "github.com/cloudnative-pg/machinery/pkg/types" @@ -73,8 +72,7 @@ var _ = Describe("Updating target primary", func() { env = buildTestEnvironment() }) - It("selects the new target primary right away", func() { - ctx := context.TODO() + It("selects the new target primary right away", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace) @@ -132,8 +130,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("it should wait the failover delay to select the new target primary", func() { - ctx := context.TODO() + It("it should wait the failover delay to select the new target primary", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.FailoverDelay = 2 @@ -210,8 +207,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("Issue #1783: ensure that the scale-down behaviour remain consistent", func() { - ctx := context.TODO() + It("Issue #1783: ensure that the scale-down behaviour remain consistent", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.Instances = 2 diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index c7068a15d7..d6aa79bcbf 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -822,14 +822,12 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { namespace = "test-namespace" ) var ( - ctx context.Context proposed *corev1.Secret cli k8client.Client ) BeforeEach(func() { cli = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() - ctx = context.TODO() const secretName = "test-secret" proposed = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -843,7 +841,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret does not exist", func() { - It("should create the secret", func() { + It("should create the secret", func(ctx SpecContext) { err := createOrPatchClusterCredentialSecret(ctx, cli, proposed) Expect(err).NotTo(HaveOccurred()) @@ -857,7 +855,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists and is owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -878,7 +876,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should patch the secret if metadata differs", func() { + It("should patch the secret if metadata differs", func(ctx SpecContext) { Expect(proposed.Labels).To(HaveKeyWithValue("test", "label")) Expect(proposed.Annotations).To(HaveKeyWithValue("test", "annotation")) @@ -892,7 +890,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(patchedSecret.Annotations).To(HaveKeyWithValue("test", "annotation")) }) - It("should not patch the secret if metadata is the same", func() { + It("should not patch the secret if metadata is the same", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) @@ -913,7 +911,7 @@ 
var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists but is not owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -923,7 +921,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should not modify the secret", func() { + It("should not modify the secret", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go index 616b0cf882..cb68fc565b 100644 --- a/internal/controller/cluster_restore_test.go +++ b/internal/controller/cluster_restore_test.go @@ -37,19 +37,17 @@ import ( var _ = Describe("ensureClusterIsNotFenced", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) - getCluster := func(clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { + getCluster := func(ctx context.Context, clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { remoteCluster := &apiv1.Cluster{} err := mockCli.Get(ctx, clusterKey, remoteCluster) return remoteCluster, err } BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -67,14 +65,14 @@ var _ = Describe("ensureClusterIsNotFenced", func() { }) Context("when no instances are fenced", func() { - It("should not modify the object", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should not modify the object", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).To(Equal(origCluster.ObjectMeta)) }) @@ -91,15 +89,15 @@ var _ = Describe("ensureClusterIsNotFenced", func() { Build() }) - It("should patch the cluster and remove fenced instances", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should patch the cluster and remove fenced instances", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(origCluster.Annotations).To(HaveKey(utils.FencedInstanceAnnotation)) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).ToNot(Equal(origCluster.ObjectMeta)) @@ -110,13 +108,11 @@ var _ = Describe("ensureClusterIsNotFenced", func() { var _ = Describe("restoreClusterStatus", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -131,7 +127,7 @@ var _ = Describe("restoreClusterStatus", func() { }) Context("when restoring cluster status", func() { - 
It("should patch the cluster with the updated status", func() { + It("should patch the cluster with the updated status", func(ctx SpecContext) { latestNodeSerial := 10 targetPrimaryNodeSerial := 3 @@ -151,7 +147,6 @@ var _ = Describe("restoreClusterStatus", func() { var _ = Describe("getOrphanPVCs", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster goodPvcs []corev1.PersistentVolumeClaim @@ -159,7 +154,6 @@ var _ = Describe("getOrphanPVCs", func() { ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -268,7 +262,7 @@ var _ = Describe("getOrphanPVCs", func() { Build() }) - It("should fetch only the pvcs that belong to the cluster and without an owner", func() { + It("should fetch only the pvcs that belong to the cluster and without an owner", func(ctx SpecContext) { remotePvcs, err := getOrphanPVCs(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) Expect(remotePvcs).To(HaveLen(len(goodPvcs))) @@ -290,7 +284,7 @@ var _ = Describe("getOrphanPVCs", func() { Expect(primary).To(Equal(2)) }) - It("should correctly restore the orphan pvcs", func() { + It("should correctly restore the orphan pvcs", func(ctx SpecContext) { err := restoreOrphanPVCs(ctx, mockCli, cluster, goodPvcs) Expect(err).ToNot(HaveOccurred()) diff --git a/internal/controller/cluster_scale_test.go b/internal/controller/cluster_scale_test.go index 6b01ab1854..4ed725a82a 100644 --- a/internal/controller/cluster_scale_test.go +++ b/internal/controller/cluster_scale_test.go @@ -190,7 +190,7 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { cancel() }) - It("should delete all the jobs", func() { + It("should delete all the jobs", func(ctx SpecContext) { for _, jobName := range specs.GetPossibleJobNames(instanceName) { job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -198,7 +198,7 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { Namespace: cluster.Namespace, }, } - err := fakeClientSet.Create(context.TODO(), job) + err := fakeClientSet.Create(ctx, job) Expect(err).NotTo(HaveOccurred()) } diff --git a/internal/controller/pooler_update_test.go b/internal/controller/pooler_update_test.go index 6f0b599c23..8774d1ad8b 100644 --- a/internal/controller/pooler_update_test.go +++ b/internal/controller/pooler_update_test.go @@ -362,7 +362,6 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { var _ = Describe("ensureServiceAccountPullSecret", func() { var ( - ctx context.Context r *PoolerReconciler pooler *apiv1.Pooler conf *configuration.Data @@ -385,8 +384,6 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } BeforeEach(func() { - ctx = context.TODO() - pullSecret = generateOperatorPullSecret() conf = &configuration.Data{ @@ -417,13 +414,13 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } }) - It("should create the pull secret", func() { + It("should create the pull secret", func(ctx SpecContext) { name, err := r.ensureServiceAccountPullSecret(ctx, pooler, conf) Expect(err).ToNot(HaveOccurred()) Expect(name).To(Equal(poolerSecretName)) }) - It("should not change the pull secret if it matches", func() { + It("should not change the pull secret if it matches", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName @@ -450,7 +447,7 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { 
Expect(remoteSecret).To(BeEquivalentTo(remoteSecret)) }) - It("should reconcile the secret if it doesn't match", func() { + It("should reconcile the secret if it doesn't match", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go index d003af03f3..60fdbbe99f 100644 --- a/internal/management/controller/roles/postgres_test.go +++ b/internal/management/controller/roles/postgres_test.go @@ -548,7 +548,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { Expect(queryValidUntil.String()).To(BeEquivalentTo(expectedQueryValidUntil)) }) - It("Getting the proper TransactionID per rol", func() { + It("Getting the proper TransactionID per rol", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) prm := NewPostgresRoleManager(db) @@ -558,16 +558,16 @@ var _ = Describe("Postgres RoleManager implementation test", func() { dbRole := roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole() mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(errors.New("Kaboom")) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = prm.GetLastTransactionID(ctx, dbRole) Expect(err).To(HaveOccurred()) mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(sql.ErrNoRows) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = prm.GetLastTransactionID(ctx, dbRole) Expect(err).To(HaveOccurred()) rows.AddRow("1321") mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnRows(rows) - transID, err := prm.GetLastTransactionID(context.TODO(), dbRole) + transID, err := prm.GetLastTransactionID(ctx, dbRole) Expect(err).ToNot(HaveOccurred()) Expect(transID).To(BeEquivalentTo(1321)) }) diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go index 1ee151a170..a126b73ef4 100644 --- a/internal/management/controller/roles/reconciler_test.go +++ b/internal/management/controller/roles/reconciler_test.go @@ -17,8 +17,6 @@ limitations under the License. 
package roles import ( - "context" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -30,17 +28,17 @@ import ( ) var _ = Describe("Role reconciler test", func() { - It("reconcile an empty cluster", func() { + It("reconcile an empty cluster", func(ctx SpecContext) { cluster := &v1.Cluster{} instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err).ToNot(HaveOccurred()) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) - It("reconcile fails with no database connection", func() { + It("reconcile fails with no database connection", func(ctx SpecContext) { instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() cluster := &v1.Cluster{ @@ -59,7 +57,7 @@ var _ = Describe("Role reconciler test", func() { "failed to connect to `user=postgres database=postgres`: " + "/controller/run/.s.PGSQL.5432 (/controller/run): " + "dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory" - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err.Error()).To(BeEquivalentTo(pgStringError)) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go index 0634641475..8e90f2d068 100644 --- a/internal/management/controller/slots/reconciler/replicationslot_test.go +++ b/internal/management/controller/slots/reconciler/replicationslot_test.go @@ -103,7 +103,7 @@ func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1. 
} var _ = Describe("HA Replication Slots reconciliation in Primary", func() { - It("can create a new replication slot for a new cluster instance", func() { + It("can create a new replication slot for a new cluster instance", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -117,7 +117,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeTrue()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeFalse()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeTrue()) @@ -125,7 +125,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) - It("can delete an inactive HA replication slot that is not in the cluster", func() { + It("can delete an inactive HA replication slot that is not in the cluster", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -138,13 +138,13 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeFalse()) Expect(fakeSlotManager.replicationSlots).To(HaveLen(1)) }) - It("will not delete an active HA replication slot that is not in the cluster", func() { + It("will not delete an active HA replication slot that is not in the cluster", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -157,7 +157,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: slotPrefix + "instance3", isHA: true, active: true}]). 
To(BeTrue()) diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go index de1df33e0b..df73585c72 100644 --- a/internal/management/controller/slots/runner/runner_test.go +++ b/internal/management/controller/slots/runner/runner_test.go @@ -89,7 +89,7 @@ func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.Replica return nil } -var _ = Describe("Slot synchronization", func() { +var _ = Describe("Slot synchronization", Ordered, func() { localPodName := "cluster-2" localSlotName := "_cnpg_cluster_2" slot3 := "cluster-3" @@ -127,6 +127,7 @@ var _ = Describe("Slot synchronization", func() { Expect(localSlotsAfter.Has(slot4)).To(BeTrue()) Expect(local.slotsCreated).To(Equal(2)) }) + It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) { // advance slot3 in primary newLSN := "0/308C4D8" @@ -144,6 +145,7 @@ var _ = Describe("Slot synchronization", func() { Expect(slot.RestartLSN).To(Equal(newLSN)) Expect(local.slotsUpdated).To(Equal(1)) }) + It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) { err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4}) Expect(err).ShouldNot(HaveOccurred()) @@ -157,6 +159,7 @@ var _ = Describe("Slot synchronization", func() { Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) Expect(local.slotsDeleted).To(Equal(1)) }) + It("can drop slots in local that hold xmin", func(ctx SpecContext) { slotWithXmin := "_cnpg_xmin" err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin}) diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go index aa85c9e0da..503553552c 100644 --- a/pkg/certs/certs_test.go +++ b/pkg/certs/certs_test.go @@ -343,20 +343,22 @@ var _ = Describe("Certicate duration and expiration threshold", func() { defaultExpiringThreshold := configuration.ExpiringCheckThreshold * 24 * time.Hour tenDays := 10 * 24 * time.Hour + BeforeEach(func() { + configuration.Current = configuration.NewConfiguration() + }) + It("returns the default duration", func() { duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns the default duration if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = -1 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns a valid duration of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = 10 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(tenDays)) @@ -368,14 +370,12 @@ var _ = Describe("Certicate duration and expiration threshold", func() { }) It("returns the default check threshold if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = -1 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(defaultExpiringThreshold)) }) It("returns a valid threshold of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = 10 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(tenDays)) diff --git a/pkg/certs/k8s_test.go b/pkg/certs/k8s_test.go index 54f54c9044..043b13be82 100644 --- a/pkg/certs/k8s_test.go +++ b/pkg/certs/k8s_test.go 
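The hunks above, and most of the test changes in this patch, follow one pattern: drop the `"context"` import and the `context.TODO()` calls, and let Ginkgo inject a per-spec context by declaring the spec body as `func(ctx SpecContext)`. The sketch below is illustrative only and is not part of the patch; the `example_test` package and the `doWork` helper are hypothetical stand-ins for the functions the real specs call (`Reconcile`, `GetLastTransactionID`, `ReconcileReplicationSlots`, and so on).

```go
package example_test

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// doWork is a hypothetical helper standing in for the context-aware code
// under test; it only needs something that satisfies context.Context.
func doWork(ctx context.Context) error {
	return ctx.Err()
}

var _ = Describe("passing the spec context to the code under test", func() {
	// Before: the body took no arguments and built its own context.
	//
	//   It("does the work", func() {
	//       Expect(doWork(context.TODO())).To(Succeed())
	//   })
	//
	// After: Ginkgo injects a SpecContext that is cancelled when the spec
	// is interrupted or exceeds its timeout.
	It("does the work", func(ctx SpecContext) {
		Expect(doWork(ctx)).To(Succeed())
	})
})
```

Tying the context to the spec means the code under test is cancelled together with the spec on interrupts and timeouts, something `context.TODO()` can never do, and it also removes the need for the `"context"` import that these hunks delete.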
@@ -163,7 +163,7 @@ var _ = Describe("Root CA secret generation", func() { }) var _ = Describe("Webhook certificate validation", func() { - When("we have a valid CA secret", func() { + When("we have a valid CA secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate @@ -196,7 +196,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA and webhook secret", func() { + When("we have a valid CA and webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate var caSecret, webhookSecret *corev1.Secret @@ -220,7 +220,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA secret and expired webhook secret", func() { + When("we have a valid CA secret and expired webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate diff --git a/pkg/certs/tls_test.go b/pkg/certs/tls_test.go index 8e99876520..66039695eb 100644 --- a/pkg/certs/tls_test.go +++ b/pkg/certs/tls_test.go @@ -35,13 +35,11 @@ import ( var _ = Describe("newTLSConfigFromSecret", func() { var ( - ctx context.Context c client.Client caSecret types.NamespacedName ) BeforeEach(func() { - ctx = context.TODO() caSecret = types.NamespacedName{Name: "test-secret", Namespace: "default"} }) @@ -276,7 +274,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) @@ -295,7 +293,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().WithObjects(secret).Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 7d040f8071..f4a9d2f20d 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -84,7 +84,6 @@ var _ = Describe("testing the building of the ldap config string", func() { }) It("correctly builds a bindSearchAuth string", func() { str := buildLDAPConfigString(&cluster, ldapPassword) - fmt.Printf("here %s\n", str) Expect(str).To(Equal(fmt.Sprintf(`host all all 0.0.0.0/0 ldap ldapserver="%s" ldapport=%d `+ `ldapscheme="%s" ldaptls=1 ldapbasedn="%s" ldapbinddn="%s" `+ `ldapbindpasswd="%s" ldapsearchfilter="%s" ldapsearchattribute="%s"`, diff --git a/pkg/management/postgres/logicalimport/database_test.go b/pkg/management/postgres/logicalimport/database_test.go index 426703e5d7..cb3a6eb63a 100644 --- a/pkg/management/postgres/logicalimport/database_test.go +++ b/pkg/management/postgres/logicalimport/database_test.go @@ -17,7 +17,6 @@ limitations under the License. 
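Two related changes recur in the surrounding hunks: containers whose specs build on each other gain the `Ordered` decorator (for example "Slot synchronization" and "Webhook certificate validation"), while shared state such as `configuration.Current` or the fake discovery client is re-created in a `BeforeEach` so unrelated specs stay independent. The following sketch is not taken from the repository; the `settings` map is a hypothetical stand-in for that shared state.

```go
package example_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// settings is a hypothetical stand-in for shared package-level state such as
// a global configuration object or a fake client.
var settings map[string]int

var _ = Describe("specs that build on each other", Ordered, func() {
	// BeforeAll is only allowed in Ordered containers and runs once.
	BeforeAll(func() {
		settings = map[string]int{}
	})

	// Ordered runs these specs in the order they are written, on the same
	// process, so the second spec may rely on what the first one did.
	It("stores a value", func() {
		settings["certificateDuration"] = 90
		Expect(settings).To(HaveLen(1))
	})

	It("sees the value stored by the previous spec", func() {
		Expect(settings).To(HaveKeyWithValue("certificateDuration", 90))
	})
})

var _ = Describe("specs that must stay independent", func() {
	// Re-creating the shared object in BeforeEach gives every spec a clean
	// starting point, so no spec has to remember to restore defaults.
	BeforeEach(func() {
		settings = map[string]int{}
	})

	It("starts from an empty configuration", func() {
		Expect(settings).To(BeEmpty())
	})
})
```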
package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -32,14 +31,12 @@ import ( var _ = Describe("databaseSnapshotter methods test", func() { var ( - ctx context.Context ds databaseSnapshotter fp fakePooler mock sqlmock.Sqlmock ) BeforeEach(func() { - ctx = context.TODO() ds = databaseSnapshotter{ cluster: &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -109,13 +106,13 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should execute the query properly", func() { + It("should execute the query properly", func(ctx SpecContext) { mock.ExpectExec(createQuery).WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.executePostImportQueries(ctx, fp, "test") Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered", func() { + It("should return any error encountered", func(ctx SpecContext) { expectedErr := fmt.Errorf("will fail") mock.ExpectExec(createQuery).WillReturnError(expectedErr) err := ds.executePostImportQueries(ctx, fp, "test") @@ -123,7 +120,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { }) }) - It("should run analyze", func() { + It("should run analyze", func(ctx SpecContext) { mock.ExpectExec("ANALYZE VERBOSE").WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.analyze(ctx, fp, []string{"test"}) Expect(err).ToNot(HaveOccurred()) @@ -136,7 +133,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_extension WHERE oid >= 16384") }) - It("should drop the user-defined extensions successfully", func() { + It("should drop the user-defined extensions successfully", func(ctx SpecContext) { extensions := []string{"extension1", "extension2"} rows := sqlmock.NewRows([]string{"extname"}) @@ -150,7 +147,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should correctly handle an error when querying for extensions", func() { + It("should correctly handle an error when querying for extensions", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery.WillReturnError(expectedErr) @@ -158,7 +155,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).To(Equal(expectedErr)) }) - It("should correctly handle an error when dropping an extension", func() { + It("should correctly handle an error when dropping an extension", func(ctx SpecContext) { rows := sqlmock.NewRows([]string{"extname"}).AddRow("extension1") expectedQuery.WillReturnRows(rows) @@ -184,7 +181,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should return the explicit database list if present", func() { + It("should return the explicit database list if present", func(ctx SpecContext) { explicitDatabaseList := []string{"db1", "db2"} ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = explicitDatabaseList @@ -193,7 +190,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(explicitDatabaseList)) }) - It("should query for databases if explicit list is not present", func() { + It("should query for databases if explicit list is not present", func(ctx SpecContext) { expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} @@ -209,7 +206,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(queryDatabaseList)) }) - It("should return any error encountered when querying for databases", func() { + It("should return any error 
encountered when querying for databases", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} diff --git a/pkg/management/postgres/logicalimport/role_test.go b/pkg/management/postgres/logicalimport/role_test.go index 4d2d70d940..a42fb001fd 100644 --- a/pkg/management/postgres/logicalimport/role_test.go +++ b/pkg/management/postgres/logicalimport/role_test.go @@ -17,7 +17,6 @@ limitations under the License. package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -35,7 +34,6 @@ var _ = Describe("", func() { "WHERE ur.oid >= 16384 AND um.oid >= 16384" var ( - ctx context.Context fp fakePooler mock sqlmock.Sqlmock ri []RoleInheritance @@ -43,7 +41,6 @@ var _ = Describe("", func() { ) BeforeEach(func() { - ctx = context.TODO() db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) mock = dbMock @@ -66,7 +63,7 @@ var _ = Describe("", func() { Expect(expectationErr).ToNot(HaveOccurred()) }) - It("should clone role inheritance successfully", func() { + It("should clone role inheritance successfully", func(ctx SpecContext) { // Define the RoleInheritance result for getRoleInheritance ri := []RoleInheritance{ { @@ -95,7 +92,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered when getting role inheritance", func() { + It("should return any error encountered when getting role inheritance", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -103,7 +100,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should import role inheritance successfully", func() { + It("should import role inheritance successfully", func(ctx SpecContext) { query := fmt.Sprintf(`GRANT %s TO %s WITH ADMIN OPTION GRANTED BY %s`, pgx.Identifier{ri[0].RoleID}.Sanitize(), pgx.Identifier{ri[0].Member}.Sanitize(), @@ -117,7 +114,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return the correct role inheritances", func() { + It("should return the correct role inheritances", func(ctx SpecContext) { mock.ExpectQuery(inhQuery). WillReturnRows(sqlmock.NewRows([]string{"roleid", "member", "admin_option", "grantor"}). AddRow("role1", "member1", true, "grantor1")) @@ -127,7 +124,7 @@ var _ = Describe("", func() { Expect(ris).To(Equal(ri)) }) - It("should return any error encountered when getting role inheritances", func() { + It("should return any error encountered when getting role inheritances", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -135,7 +132,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should return any error encountered when scanning the result", func() { + It("should return any error encountered when scanning the result", func(ctx SpecContext) { mock.ExpectQuery(inhQuery).WillReturnRows(sqlmock.NewRows([]string{"wrongColumnName"}).AddRow("role1")) _, err := rm.getRoleInheritance(ctx) diff --git a/pkg/management/postgres/logpipe/logpipe_test.go b/pkg/management/postgres/logpipe/logpipe_test.go index e05bd48708..70a4902872 100644 --- a/pkg/management/postgres/logpipe/logpipe_test.go +++ b/pkg/management/postgres/logpipe/logpipe_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package logpipe import ( - "context" "errors" "os" "strings" @@ -39,9 +38,7 @@ func (writer *SpyRecordWriter) Write(record NamedRecord) { var _ = Describe("CSV file reader", func() { When("given CSV logs from logging_collector", func() { - ctx := context.TODO() - - It("can read multiple CSV lines", func() { + It("can read multiple CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines.csv") defer func() { _ = f.Close() @@ -57,7 +54,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version <= 12", func() { + It("can read multiple CSV lines on PostgreSQL version <= 12", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_12.csv") defer func() { _ = f.Close() @@ -73,7 +70,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version == 14", func() { + It("can read multiple CSV lines on PostgreSQL version == 14", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_14.csv") defer func() { _ = f.Close() @@ -89,7 +86,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read pgAudit CSV lines", func() { + It("can read pgAudit CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/pgaudit.csv") defer func() { _ = f.Close() @@ -110,7 +107,7 @@ var _ = Describe("CSV file reader", func() { Expect(err).ShouldNot(HaveOccurred()) input := strings.TrimRight(string(inputBuffer), " \n") - It("there are too many fields", func() { + It("there are too many fields", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + ",test" @@ -128,7 +125,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) }) - It("there are not enough fields", func() { + It("there are not enough fields", func(ctx SpecContext) { spy := SpyRecordWriter{} shorterInput := "one,two,three" @@ -146,7 +143,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(3)) }) - It("there is a trailing comma", func() { + It("there is a trailing comma", func(ctx SpecContext) { spy := SpyRecordWriter{} trailingCommaInput := input + "," @@ -164,7 +161,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) }) - It("there is a wrong number of fields on a line that is not the first", func() { + It("there is a wrong number of fields on a line that is not the first", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + "\none,two,three" @@ -183,7 +180,7 @@ var _ = Describe("CSV file reader", func() { }) }) - It("correctly handles an empty stream", func() { + It("correctly handles an empty stream", func(ctx SpecContext) { spy := SpyRecordWriter{} p := LogPipe{ record: &LoggingRecord{}, diff --git a/pkg/management/postgres/metrics/collector_test.go b/pkg/management/postgres/metrics/collector_test.go index 8ce178d225..f8399472b1 100644 --- a/pkg/management/postgres/metrics/collector_test.go +++ b/pkg/management/postgres/metrics/collector_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("Set default queries", func() { +var _ = Describe("Set default queries", Ordered, func() { q := NewQueriesCollector("test", nil, "db") It("does assign nothing with empty default queries", func() { diff --git a/pkg/management/postgres/restore_test.go b/pkg/management/postgres/restore_test.go index bd59455472..ad19c76453 100644 --- a/pkg/management/postgres/restore_test.go +++ b/pkg/management/postgres/restore_test.go @@ -17,7 +17,6 @@ limitations under the License. package postgres import ( - "context" "os" "path" @@ -44,13 +43,13 @@ var _ = Describe("testing restore InitInfo methods", func() { _ = fileutils.RemoveFile(tempDir) }) - It("should correctly restore a custom PgWal folder without data", func() { + It("should correctly restore a custom PgWal folder without data", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) @@ -59,7 +58,7 @@ var _ = Describe("testing restore InitInfo methods", func() { Expect(exists).To(BeTrue()) }) - It("should correctly migrate an existing wal folder to the new one", func() { + It("should correctly migrate an existing wal folder to the new one", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -93,7 +92,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) By("executing the restore custom wal dir function", func() { - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) }) @@ -120,7 +119,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) }) - It("should not do any changes if the symlink is already present", func() { + It("should not do any changes if the symlink is already present", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -135,16 +134,16 @@ var _ = Describe("testing restore InitInfo methods", func() { err = os.Symlink(newPgWal, pgWal) Expect(err).ToNot(HaveOccurred()) - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) - It("should not do any changes if pgWal is not set", func() { + It("should not do any changes if pgWal is not set", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) diff --git a/pkg/management/postgres/webserver/suite_test.go b/pkg/management/postgres/webserver/suite_test.go deleted file mode 100644 index 34419aa9c8..0000000000 --- a/pkg/management/postgres/webserver/suite_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package webserver - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestMetricsServer(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Postgres Webserver test suite") -} diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 4ca794faa8..05937ce383 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -184,7 +184,7 @@ var _ = Describe("Reconcile resource requests", func() { }) }) -var _ = Describe("PVC reconciliation", func() { +var _ = Describe("PVC reconciliation", Ordered, func() { const clusterName = "cluster-pvc-reconciliation" fetchPVC := func(cl client.Client, pvcToFetch corev1.PersistentVolumeClaim) corev1.PersistentVolumeClaim { diff --git a/pkg/reconciler/persistentvolumeclaim/resources_test.go b/pkg/reconciler/persistentvolumeclaim/resources_test.go index bbccc10553..791f70e30a 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources_test.go +++ b/pkg/reconciler/persistentvolumeclaim/resources_test.go @@ -17,8 +17,6 @@ limitations under the License. package persistentvolumeclaim import ( - "context" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +29,7 @@ import ( ) var _ = Describe("PVC detection", func() { - It("will list PVCs with Jobs or Pods or which are Ready", func() { + It("will list PVCs with Jobs or Pods or which are Ready", func(ctx SpecContext) { clusterName := "myCluster" makeClusterPVC := func(serial string, isResizing bool) corev1.PersistentVolumeClaim { return makePVC(clusterName, serial, serial, NewPgDataCalculator(), isResizing) @@ -48,7 +46,7 @@ var _ = Describe("PVC detection", func() { }, } EnrichStatus( - context.TODO(), + ctx, cluster, []corev1.Pod{ makePod(clusterName, "1", specs.ClusterRoleLabelPrimary), diff --git a/pkg/resources/retry_test.go b/pkg/resources/retry_test.go index 04ae25cb90..7379c2905c 100644 --- a/pkg/resources/retry_test.go +++ b/pkg/resources/retry_test.go @@ -17,8 +17,6 @@ limitations under the License. 
package resources import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,11 +39,9 @@ var _ = Describe("RetryWithRefreshedResource", func() { var ( fakeClient client.Client testResource *appsv1.Deployment - ctx context.Context ) BeforeEach(func() { - ctx = context.TODO() fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() testResource = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, @@ -70,7 +66,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { }) Context("when client.Get succeeds", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { // Set up the fake client to return the resource without error Expect(fakeClient.Create(ctx, testResource)).To(Succeed()) @@ -80,7 +76,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should invoke the callback without error and update the resource", func() { + It("should invoke the callback without error and update the resource", func(ctx SpecContext) { // ensure that the local deployment contains the old value Expect(*testResource.Spec.Replicas).To(Equal(int32(1))) diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go index 6407a9e423..8652c3a445 100644 --- a/pkg/utils/discovery_test.go +++ b/pkg/utils/discovery_test.go @@ -43,8 +43,13 @@ var _ = DescribeTable("Kubernetes minor version detection", ) var _ = Describe("Detect resources properly when", func() { - client := fakeClient.NewSimpleClientset() - fakeDiscovery := client.Discovery().(*discoveryFake.FakeDiscovery) + var client *fakeClient.Clientset + var fakeDiscovery *discoveryFake.FakeDiscovery + + BeforeEach(func() { + client = fakeClient.NewSimpleClientset() + fakeDiscovery = client.Discovery().(*discoveryFake.FakeDiscovery) + }) It("should not detect PodMonitor resource", func() { exists, err := PodMonitorExist(client.Discovery()) diff --git a/pkg/utils/logs/logs_test.go b/pkg/utils/logs/logs_test.go index 8631c1c998..5d93c21cbc 100644 --- a/pkg/utils/logs/logs_test.go +++ b/pkg/utils/logs/logs_test.go @@ -125,10 +125,9 @@ var _ = Describe("Pod logging tests", func() { Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) }) - It("can follow pod logs", func() { + It("can follow pod logs", func(ctx SpecContext) { client := fake.NewSimpleClientset(pod) var logBuffer bytes.Buffer - ctx := context.TODO() var wait sync.WaitGroup wait.Add(1) go func() { From 2e90e9d5edd56dc8f3761e488255c7b43e287cb2 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Thu, 24 Oct 2024 16:00:31 +0200 Subject: [PATCH 10/10] docs: fix minor issues (#5903) * Mark all omitempty fields in the API as optional, so they won't appear as required in the documentation * Fix broken links to structs now in the barman-cloud API * Make Database resource appear in the API resource types Closes #5904 Signed-off-by: Francesco Canovai Signed-off-by: Gabriele Quaresima Co-authored-by: Gabriele Quaresima --- api/v1/backup_types.go | 4 + api/v1/cluster_types.go | 20 ++++ api/v1/common_types.go | 1 + api/v1/database_types.go | 4 + api/v1/pooler_types.go | 1 + api/v1/scheduledbackup_types.go | 1 + docs/config.yaml | 10 +- docs/src/backup_barmanobjectstore.md | 8 +- docs/src/cloudnative-pg.v1.md | 144 +++++++++++++++------------ docs/src/wal_archiving.md | 4 +- 10 files changed, 124 insertions(+), 73 deletions(-) diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 
9c3e49d2ae..f8a01fb1d3 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -182,6 +182,7 @@ type BackupSnapshotElementStatus struct { // TablespaceName is the name of the snapshotted tablespace. Only set // when type is PG_TABLESPACE + // +optional TablespaceName string `json:"tablespaceName,omitempty"` } @@ -285,9 +286,11 @@ type BackupStatus struct { Method BackupMethod `json:"method,omitempty"` // Whether the backup was online/hot (`true`) or offline/cold (`false`) + // +optional Online *bool `json:"online,omitempty"` // A map containing the plugin metadata + // +optional PluginMetadata map[string]string `json:"pluginMetadata,omitempty"` } @@ -333,6 +336,7 @@ type BackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of backups Items []Backup `json:"items"` diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 87171977b8..2fc34c4926 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -382,6 +382,7 @@ type ClusterSpec struct { // EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral // volumes + // +optional EphemeralVolumesSizeLimit *EphemeralVolumesSizeLimitConfiguration `json:"ephemeralVolumesSizeLimit,omitempty"` // Name of the priority class which will be used in every generated Pod, if the PriorityClass @@ -473,6 +474,7 @@ type ClusterSpec struct { // The plugins configuration, containing // any plugin to be loaded with the corresponding configuration + // +optional Plugins PluginConfigurationList `json:"plugins,omitempty"` } @@ -547,9 +549,11 @@ const ( // storage type EphemeralVolumesSizeLimitConfiguration struct { // Shm is the size limit of the shared memory volume + // +optional Shm *resource.Quantity `json:"shm,omitempty"` // TemporaryData is the size limit of the temporary data volume + // +optional TemporaryData *resource.Quantity `json:"temporaryData,omitempty"` } @@ -712,6 +716,7 @@ type ClusterStatus struct { // LastPromotionToken is the last verified promotion token that // was used to promote a replica cluster + // +optional LastPromotionToken string `json:"lastPromotionToken,omitempty"` // How many PVCs have been created by this cluster @@ -849,6 +854,7 @@ type ClusterStatus struct { Image string `json:"image,omitempty"` // PluginStatus is the status of the loaded plugins + // +optional PluginStatus []PluginStatus `json:"pluginStatus,omitempty"` // SwitchReplicaClusterStatus is the status of the switch to replica cluster @@ -967,10 +973,12 @@ type PgBouncerIntegrationStatus struct { type ReplicaClusterConfiguration struct { // Self defines the name of this cluster. It is used to determine if this is a primary // or a replica cluster, comparing it with `primary` + // +optional Self string `json:"self,omitempty"` // Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the // topology specified in externalClusters + // +optional Primary string `json:"primary,omitempty"` // The name of the external cluster which is the replication origin @@ -981,10 +989,12 @@ type ReplicaClusterConfiguration struct { // existing cluster. Replica cluster can be created from a recovery // object store or via streaming through pg_basebackup. // Refer to the Replica clusters page of the documentation for more information. 
+ // +optional Enabled *bool `json:"enabled,omitempty"` // A demotion token generated by an external cluster used to // check if the promotion requirements are met. + // +optional PromotionToken string `json:"promotionToken,omitempty"` // When replica mode is enabled, this parameter allows you to replay @@ -992,6 +1002,7 @@ type ReplicaClusterConfiguration struct { // time past the commit time. This provides an opportunity to correct // data loss errors. Note that when this parameter is set, a promotion // token cannot be used. + // +optional MinApplyDelay *metav1.Duration `json:"minApplyDelay,omitempty"` } @@ -2005,6 +2016,7 @@ type ManagedServices struct { // +optional DisabledDefaultServices []ServiceSelectorType `json:"disabledDefaultServices,omitempty"` // Additional is a list of additional managed services specified by the user. + // +optional Additional []ManagedService `json:"additional,omitempty"` } @@ -2018,6 +2030,7 @@ type ManagedService struct { // UpdateStrategy describes how the service differences should be reconciled // +kubebuilder:default:="patch" + // +optional UpdateStrategy ServiceUpdateStrategy `json:"updateStrategy,omitempty"` // ServiceTemplate is the template specification for the service. @@ -2047,6 +2060,7 @@ type PluginConfiguration struct { Enabled *bool `json:"enabled,omitempty"` // Parameters is the configuration of the plugin + // +optional Parameters map[string]string `json:"parameters,omitempty"` } @@ -2061,21 +2075,26 @@ type PluginStatus struct { // Capabilities are the list of capabilities of the // plugin + // +optional Capabilities []string `json:"capabilities,omitempty"` // OperatorCapabilities are the list of capabilities of the // plugin regarding the reconciler + // +optional OperatorCapabilities []string `json:"operatorCapabilities,omitempty"` // WALCapabilities are the list of capabilities of the // plugin regarding the WAL management + // +optional WALCapabilities []string `json:"walCapabilities,omitempty"` // BackupCapabilities are the list of capabilities of the // plugin regarding the Backup management + // +optional BackupCapabilities []string `json:"backupCapabilities,omitempty"` // Status contain the status reported by the plugin through the SetStatusInCluster interface + // +optional Status string `json:"status,omitempty"` } @@ -2204,6 +2223,7 @@ type ClusterList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of clusters Items []Cluster `json:"items"` diff --git a/api/v1/common_types.go b/api/v1/common_types.go index fb5144ae5b..b87e009b23 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -25,6 +25,7 @@ const VolumeSnapshotKind = "VolumeSnapshot" // not using the core data types. type Metadata struct { // The name of the resource. 
Only supported for certain types + // +optional Name string `json:"name,omitempty"` // Map of string keys and values that can be used to organize and categorize diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 243285dcbd..1089a97957 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -132,13 +132,17 @@ type DatabaseStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Ready is true if the database was reconciled correctly + // +optional Ready bool `json:"ready,omitempty"` // Error is the reconciliation error message + // +optional Error string `json:"error,omitempty"` } +// +genclient // +kubebuilder:object:root=true +// +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go index 5fc3bdb9d1..b3f06fcbf2 100644 --- a/api/v1/pooler_types.go +++ b/api/v1/pooler_types.go @@ -260,6 +260,7 @@ type Pooler struct { // PoolerList contains a list of Pooler type PoolerList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty"` Items []Pooler `json:"items"` } diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go index 1929db5d95..b89248c49c 100644 --- a/api/v1/scheduledbackup_types.go +++ b/api/v1/scheduledbackup_types.go @@ -125,6 +125,7 @@ type ScheduledBackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of clusters Items []ScheduledBackup `json:"items"` diff --git a/docs/config.yaml b/docs/config.yaml index aa77638cf6..54ecf6e949 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -1,5 +1,6 @@ hiddenMemberFields: - "TypeMeta" + - "synchronizeReplicasCache" externalPackages: - match: ^github\.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1\.RelabelConfig$ @@ -31,7 +32,14 @@ externalPackages: hideTypePatterns: - "ParseError$" - - "List$" + # We cannot exclude all `List$` because we declare PluginConfigurationList + - "BackupList$" + - "ClusterList$" + - "ClusterImageCatalogList$" + - "DatabaseList$" + - "ImageCatalogList$" + - "PoolerList$" + - "ScheduledBackupList$" markdownDisabled: false diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index 34b907e0ae..5859966af3 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -96,9 +96,9 @@ algorithms via `barman-cloud-backup` (for backups) and * snappy The compression settings for backups and WALs are independent. See the -[DataBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DataBackupConfiguration) and -[WALBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-WalBackupConfiguration) sections in -the API reference. +[DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and +[WALBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration) sections in +the barman-cloud API reference. 
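The API hunks above add a `// +optional` marker to every field that is already serialized with `omitempty`, and give the `Database` type the `+genclient`, `+kubebuilder:storageversion`, and `+kubebuilder:subresource:status` markers. A minimal sketch of the marker pattern on a hypothetical `Widget` type (not part of this patch) is shown below; the markers live in comments and are read by the code and docs generators, so nothing changes at runtime.

```go
package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WidgetStatus is a hypothetical status type used only to illustrate the
// marker pattern applied throughout this patch.
type WidgetStatus struct {
	// Ready is true if the widget was reconciled correctly
	// +optional
	Ready bool `json:"ready,omitempty"`

	// Error is the reconciliation error message
	// +optional
	Error string `json:"error,omitempty"`
}

// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// Widget is a hypothetical resource carrying the same type-level markers the
// patch adds to Database: +genclient marks it for typed-client generation,
// +kubebuilder:storageversion marks the stored API version, and
// +kubebuilder:subresource:status enables the /status subresource.
type Widget struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// +optional
	Status WidgetStatus `json:"status,omitempty"`
}
```

Without the `+optional` marker the reference-docs generator renders such a field as `[Required]`, which is exactly what the docs/src/cloudnative-pg.v1.md hunks below stop doing for these fields.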
It is important to note that archival time, restore time, and size change between the algorithms, so the compression algorithm should be chosen according @@ -198,4 +198,4 @@ spec: additionalCommandArgs: - "--max-concurrency=1" - "--read-timeout=60" -``` \ No newline at end of file +``` diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index f5678cb22e..7af6805e2e 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -9,6 +9,7 @@ - [Backup](#postgresql-cnpg-io-v1-Backup) - [Cluster](#postgresql-cnpg-io-v1-Cluster) - [ClusterImageCatalog](#postgresql-cnpg-io-v1-ClusterImageCatalog) +- [Database](#postgresql-cnpg-io-v1-Database) - [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog) - [Pooler](#postgresql-cnpg-io-v1-Pooler) - [ScheduledBackup](#postgresql-cnpg-io-v1-ScheduledBackup) @@ -118,6 +119,44 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Database {#postgresql-cnpg-io-v1-Database} + + + +

+Database is the Schema for the databases API
+
+  apiVersion [Required] (string): postgresql.cnpg.io/v1
+  kind [Required] (string): Database
+  metadata [Required] (meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the
+    fields of the metadata field.
+  spec [Required] (DatabaseSpec): Specification of the desired Database.
+    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  status (DatabaseStatus): Most recently observed status of the Database. This data may not be
+    up to date. Populated by the system. Read-only.
+    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

-## Database {#postgresql-cnpg-io-v1-Database}  (the same section, with its field table, is
-removed from its previous position further down the page, just before DatabaseReclaimPolicy)

+## PluginConfigurationList {#postgresql-cnpg-io-v1-PluginConfigurationList}
+(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`)
+Appears in: ClusterSpec
+PluginConfigurationList represent a set of plugin with their
+configuration parameters

The remaining hunks of docs/src/cloudnative-pg.v1.md drop the "[Required]" marker from the table
rows of the fields that are now optional: tablespaceName, online, pluginMetadata,
ephemeralVolumesSizeLimit, plugins, lastPromotionToken, pluginStatus, ready, error, shm,
temporaryData, updateStrategy, additional, name (Metadata), capabilities, operatorCapabilities,
walCapabilities, backupCapabilities, status (PluginStatus), self, primary, enabled,
promotionToken, and minApplyDelay.

-synchronizeReplicasCache  (the undocumented row for this hidden field is removed)
diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index bc67b13757..1f7b60e0c7 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -13,8 +13,8 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up the WAL archive. !!! Info - Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BarmanObjectStoreConfiguration) - in the API reference for a full list of options. + Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration) + in the barman-cloud API for a full list of options. If required, you can choose to compress WAL files as soon as they are uploaded and/or encrypt them: