From b8a7fc980de5ec71a82002a835558e04b1b7e7ae Mon Sep 17 00:00:00 2001 From: cnp-autobot <85171364+cnp-autobot@users.noreply.github.com> Date: Thu, 25 Apr 2024 10:58:26 +0000 Subject: [PATCH 1/4] [create-pull-request] automated change --- .../docs/postgres_for_kubernetes/1/addons.mdx | 16 + .../postgres_for_kubernetes/1/bootstrap.mdx | 10 +- .../1/cluster_conf.mdx | 2 +- .../1/connection_pooling.mdx | 28 + .../1/container_images.mdx | 35 +- .../1/declarative_hibernation.mdx | 2 +- .../1/default-monitoring.yaml | 1 + .../1/failure_modes.mdx | 6 +- .../1/image_catalog.mdx | 110 ++++ .../docs/postgres_for_kubernetes/1/index.mdx | 2 - .../1/installation_upgrade.mdx | 183 +++--- .../1/kubectl-plugin.mdx | 547 ++---------------- .../1/kubernetes_upgrade.mdx | 145 +++-- .../1/labels_annotations.mdx | 8 +- .../postgres_for_kubernetes/1/monitoring.mdx | 2 +- .../1/operator_capability_levels.mdx | 7 +- .../postgres_for_kubernetes/1/pg4k.v1.mdx | 416 ++++++++++++- .../1/postgresql_conf.mdx | 6 +- .../1/replica_cluster.mdx | 54 +- .../postgres_for_kubernetes/1/replication.mdx | 103 +++- .../1/rolling_update.mdx | 2 + .../postgres_for_kubernetes/1/samples.mdx | 5 + .../cluster-example-bis-restore-cr.yaml | 26 + .../samples/cluster-example-bis-restore.yaml | 43 ++ .../1/samples/cluster-example-bis.yaml | 29 + .../1/samples/cluster-example-catalog.yaml | 24 + .../1/samples/cluster-example-full.yaml | 2 +- .../1/samples/pooler-external.yaml | 21 + .../postgres_for_kubernetes/1/scheduling.mdx | 2 +- .../postgres_for_kubernetes/1/security.mdx | 3 +- .../1/ssl_connections.mdx | 2 +- .../docs/postgres_for_kubernetes/1/tde.mdx | 12 +- .../1/troubleshooting.mdx | 4 +- .../1/wal_archiving.mdx | 2 +- 34 files changed, 1144 insertions(+), 716 deletions(-) create mode 100644 product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore-cr.yaml create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore.yaml create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis.yaml create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-catalog.yaml create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/pooler-external.yaml diff --git a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx index ec049e6fdf9..1f95ab6f6c4 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx @@ -76,6 +76,7 @@ to be defined as a YAML object having the following keys: - `electedResourcesDecorators` - `excludedResourcesDecorators` +- `excludedResourcesSelector` - `backupInstanceDecorators` - `preBackupHookConfiguration` - `postBackupHookConfiguration` @@ -107,6 +108,12 @@ will be placed on every excluded pod and PVC. Each element of the array must have the same fields as the `electedResourcesDecorators` section above. +#### The `excludedResourcesSelector` section + +This section selects Pods and PVCs that are applied to the +`excludedResourcesDecorators`. It accepts a [label selector rule](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) +as value. When empty, all the Pods and every PVC that is not elected will be excluded. 
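+
+For example, the following fragment of an add-on configuration (reusing the
+illustrative `app=xyz,env=prod` selector and the `excludedResourcesDecorators`
+entry from the full examples below) decorates only the excluded Pods and PVCs
+matching that selector:
+
+```yaml
+excludedResourcesSelector: app=xyz,env=prod
+excludedResourcesDecorators:
+  - key: "app.example.com/excluded"
+    metadataType: "label"
+    value: "true"
+```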
+ #### The `backupInstanceDecorators` section This section allows you to configure an array of labels and/or annotations that @@ -188,6 +195,7 @@ data: - key: "app.example.com/elected" metadataType: "label" value: "true" + excludedResourcesSelector: app=xyz,env=prod excludedResourcesDecorators: - key: "app.example.com/excluded" metadataType: "label" @@ -239,6 +247,7 @@ metadata: - key: "app.example.com/elected" metadataType: "label" value: "true" + excludedResourcesSelector: app=xyz,env=prod excludedResourcesDecorators: - key: "app.example.com/excluded" metadataType: "label" @@ -342,6 +351,13 @@ excludedResourcesDecorators: metadataType: "annotation" value: "Not necessary for backup" +# A LabelSelector containing the labels being used to filter Pods +# and PVCs to decorate with excludedResourcesDecorators. +# It accepts a label selector rule as value. +# See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +# When empty, all the Pods and every PVC that is not elected will be excluded. +excludedResourcesSelector: app=xyz,env=prod + # An array of labels and/or annotations that will be placed # on the instance pod that's been selected for the backup by # the operator and which contains the hooks. diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx index 9bced338a72..6194e5f30c6 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx @@ -530,7 +530,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 16.1 cluster, +The following manifest creates a new PostgreSQL 16.2 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -545,7 +545,7 @@ metadata: name: target-db spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:16.1 + imageName: quay.io/enterprisedb/postgresql:16.2 bootstrap: pg_basebackup: @@ -565,7 +565,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 16.1). +the same PostgreSQL version (in our case 16.2). #### TLS certificate authentication @@ -580,7 +580,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 16.1 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 16.2 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. 
The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -595,7 +595,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:16.1 + imageName: quay.io/enterprisedb/postgresql:16.2 bootstrap: pg_basebackup: diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx index 8b550eb893d..0a515fb9465 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx @@ -50,7 +50,7 @@ EDB Postgres for Kubernetes relies on [ephemeral volumes](https://kubernetes.io/ for part of the internal activities. Ephemeral volumes exist for the sole duration of a pod's life, without persisting across pod restarts. -### Volume Claim Template for Temporary Storage +# Volume Claim Template for Temporary Storage The operator uses by default an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit field`. This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field. diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx index 57531ecd032..b2ac5abdc19 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx @@ -258,6 +258,34 @@ spec: memory: 500Mi ``` +## Service Template + +Sometimes, your pooler will require some different labels, annotations, or even change +the type of the service, you can achieve that by using the `serviceTemplate` field: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Pooler +metadata: + name: pooler-example-rw +spec: + cluster: + name: cluster-example + instances: 3 + type: rw + serviceTemplate: + metadata: + labels: + app: pooler + spec: + type: LoadBalancer + pgbouncer: + poolMode: session + parameters: + max_client_conn: "1000" + default_pool_size: "10" +``` + ## High availability (HA) Because of Kubernetes' deployments, you can configure your pooler to run on a diff --git a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx index 5fad160a6d6..689f6f2d8e6 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx @@ -43,24 +43,35 @@ for EDB Postgres for Kubernetes, and publishes them on ## Image tag requirements -While the image name can be anything valid for Docker, the EDB Postgres for Kubernetes -operator relies on the *image tag* to detect the Postgres major -version contained in the image. +Certainly! Here's an improved version: -The image tag must start with a valid PostgreSQL major version number (e.g. -14.5 or 15) optionally followed by a dot and the patch level. +## Image Tag Requirements -This can be followed by any character combination that is valid and +To ensure the operator makes informed decisions, it must accurately detect the +PostgreSQL major version. This detection can occur in two ways: + +1. Utilizing the `major` field of the `imageCatalogRef`, if defined. +2. Auto-detecting the major version from the image tag of the `imageName` if + not explicitly specified. + +For auto-detection to work, the image tag must adhere to a specific format. 
It +should commence with a valid PostgreSQL major version number (e.g., 15.6 or +16), optionally followed by a dot and the patch level. + +Following this, the tag can include any character combination valid and accepted in a Docker tag, preceded by a dot, an underscore, or a minus sign. Examples of accepted image tags: -- `11.1` -- `12.3.2.1-1` -- `12.4` -- `13` -- `14.5-10` -- `15.0` +- `12.1` +- `13.3.2.1-1` +- `13.4` +- `14` +- `15.5-10` +- `16.0` !!! Warning `latest` is not considered a valid tag for the image. + +!!! Note + Image tag requirements do no apply for images defined in a catalog. diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx index ef3d5664ee7..5b56275699d 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx @@ -61,7 +61,7 @@ $ kubectl cnp status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: quay.io/enterprisedb/postgresql:16.1 +PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml b/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml index bc2a4fa4877..309f6fd341a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml @@ -202,6 +202,7 @@ data: description: "Time at which these statistics were last reset" pg_stat_bgwriter: + runonserver: "<17.0.0" query: | SELECT checkpoints_timed , checkpoints_req diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx index a1aab1641cf..24771b9e34e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx @@ -8,7 +8,7 @@ PostgreSQL can face on a Kubernetes cluster during its lifetime. !!! Important In case the failure scenario you are experiencing is not covered by this - section, please immediately contact EDB for support and assistance. + section, please immediately seek for [professional support](https://cloudnative-pg.io/support/). !!! Seealso "Postgres instance manager" Please refer to the ["Postgres instance manager" section](instance_manager.md) @@ -175,8 +175,8 @@ In the case of undocumented failure, it might be necessary to intervene to solve the problem manually. !!! Important - In such cases, please do not perform any manual operation without the - support and assistance of EDB engineering team. + In such cases, please do not perform any manual operation without + [professional support](https://cloudnative-pg.io/support/). From version 1.11.0 of the operator, you can use the `k8s.enterprisedb.io/reconciliationLoop` annotation to temporarily disable the diff --git a/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx b/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx new file mode 100644 index 00000000000..b14443967df --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx @@ -0,0 +1,110 @@ +--- +title: 'Image Catalog' +originalFilePath: 'src/image_catalog.md' +--- + +`ImageCatalog` and `ClusterImageCatalog` are essential resources that empower +you to define images for creating a `Cluster`. 
+ +The key distinction lies in their scope: an `ImageCatalog` is namespaced, while +a `ClusterImageCatalog` is cluster-scoped. + +Both share a common structure, comprising a list of images, each equipped with +a `major` field indicating the major version of the image. + +!!! Warning + The operator places trust in the user-defined major version and refrains + from conducting any PostgreSQL version detection. It is the user's + responsibility to ensure alignment between the declared major version in + the catalog and the PostgreSQL image. + +The `major` field's value must remain unique within a catalog, preventing +duplication across images. Distinct catalogs, however, may +expose different images under the same `major` value. + +**Example of a Namespaced `ImageCatalog`:** + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: ImageCatalog +metadata: + name: postgresql + namespace: default +spec: + images: + - major: 15 + image: quay.io/enterprisedb/postgresql:15.6 + - major: 16 + image: quay.io/enterprisedb/postgresql:16.2 +``` + +**Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: ClusterImageCatalog +metadata: + name: postgresql +spec: + images: + - major: 15 + image: quay.io/enterprisedb/postgresql:15.6 + - major: 16 + image: quay.io/enterprisedb/postgresql:16.2 +``` + +A `Cluster` resource has the flexibility to reference either an `ImageCatalog` +or a `ClusterImageCatalog` to precisely specify the desired image. + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + imageCatalogRef: + apiGroup: postgresql.k8s.enterprisedb.io + kind: ImageCatalog + name: postgresql + major: 16 + storage: + size: 1Gi +``` + +Clusters utilizing these catalogs maintain continuous monitoring. +Any alterations to the images within a catalog trigger automatic updates for +**all associated clusters** referencing that specific entry. + +## EDB Postgres for Kubernetes Catalogs + +The EDB Postgres for Kubernetes project maintains `ClusterImageCatalogs` for the images it +provides. These catalogs are regularly updated with the latest images for each +major version. By applying the `ClusterImageCatalog.yaml` file from the +EDB Postgres for Kubernetes project's GitHub repositories, cluster administrators can ensure +that their clusters are automatically updated to the latest version within the +specified major release. 
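+
+For example, assuming the catalog is installed under the name `postgresql` (as
+in the catalog examples above), a `Cluster` can reference it by setting
+`kind: ClusterImageCatalog` in `imageCatalogRef`, a sketch analogous to the
+namespaced example shown earlier:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  imageCatalogRef:
+    apiGroup: postgresql.k8s.enterprisedb.io
+    kind: ClusterImageCatalog
+    name: postgresql
+    major: 16
+  storage:
+    size: 1Gi
+```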
+ +### PostgreSQL Container Images + +You can install the +[latest version of the cluster catalog for the PostgreSQL Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml) +([cloudnative-pg/postgres-containers](https://github.com/enterprisedb/docker-postgres) repository) +with: + +```shell +kubectl apply \ + -f https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml +``` + +### PostGIS Container Images + +You can install the +[latest version of the cluster catalog for the PostGIS Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgis-containers/main/PostGIS/ClusterImageCatalog.yaml) +([cloudnative-pg/postgis-containers](https://github.com/cloudnative-pg/postgis-containers) repository) +with: + +```shell +kubectl apply \ + -f https://raw.githubusercontent.com/cloudnative-pg/postgis-containers/main/PostGIS/ClusterImageCatalog.yaml +``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx index 7ddf1e5649b..c9d73b9b8d0 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx @@ -80,8 +80,6 @@ and OpenShift. It is designed, developed, and supported by EDB and covers the full lifecycle of a highly available Postgres database clusters with a primary/standby architecture, using native streaming replication. -EDB Postgres for Kubernetes was made generally available on February 4, 2021. Earlier versions were made available to selected customers prior to the GA release. - !!! Note The operator has been renamed from Cloud Native PostgreSQL. Existing users diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx index 30b0aad876c..038fc04854a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx @@ -19,12 +19,12 @@ The operator can be installed using the provided [Helm chart](https://github.com The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.22.2.yaml) +You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.23.0.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://get.enterprisedb.io/cnp/postgresql-operator-1.22.2.yaml + https://get.enterprisedb.io/cnp/postgresql-operator-1.23.0.yaml ``` You can verify that with: @@ -84,7 +84,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.22/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.23/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` @@ -159,10 +159,6 @@ plane for self-managed Kubernetes installations). before performing an upgrade as some versions might require extra steps. -!!! Warning - If you are upgrading to version 1.20, please read carefully - the [dedicated section below](#upgrading-to-120-from-a-previous-minor-version). - Upgrading EDB Postgres for Kubernetes operator is a two-step process: 1. 
upgrade the controller and the related Kubernetes resources @@ -250,54 +246,51 @@ When versions are not directly upgradable, the old version needs to be removed before installing the new one. This won't affect user data but only the operator itself. -### Upgrading to 1.22 from a previous minor version +### Upgrading to 1.23.0, 1.22.3 or 1.21.5 !!! Important - If you are transitioning from a prior minor version to version 1.22, please - ensure that you are using the latest available patch version, which is - currently 1.22.2. This guarantees that you benefit from the most recent bug - fixes, security updates, and improvements associated with the 1.22 series. + We encourage all existing users of EDB Postgres for Kubernetes to upgrade to version + 1.23.0 or at least to the latest stable version of the minor release you are + currently using (namely 1.22.2 or 1.21.4). !!! Warning Every time you are upgrading to a higher minor release, make sure you go through the release notes and upgrade instructions of all the intermediate minor releases. For example, if you want to move - from 1.20.x to 1.22, make sure you go through the release notes - and upgrade instructions for 1.21 and 1.22. + from 1.21.x to 1.23, make sure you go through the release notes + and upgrade instructions for 1.22 and 1.23. -EDB Postgres for Kubernetes continues to adhere to the security-by-default approach. As of -version 1.22, the usage of the `ALTER SYSTEM` command is now disabled by -default. +#### User defined replication slots -The reason behind this choice is to ensure that, by default, changes to the -PostgreSQL configuration in a database cluster controlled by EDB Postgres for Kubernetes are -allowed only through the Kubernetes API. +EDB Postgres for Kubernetes now offers automated synchronization of all replication slots +defined on the primary to any standby within the High Availability (HA) +cluster. -At the same time, we are providing an option to enable `ALTER SYSTEM` if you -need to use it, even temporarily, from versions 1.22.0, 1.21.2, and 1.20.5, -by setting `.spec.postgresql.enableAlterSystem` to `true`, as in the following -excerpt: +If you manually manage replication slots on a standby, it is essential to +exclude those replication slots from synchronization. Failure to do so may +result in EDB Postgres for Kubernetes removing them from the standby. To implement this +exclusion, utilize the following YAML configuration. In this example, +replication slots with a name starting with 'foo' are prevented from +synchronization: ```yaml ... - postgresql: - enableAlterSystem: true -... + replicationSlots: + synchronizeReplicas: + enabled: true + excludePatterns: + - "^foo" ``` -Clusters in 1.22 will have `enableAlterSystem` set to `false` by default. -If you want to retain the existing behavior, in 1.22, you need to explicitly -set `enableAlterSystem` to `true` as shown above. +Alternatively, if you prefer to disable the synchronization mechanism entirely, +use the following configuration: -In versions 1.21.2 and 1.20.5, and later patch releases in the 1.20 and 1.21 -branches, `enableAlterSystem` will be set to `true` by default, keeping with -the existing behavior. If you don't need to use `ALTER SYSTEM`, we recommend -that you set `enableAlterSystem` explicitly to `false`. - -!!! Important - You can set the desired value for `enableAlterSystem` immediately - following your upgrade to version 1.22.0, 1.21.2, or 1.20.5, as shown in - the example above. +```yaml +... 
+ replicationSlots: + synchronizeReplicas: + enabled: false +``` #### Server-side apply of manifests @@ -325,6 +318,42 @@ Henceforth, `kube-apiserver` will be automatically acknowledged as a recognized manager for the CRDs, eliminating the need for any further manual intervention on this matter. +### Upgrading to 1.22 from a previous minor version + +EDB Postgres for Kubernetes continues to adhere to the security-by-default approach. As of +version 1.22, the usage of the `ALTER SYSTEM` command is now disabled by +default. + +The reason behind this choice is to ensure that, by default, changes to the +PostgreSQL configuration in a database cluster controlled by EDB Postgres for Kubernetes are +allowed only through the Kubernetes API. + +At the same time, we are providing an option to enable `ALTER SYSTEM` if you +need to use it, even temporarily, from versions 1.22.0, 1.21.2, and 1.20.5, +by setting `.spec.postgresql.enableAlterSystem` to `true`, as in the following +excerpt: + +```yaml +... + postgresql: + enableAlterSystem: true +... +``` + +Clusters in 1.22 will have `enableAlterSystem` set to `false` by default. +If you want to retain the existing behavior, in 1.22, you need to explicitly +set `enableAlterSystem` to `true` as shown above. + +In versions 1.21.2 and 1.20.5, and later patch releases in the 1.20 and 1.21 +branches, `enableAlterSystem` will be set to `true` by default, keeping with +the existing behavior. If you don't need to use `ALTER SYSTEM`, we recommend +that you set `enableAlterSystem` explicitly to `false`. + +!!! Important + You can set the desired value for `enableAlterSystem` immediately + following your upgrade to version 1.22.0, 1.21.2, or 1.20.5, as shown in + the example above. + ### Upgrading to 1.21 from a previous minor version With the goal to keep improving out-of-the-box the *convention over @@ -498,79 +527,3 @@ spec: ... smartShutdownTimeout: 15 ``` - -### Upgrading to 1.20 from a previous minor version - -EDB Postgres for Kubernetes 1.20 introduces some changes from previous versions of the -operator in the default behavior of a few features, with the goal to improve -resilience and usability of a Postgres cluster out of the box, through -convention over configuration. - -!!! Important - These changes all involve cases where at least one replica is present, and - **only affect new `Cluster` resources**. - -#### Backup from a standby - -[Backup from a standby](backup.md#backup-from-a-standby) -was introduced in EDB Postgres for Kubernetes 1.19, but disabled by default - meaning that -the base backup is taken from the primary unless the target is explicitly -set to prefer standby. - -From version 1.20, if one or more replicas are available, the operator -will prefer the most aligned standby to take a full base backup. - -If you are upgrading your EDB Postgres for Kubernetes deployment to 1.20 and are concerned that -this feature might impact your production environment for the new `Cluster` resources -that you create, you can explicitly set the target to the primary by adding the -following line to all your `Cluster` resources: - -```yaml -spec: - ... - backup: - target: "primary" -``` - -#### Restart of a primary after a rolling update - -[Automated rolling updates](rolling_update.md#automated-updates-unsupervised) -have been always available in EDB Postgres for Kubernetes, and by default they update the -primary after having performed a switchover to the most aligned replica. 
- -From version 1.20, we are changing the default update method -of the primary from switchover to restart as, in most cases, this is -the fastest and safest way. - -If you are upgrading your EDB Postgres for Kubernetes deployment to 1.20 and are concerned that -this feature might impact your production environment for the new `Cluster` -resources that you create, you can explicitly set the update method of the -primary to switchover by adding the following line to all your `Cluster` -resources: - -```yaml -spec: - ... - primaryUpdateMethod: switchover -``` - -#### Replication slots for High Availability - -[Replication slots for High Availability](replication.md#replication-slots-for-high-availability) -were introduced in EDB Postgres for Kubernetes in version 1.18, but disabled by default. - -Version 1.20 prepares the ground for enabling this feature by default in any -future release, as replication slots enhance the resilience and robustness of a -High Availability cluster. - -For future compatibility, if you already know that your environments won't ever -need replication slots, our recommendation is that you explicitly disable their -management by adding from now the following lines to your `Cluster` resources: - -```yaml -spec: - ... - replicationSlots: - highAvailability: - enabled: false -``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx index b2b010faac1..e1982245b0a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx @@ -34,67 +34,52 @@ them in your systems. #### Debian packages -For example, let's install the 1.22.2 release of the plugin, for an Intel based +For example, let's install the 1.18.1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.2/kubectl-cnp_1.22.2_linux_x86_64.deb +$ wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.18.1/kubectl-cnp_1.18.1_linux_x86_64.deb ``` Then, install from the local file using `dpkg`: ```sh -dpkg -i kubectl-cnp_1.22.2_linux_x86_64.deb -__OUTPUT__ +$ dpkg -i kubectl-cnp_1.18.1_linux_x86_64.deb (Reading database ... 16102 files and directories currently installed.) -Preparing to unpack kubectl-cnp_1.22.2_linux_x86_64.deb ... -Unpacking cnp (1.22.2) over (1.22.2) ... -Setting up cnp (1.22.2) ... +Preparing to unpack kubectl-cnp_1.18.1_linux_x86_64.deb ... +Unpacking cnp (1.18.1) over (1.18.1) ... +Setting up cnp (1.18.1) ... ``` #### RPM packages -As in the example for `.deb` packages, let's install the 1.22.2 release for an +As in the example for `.deb` packages, let's install the 1.18.1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. -``` sh -curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.2/kubectl-cnp_1.22.2_linux_x86_64.rpm \ - --output kube-plugin.rpm +```sh +curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.18.1/kubectl-cnp_1.18.1_linux_x86_64.rpm --output cnp-plugin.rpm ``` Then install with `yum`, and you're ready to use: ```sh -yum --disablerepo=* localinstall kube-plugin.rpm -__OUTPUT__ +$ yum --disablerepo=* localinstall cnp-plugin.rpm +yum --disablerepo=* localinstall cnp-plugin.rpm +Failed to set locale, defaulting to C.UTF-8 Dependencies resolved. 
-======================================================================================================================== - Package Architecture Version Repository Size -======================================================================================================================== +==================================================================================================== + Package Architecture Version Repository Size +==================================================================================================== Installing: - kubectl-cnp x86_64 1.22.2-1 @commandline 17 M + cnpg x86_64 1.18.1-1 @commandline 14 M Transaction Summary -======================================================================================================================== +==================================================================================================== Install 1 Package -Total size: 17 M -Installed size: 62 M +Total size: 14 M +Installed size: 43 M Is this ok [y/N]: y -Downloading Packages: -Running transaction check -Transaction check succeeded. -Running transaction test -Transaction test succeeded. -Running transaction - Preparing : 1/1 - Installing : kubectl-cnp-1.22.2-1.x86_64 1/1 - Verifying : kubectl-cnp-1.22.2-1.x86_64 1/1 - -Installed: - kubectl-cnp-1.22.2-1.x86_64 - -Complete! ``` ### Supported Architectures @@ -117,29 +102,6 @@ operating system and architectures: - arm 5/6/7 - arm64 -### Configuring auto-completion - -To configure [auto-completion](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_completion/) for the plugin, a helper shell script needs to be -installed into your current PATH. Assuming the latter contains `/usr/local/bin`, -this can be done with the following commands: - -```shell -cat > kubectl_complete-cnp <..` format (e.g. `1.22.2`). The default empty value installs the version of the operator that matches the version of the plugin. +- `--version`: minor version of the operator to be installed, such as `1.17`. + If a minor version is specified, the plugin will install the latest patch + version of that minor version. If no version is supplied the plugin will + install the latest `MAJOR.MINOR.PATCH` version of the operator. 
- `--watch-namespace`: comma separated string containing the namespaces to watch (by default all namespaces) @@ -175,7 +140,7 @@ will install the operator, is as follows: ```shell kubectl cnp install generate \ -n king \ - --version 1.22.2 \ + --version 1.17 \ --replicas 3 \ --watch-namespace "albert, bb, freddie" \ > operator.yaml @@ -184,9 +149,9 @@ kubectl cnp install generate \ The flags in the above command have the following meaning: - `-n king` install the CNP operator into the `king` namespace -- `--version 1.22.2` install operator version 1.22.2 +- `--version 1.17` install the latest patch version for minor version 1.17 - `--replicas 3` install the operator with 3 replicas -- `--watch-namespace "albert, bb, freddie"` have the operator watch for +- `--watch-namespaces "albert, bb, freddie"` have the operator watch for changes in the `albert`, `bb` and `freddie` namespaces only ### Status @@ -222,7 +187,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2 +PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -267,7 +232,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2 +PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -757,89 +722,6 @@ items: "apiVersion": "postgresql.k8s.enterprisedb.io/v1", ``` -### Logs - -The `kubectl cnp logs` command allows to follow the logs of a collection -of pods related to EDB Postgres for Kubernetes in a single go. - -It has at the moment one available sub-command: `cluster`. - -#### Cluster logs - -The `cluster` sub-command gathers all the pod logs for a cluster in a single -stream or file. -This means that you can get all the pod logs in a single terminal window, with a -single invocation of the command. - -As in all the cnp plugin sub-commands, you can get instructions and help with -the `-h` flag: - -`kubectl cnp logs cluster -h` - -The `logs` command will display logs in JSON-lines format, unless the -`--timestamps` flag is used, in which case, a human readable timestamp will be -prepended to each line. In this case, lines will no longer be valid JSON, -and tools such as `jq` may not work as desired. - -If the `logs cluster` sub-command is given the `-f` flag (aka `--follow`), it -will follow the cluster pod logs, and will also watch for any new pods created -in the cluster after the command has been invoked. -Any new pods found, including pods that have been restarted or re-created, -will also have their pods followed. -The logs will be displayed in the terminal's standard-out. -This command will only exit when the cluster has no more pods left, or when it -is interrupted by the user. - -If `logs` is called without the `-f` option, it will read the logs from all -cluster pods until the time of invocation and display them in the terminal's -standard-out, then exit. -The `-o` or `--output` flag can be provided, to specify the name -of the file where the logs should be saved, instead of displaying over -standard-out. -The `--tail` flag can be used to specify how many log lines will be retrieved -from each pod in the cluster. By default, the `logs cluster` sub-command will -display all the logs from each pod in the cluster. 
If combined with the "follow" -flag `-f`, the number of logs specified by `--tail` will be retrieved until the -current time, and and from then the new logs will be followed. - -NOTE: unlike other `cnp` plugin commands, the `-f` is used to denote "follow" -rather than specify a file. This keeps with the convention of `kubectl logs`, -which takes `-f` to mean the logs should be followed. - -Usage: - -```shell -kubectl cnp logs cluster [flags] -``` - -Using the `-f` option to follow: - -```shell -kubectl cnp report cluster cluster-example -f -``` - -Using `--tail` option to display 3 lines from each pod and the `-f` option -to follow: - -```shell -kubectl cnp report cluster cluster-example -f --tail 3 -``` - -``` json -{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} -{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} -… -… -``` - -With the `-o` option omitted, and with `--output` specified: - -``` sh -kubectl cnp logs cluster cluster-example --output my-cluster.log - -Successfully written logs to "my-cluster.log" -``` - ### Destroy The `kubectl cnp destroy` command helps remove an instance and all the @@ -944,16 +826,11 @@ kubectl cnp fio -n Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details. -### Requesting a new physical backup +### Requesting a new base backup The `kubectl cnp backup` command requests a new physical base backup for an existing Postgres cluster by creating a new `Backup` resource. -!!! Info - From release 1.21, the `backup` command accepts a new flag, `-m` - to specify the backup method. - To request a backup using volume snapshots, set `-m volumeSnapshot` - The following example requests an on-demand backup for a given cluster: ```shell @@ -967,17 +844,10 @@ kubectl cnp backup cluster-example backup/cluster-example-20230121002300 created ``` -By default, a newly created backup will use the backup target policy defined -in the cluster to choose which instance to run on. -However, you can override this policy with the `--backup-target` option. - -In the case of volume snapshot backups, you can also use the `--online` option -to request an online/hot backup or an offline/cold one: additionally, you can -also tune online backups by explicitly setting the `--immediate-checkpoint` and -`--wait-for-archive` options. - -The ["Backup" section](./backup.md#backup) contains more information about -the configuration settings. +By default, new created backup will use the backup target policy defined +in cluster to choose which instance to run on. You can also use `--backup-target` +option to override this policy. please refer to [Backup and Recovery](backup_recovery.md) +for more information about backup target. ### Launching psql @@ -992,7 +862,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```shell kubectl cnp psql cluster-example -psql (16.2 (Debian 16.2-1.pgdg110+1)) +psql (15.3) Type "help" for help. postgres=# @@ -1003,7 +873,7 @@ select to work against a replica by using the `--replica` option: ```shell kubectl cnp psql --replica cluster-example -psql (16.2 (Debian 16.2-1.pgdg110+1)) +psql (15.3) Type "help" for help. 
@@ -1031,335 +901,44 @@ kubectl cnp psql cluster-example -- -U postgres ### Snapshotting a Postgres cluster -!!! Warning - The `kubectl cnp snapshot` command has been removed. - Please use the [`backup` command](#requesting-a-new-physical-backup) to request - backups using volume snapshots. - -### Using pgAdmin4 for evaluation/demonstration purposes only - -[pgAdmin](https://www.pgadmin.org/) stands as the most popular and feature-rich -open-source administration and development platform for PostgreSQL. -For more information on the project, please refer to the official -[documentation](https://www.pgadmin.org/docs/). - -Given that the pgAdmin Development Team maintains official Docker container -images, you can install pgAdmin in your environment as a standard -Kubernetes deployment. - -!!! Important - Deployment of pgAdmin in Kubernetes production environments is beyond the - scope of this document and, more broadly, of the EDB Postgres for Kubernetes project. - -However, **for the purposes of demonstration and evaluation**, EDB Postgres for Kubernetes -offers a suitable solution. The `cnp` plugin implements the `pgadmin4` -command, providing a straightforward method to connect to a given database -`Cluster` and navigate its content in a local environment such as `kind`. - -For example, you can install a demo deployment of pgAdmin4 for the -`cluster-example` cluster as follows: - -```sh -kubectl cnp pgadmin4 cluster-example -``` - -This command will produce: - -```output -ConfigMap/cluster-example-pgadmin4 created -Deployment/cluster-example-pgadmin4 created -Service/cluster-example-pgadmin4 created -Secret/cluster-example-pgadmin4 created - -[...] -``` - -After deploying pgAdmin, forward the port using kubectl and connect -through your browser by following the on-screen instructions. - -![Screenshot of desktop installation of pgAdmin](images/pgadmin4.png) +The `kubectl cnp snapshot` creates consistent snapshots of a Postgres +`Cluster` by: -As usual, you can use the `--dry-run` option to generate the YAML file: - -```sh -kubectl cnp pgadmin4 --dry-run cluster-example -``` - -pgAdmin4 can be installed in either desktop or server mode, with the default -being server. - -In `server` mode, authentication is required using a randomly generated password, -and users must manually specify the database to connect to. - -On the other hand, `desktop` mode initiates a pgAdmin web interface without -requiring authentication. It automatically connects to the `app` database as the -`app` user, making it ideal for quick demos, such as on a local deployment using -`kind`: - -```sh -kubectl cnp pgadmin4 --mode desktop cluster-example -``` - -After concluding your demo, ensure the termination of the pgAdmin deployment by -executing: - -```sh -kubectl cnp pgadmin4 --dry-run cluster-example | kubectl delete -f - -``` - -!!! Warning - Never deploy pgAdmin in production using the plugin. - -### Logical Replication Publications - -The `cnp publication` command group is designed to streamline the creation and -removal of [PostgreSQL logical replication publications](https://www.postgresql.org/docs/current/logical-replication-publication.html). -Be aware that these commands are primarily intended for assisting in the -creation of logical replication publications, particularly on remote PostgreSQL -databases. +1. choosing a replica Pod to work on +2. fencing the replica +3. taking the snapshot +4. unfencing the replica !!! 
Warning - It is crucial to have a solid understanding of both the capabilities and - limitations of PostgreSQL's native logical replication system before using - these commands. - In particular, be mindful of the [logical replication restrictions](https://www.postgresql.org/docs/current/logical-replication-restrictions.html). - -#### Creating a new publication - -To create a logical replication publication, use the `cnp publication create` -command. The basic structure of this command is as follows: + A cluster already having a fenced instance cannot be snapshotted. -```sh -kubectl cnp publication create \ - --publication \ - [--external-cluster ] - [options] -``` +At the moment, this command can be used only for clusters having at least one +replica: that replica will be shut down by the fencing procedure to ensure the +snapshot to be consistent (cold backup). As the development of +declarative support for Kubernetes' `VolumeSnapshot` API continues, +this limitation will be removed, allowing you to take online backups +as business continuity requires. -There are two primary use cases: - -- With `--external-cluster`: Use this option to create a publication on an - external cluster (i.e. defined in the `externalClusters` stanza). The commands - will be issued from the ``, but the publication will be for the - data in ``. - -- Without `--external-cluster`: Use this option to create a publication in the - `` PostgreSQL `Cluster` (by default, the `app` database). - -!!! Warning - When connecting to an external cluster, ensure that the specified user has - sufficient permissions to execute the `CREATE PUBLICATION` command. - -You have several options, similar to the [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html) -command, to define the group of tables to replicate. Notable options include: - -- If you specify the `--all-tables` option, you create a publication `FOR ALL TABLES`. -- Alternatively, you can specify multiple occurrences of: - - `--table`: Add a specific table (with an expression) to the publication. - - `--schema`: Include all tables in the specified database schema (available - from PostgreSQL 15). - -The `--dry-run` option enables you to preview the SQL commands that the plugin -will execute. - -For additional information and detailed instructions, type the following -command: - -```sh -kubectl cnp publication create --help -``` - -##### Example - -Given a `source-cluster` and a `destination-cluster`, we would like to create a -publication for the data on `source-cluster`. -The `destination-cluster` has an entry in the `externalClusters` stanza pointing -to `source-cluster`. - -We can run: - -``` sh -kubectl cnp publication create destination-cluster \ - --external-cluster=source-cluster --all-tables -``` - -which will create a publication for all tables on `source-cluster`, running -the SQL commands on the `destination-cluster`. - -Or instead, we can run: - -``` sh -kubectl cnp publication create source-cluster \ - --publication=app --all-tables -``` - -which will create a publication named `app` for all the tables in the -`source-cluster`, running the SQL commands on the source cluster. - -!!! Info - There are two sample files that have been provided for illustration and inspiration: - [logical-source](../samples/cluster-example-logical-source.yaml) and - [logical-destination](../samples/cluster-example-logical-destination.yaml). 
- -#### Dropping a publication - -The `cnp publication drop` command seamlessly complements the `create` command -by offering similar key options, including the publication name, cluster name, -and an optional external cluster. You can drop a `PUBLICATION` with the -following command structure: - -```sh -kubectl cnp publication drop \ - --publication \ - [--external-cluster ] - [options] -``` - -To access further details and precise instructions, use the following command: - -```sh -kubectl cnp publication drop --help -``` - -### Logical Replication Subscriptions - -The `cnp subscription` command group is a dedicated set of commands designed -to simplify the creation and removal of -[PostgreSQL logical replication subscriptions](https://www.postgresql.org/docs/current/logical-replication-subscription.html). -These commands are specifically crafted to aid in the establishment of logical -replication subscriptions, especially when dealing with remote PostgreSQL -databases. - -!!! Warning - Before using these commands, it is essential to have a comprehensive - understanding of both the capabilities and limitations of PostgreSQL's - native logical replication system. - In particular, be mindful of the [logical replication restrictions](https://www.postgresql.org/docs/current/logical-replication-restrictions.html). - -In addition to subscription management, we provide a helpful command for -synchronizing all sequences from the source cluster. While its applicability -may vary, this command can be particularly useful in scenarios involving major -upgrades or data import from remote servers. - -#### Creating a new subscription - -To create a logical replication subscription, use the `cnp subscription create` -command. The basic structure of this command is as follows: - -```sh -kubectl cnp subscription create \ - --subscription \ - --publication \ - --external-cluster \ - [options] -``` - -This command configures a subscription directed towards the specified -publication in the designated external cluster, as defined in the -`externalClusters` stanza of the ``. - -For additional information and detailed instructions, type the following -command: - -```sh -kubectl cnp subscription create --help -``` - -##### Example - -As in the section on publications, we have a `source-cluster` and a -`destination-cluster`, and we have already created a publication called -`app`. - -The following command: - -``` sh -kubectl cnp subscription create destination-cluster \ - --external-cluster=source-cluster \ - --publication=app --subscription=app -``` - -will create a subscription for `app` on the destination cluster. - -!!! Warning - Prioritize testing subscriptions in a non-production environment to ensure - their effectiveness and identify any potential issues before implementing them - in a production setting. - -!!! Info - There are two sample files that have been provided for illustration and inspiration: - [logical-source](../samples/cluster-example-logical-source.yaml) and - [logical-destination](../samples/cluster-example-logical-destination.yaml). - -#### Dropping a subscription - -The `cnp subscription drop` command seamlessly complements the `create` command. 
-You can drop a `SUBSCRIPTION` with the following command structure: - -```sh -kubectl cnp subcription drop \ - --subscription \ - [options] -``` - -To access further details and precise instructions, use the following command: - -```sh -kubectl cnp subscription drop --help -``` - -#### Synchronizing sequences - -One notable constraint of PostgreSQL logical replication, implemented through -publications and subscriptions, is the lack of sequence synchronization. This -becomes particularly relevant when utilizing logical replication for live -database migration, especially to a higher version of PostgreSQL. A crucial -step in this process involves updating sequences before transitioning -applications to the new database (*cutover*). - -To address this limitation, the `cnp subscription sync-sequences` command -offers a solution. This command establishes a connection with the source -database, retrieves all relevant sequences, and subsequently updates local -sequences with matching identities (based on database schema and sequence -name). - -You can use the command as shown below: +!!! Important + Even if the procedure will shut down a replica, the primary + Pod will not be involved. -```sh -kubectl cnp subscription sync-sequences \ - --subscription \ - -``` +The `kubectl cnp snapshot` command requires the cluster name: -For comprehensive details and specific instructions, utilize the following -command: +```shell +kubectl cnp snapshot cluster-example -```sh -kubectl cnp subscription sync-sequences --help +waiting for cluster-example-3 to be fenced +waiting for VolumeSnapshot cluster-example-3-1682539624 to be ready to use +unfencing pod cluster-example-3 ``` -##### Example +The `VolumeSnapshot` resource will be created with an empty +`VolumeSnapshotClass` reference. That resource is intended by be used by the +`VolumeSnapshotClass` configured as default. -As in the previous sections for publication and subscription, we have -a `source-cluster` and a `destination-cluster`. The publication and the -subscription, both called `app`, are already present. +A specific `VolumeSnapshotClass` can be requested via the `-c` option: -The following command will synchronize the sequences involved in the -`app` subscription, from the source cluster into the destination cluster. - -``` sh -kubectl cnp subscription sync-sequences destination-cluster \ - --subscription=app +```shell +kubectl cnp snapshot cluster-example -c longhorn ``` - -!!! Warning - Prioritize testing subscriptions in a non-production environment to - guarantee their effectiveness and detect any potential issues before deploying - them in a production setting. - -## Integration with K9s - -The `cnp` plugin can be easily integrated in [K9s](https://k9scli.io/), a -popular terminal-based UI to interact with Kubernetes clusters. - -See [`k9s/plugins.yml`](../samples/k9s/plugins.yml) for details. diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx index 321a8dd6b29..956e2e2813a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx @@ -1,57 +1,112 @@ --- -title: 'Kubernetes Upgrade' +title: 'Kubernetes Upgrade and Maintenance' originalFilePath: 'src/kubernetes_upgrade.md' --- -Kubernetes clusters must be kept updated. This becomes even more -important if you are self-managing your Kubernetes clusters, especially -on **bare metal**. 
- -Planning and executing regular updates is a way for your organization -to clean up the technical debt and reduce the business risks, despite -the introduction in your Kubernetes infrastructure of controlled -downtimes that temporarily take out a node from the cluster for -maintenance reasons (recommended reading: +Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal +performance and security, particularly for self-managed clusters, especially +those running on bare metal infrastructure. Regular updates help address +technical debt and mitigate business risks, despite the controlled downtimes +associated with temporarily removing a node from the cluster for maintenance +purposes. For further insights on embracing risk in operations, refer to the ["Embracing Risk"](https://landing.google.com/sre/sre-book/chapters/embracing-risk/) -from the Site Reliability Engineering book). +chapter from the Site Reliability Engineering book. + +## Importance of Regular Updates -For example, you might need to apply security updates on the Linux -servers where Kubernetes is installed, or to replace a malfunctioning -hardware component such as RAM, CPU, or RAID controller, or even upgrade -the cluster to the latest version of Kubernetes. +Updating Kubernetes involves planning and executing maintenance tasks, such as +applying security updates to underlying Linux servers, replacing malfunctioning +hardware components, or upgrading the cluster to the latest Kubernetes version. +These activities are essential for maintaining a robust and secure +infrastructure. -Usually, maintenance operations in a cluster are performed one node -at a time by: +## Maintenance Operations in a Cluster -1. evicting the workloads from the node to be updated (`drain`) -2. performing the actual operation (for example, system update) -3. re-joining the node to the cluster (`uncordon`) +Typically, maintenance operations are carried out on one node at a time, following a [structured process](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/): -The above process requires workloads to be either stopped for the -entire duration of the upgrade or migrated to another node. +1. eviction of workloads (`drain`): workloads are gracefully moved away from + the node to be updated, ensuring a smooth transition. +2. performing the operation: the actual maintenance operation, such as a + system update or hardware replacement, is executed. +3. rejoining the node to the cluster (`uncordon`): the updated node is + reintegrated into the cluster, ready to resume its responsibilities. -While the latest case is the expected one in terms of service -reliability and self-healing capabilities of Kubernetes, there can -be situations where it is advised to operate with a temporarily -degraded cluster and wait for the upgraded node to be up again. +This process requires either stopping workloads for the entire upgrade duration +or migrating them to other nodes in the cluster. -In particular, if your PostgreSQL cluster relies on **node-local storage** -\- that is *storage which is local to the Kubernetes worker node where -the PostgreSQL database is running*. -Node-local storage (or simply *local storage*) is used to enhance performance. +## Temporary PostgreSQL Cluster Degradation + +While the standard approach ensures service reliability and leverages +Kubernetes' self-healing capabilities, there are scenarios where operating with +a temporarily degraded cluster may be acceptable. 
This is particularly relevant +for PostgreSQL clusters relying on **node-local storage**, where the storage is +local to the Kubernetes worker node running the PostgreSQL database. Node-local +storage, or simply *local storage*, is employed to enhance performance. !!! Note - If your database files are on shared storage over the network, - you may not need to define a maintenance window. If the volumes currently - used by the pods can be reused by pods running on different nodes after - the drain, the default self-healing behavior of the operator will work - fine (you can then skip the rest of this section). - -When using local storage for PostgreSQL, you are advised to temporarily -put the cluster in **maintenance mode** through the `nodeMaintenanceWindow` -option to avoid standard self-healing procedures to kick in, -while, for example, enlarging the partition on the physical node or -updating the node itself. + If your database files reside on shared storage accessible over the + network, the default self-healing behavior of the operator can efficiently + handle scenarios where volumes are reused by pods on different nodes after a + drain operation. In such cases, you can skip the remaining sections of this + document. + +## Pod Disruption Budgets + +By default, EDB Postgres for Kubernetes safeguards Postgres cluster operations. If a node is +to be drained and contains a cluster's primary instance, a switchover happens +ahead of the drain. Once the instance in the node is downgraded to replica, the +draining can resume. +For single-instance clusters, a switchover is not possible, so EDB Postgres for Kubernetes +will prevent draining the node where the instance is housed. +Additionally, in multi-instance clusters, EDB Postgres for Kubernetes guarantees that only +one replica at a time is gracefully shut down during a drain operation. + +Each PostgreSQL `Cluster` is equipped with two associated `PodDisruptionBudget` +resources - you can easily confirm it with the `kubectl get pdb` command. + +Our recommendation is to leave pod disruption budgets enabled for every +production Postgres cluster. This can be effortlessly managed by toggling the +`.spec.enablePDB` option, as detailed in the +[API reference](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ClusterSpec). + +## PostgreSQL Clusters used for Development or Testing + +For PostgreSQL clusters used for development purposes, often consisting of +a single instance, it is essential to disable pod disruption budgets. Failure +to do so will prevent the node hosting that cluster from being drained. + +The following example illustrates how to disable pod disruption budgets for a +1-instance development cluster: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: dev +spec: + instances: 1 + enablePDB: false + + storage: + size: 1Gi +``` + +This configuration ensures smoother maintenance procedures without restrictions +on draining the node during development activities. + +## Node Maintenance Window + +!!! Important + While EDB Postgres for Kubernetes will continue supporting the node maintenance window, + it is currently recommended to transition to direct control of pod disruption + budgets, as explained in the previous section. This section is retained + mainly for backward compatibility. 
+ +Prior to release 1.23, EDB Postgres for Kubernetes had just one declarative mechanism to manage +Kubernetes upgrades when dealing with local storage: you had to temporarily put +the cluster in **maintenance mode** through the `nodeMaintenanceWindow` option +to avoid standard self-healing procedures to kick in, while, for example, +enlarging the partition on the physical node or updating the node itself. !!! Warning Limit the duration of the maintenance window to the shortest @@ -90,7 +145,13 @@ reusePVC disabled: see section below. Don't be afraid: it refers to another volume internally used by the operator - not the PostgreSQL data directory. -## Single instance clusters with `reusePVC` set to `false` +!!! Important + `PodDisruptionBudget` management can be disabled by setting the + `.spec.enablePDB` field to `false`. In that case, the operator won't + create `PodDisruptionBudgets` and will delete them if they were + previously created. + +### Single instance clusters with `reusePVC` set to `false` !!! Important We recommend to always create clusters with more diff --git a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx index 9bb673bd5ab..55805a60e80 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx @@ -71,7 +71,8 @@ These predefined labels are managed by EDB Postgres for Kubernetes. instead `k8s.enterprisedb.io/podRole` -: Role of the pod: `instance`, or `pooler` +: Distinguishes pods dedicated to pooler deployment from those used for + database instances `k8s.enterprisedb.io/poolerName` : Name of the PgBouncer pooler @@ -85,12 +86,15 @@ instead `role` - **deprecated** : Whether the instance running in a pod is a `primary` or a `replica`. - This label is deprecated, you should use `k8s.enterprisedb.io/podRole` instead. + This label is deprecated, you should use `k8s.enterprisedb.io/instanceRole` instead. `k8s.enterprisedb.io/scheduled-backup` : When available, name of the `ScheduledBackup` resource that created a given `Backup` object +`k8s.enterprisedb.io/instanceRole` +: Whether the instance running in a pod is a `primary` or a `replica`. + ## Predefined annotations These predefined annotations are managed by EDB Postgres for Kubernetes. 
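For example, the new `k8s.enterprisedb.io/instanceRole` label described above can be
used directly as a pod selector. The following is a minimal sketch with an
illustrative selector only; it assumes the pods already carry the predefined labels:

```shell
# List only the pods currently acting as replicas, using the new label
kubectl get pods -l k8s.enterprisedb.io/instanceRole=replica

# The deprecated equivalent relied on the generic "role" label:
# kubectl get pods -l role=replica
```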
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx index b06db08c8f4..4dca75edd84 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx @@ -176,7 +176,7 @@ cnp_collector_up{cluster="cluster-example"} 1 # HELP cnp_collector_postgres_version Postgres version # TYPE cnp_collector_postgres_version gauge -cnp_collector_postgres_version{cluster="cluster-example",full="16.1"} 16.1 +cnp_collector_postgres_version{cluster="cluster-example",full="16.2"} 16.2 # HELP cnp_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp # TYPE cnp_collector_last_failed_backup_timestamp gauge diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx index a28a1b1fba9..83a061bf6b4 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx @@ -64,6 +64,10 @@ primary/standby architecture directly by setting the `imageName` attribute in the CR. The operator also supports `imagePullSecrets` to access private container registries, and it supports digests and tags for finer control of container image immutability. +If you prefer not to specify an image name, you can leverage +[image catalogs](image_catalog.md) by simply referencing the PostgreSQL +major version. Moreover, image catalogs enable you to effortlessly create +custom catalogs, directing to images based on your specific requirements. ### Labels and annotations @@ -115,7 +119,8 @@ switchover operations. EDB Postgres for Kubernetes manages replication slots for all the replicas in the HA cluster. The implementation is inspired by the previously proposed patch for PostgreSQL, called -[failover slots](https://wiki.postgresql.org/wiki/Failover_slots). +[failover slots](https://wiki.postgresql.org/wiki/Failover_slots), and +also supports user defined physical replication slots on the primary. ### Database configuration diff --git a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx index 6a363d2d707..0c0aed6dd60 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx @@ -9,6 +9,8 @@ originalFilePath: 'src/pg4k.v1.md' - [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup) - [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster) +- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog) +- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog) - [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler) - [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup) @@ -86,6 +88,62 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +
+ +## ClusterImageCatalog + +

ClusterImageCatalog is the Schema for the clusterimagecatalogs API

+ + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
ClusterImageCatalog
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ImageCatalogSpec +
+

Specification of the desired behavior of the ClusterImageCatalog. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ +
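For illustration, a minimal `ClusterImageCatalog` could look as follows. The
catalog name is an assumption, and the image references mirror the container
images used elsewhere in this documentation:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: ClusterImageCatalog
metadata:
  name: postgresql-catalog   # illustrative name
spec:
  images:
    - major: 15
      image: quay.io/enterprisedb/postgresql:15
    - major: 16
      image: quay.io/enterprisedb/postgresql:16
```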
+ +## ImageCatalog + +

ImageCatalog is the Schema for the imagecatalogs API

+ + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.k8s.enterprisedb.io/v1
kind [Required]
string
ImageCatalog
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+ImageCatalogSpec +
+

Specification of the desired behavior of the ImageCatalog. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+
## Pooler @@ -426,6 +484,40 @@ the selected PostgreSQL instance

BackupPhase is the phase of the backup

+
+ +## BackupPluginConfiguration + +**Appears in:** + +- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) + +- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) + +

BackupPluginConfiguration contains the backup configuration used by +the backup plugin

+ + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the name of the plugin managing this backup

+
parameters
+map[string]string +
+

Parameters are the configuration parameters passed to the backup +plugin for this backup

+
+
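As a minimal sketch, a `Backup` delegating to a plugin might look as follows.
The cluster name, plugin name, and parameters are illustrative assumptions; the
plugin must match one actually loaded by the cluster:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Backup
metadata:
  name: backup-via-plugin
spec:
  cluster:
    name: cluster-example        # assumed cluster name
  method: plugin                 # other options: barmanObjectStore, volumeSnapshot
  pluginConfiguration:
    name: example-backup-plugin  # hypothetical plugin name
    parameters:                  # free-form key/value pairs handed to the plugin
      mode: full
```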
## BackupSnapshotElementStatus @@ -555,8 +647,15 @@ standby, if available.

BackupMethod -

The backup method to be used, possible options are barmanObjectStore -and volumeSnapshot. Defaults to: barmanObjectStore.

+

The backup method to be used, possible options are barmanObjectStore, +volumeSnapshot or plugin. Defaults to: barmanObjectStore.

+ + +pluginConfiguration
+BackupPluginConfiguration + + +

Configuration parameters passed to the plugin managing this backup

online
@@ -1247,6 +1346,36 @@ created from scratch

+
+ +## CatalogImage + +**Appears in:** + +- [ImageCatalogSpec](#postgresql-k8s-enterprisedb-io-v1-ImageCatalogSpec) + +

CatalogImage defines the image and major version

+ + + + + + + + + + + +
FieldDescription
image [Required]
+string +
+

The image reference

+
major [Required]
+int +
+

The PostgreSQL major version of the image. Must be unique within the catalog.

+
+
## CertificatesConfiguration @@ -1392,6 +1521,13 @@ and digests for deterministic and repeatable deployments (<image>:<tag>@sha256:<digestValue>)

+imageCatalogRef
+ImageCatalogRef + + +

Defines the major PostgreSQL version we want to use within an ImageCatalog

+ + imagePullPolicy
core/v1.PullPolicy @@ -1754,6 +1890,28 @@ Defaults to: RuntimeDefault

The tablespaces configuration

+enablePDB
+bool + + +

Manage the PodDisruptionBudget resources within the cluster. When +configured as true (default setting), the pod disruption budgets +will safeguard the primary node from being terminated. Conversely, +setting it to false will result in the absence of any +PodDisruptionBudget resource, permitting the shutdown of all nodes +hosting the PostgreSQL cluster. This latter configuration is +advisable for any PostgreSQL cluster employed for +development/staging purposes.

+ + +plugins [Required]
+PluginConfigurationList + + +

The plugins configuration, containing +any plugin to be loaded with the corresponding configuration

+ + @@ -2075,6 +2233,27 @@ This field is reported when .spec.failoverDelay is populated or dur

AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster

+image
+string + + +

Image contains the image name used by the pods

+ + +pluginStatus [Required]
+[]PluginStatus + + +

PluginStatus is the status of the loaded plugins

+ + +switchReplicaClusterStatus
+SwitchReplicaClusterStatus + + +

SwitchReplicaClusterStatus is the status of the switch to replica cluster

+ + @@ -2506,6 +2685,60 @@ default to false.

+
+ +## ImageCatalogRef + +**Appears in:** + +- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) + +

ImageCatalogRef defines the reference to a major version in an ImageCatalog

+ + + + + + + + + + + +
FieldDescription
TypedLocalObjectReference
+core/v1.TypedLocalObjectReference +
(Members of TypedLocalObjectReference are embedded into this type.) + No description provided.
major [Required]
+int +
+

The major version of PostgreSQL we want to use from the ImageCatalog

+
+ +
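A minimal sketch of how this reference is used from a `Cluster` spec, assuming a
catalog named `postgresql-catalog` (a namespaced `ImageCatalog` can be referenced
the same way by changing the `kind`):

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-from-catalog
spec:
  instances: 3
  imageCatalogRef:
    apiGroup: postgresql.k8s.enterprisedb.io
    kind: ClusterImageCatalog   # or ImageCatalog
    name: postgresql-catalog    # illustrative catalog name
    major: 16
  storage:
    size: 1Gi
```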
+ +## ImageCatalogSpec + +**Appears in:** + +- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog) + +- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog) + +

ImageCatalogSpec defines the desired ImageCatalog

+ + + + + + + + +
FieldDescription
images [Required]
+[]CatalogImage +
+

List of CatalogImages available in the catalog

+
+
## Import @@ -2921,6 +3154,8 @@ with an explanation of the cause

- [ServiceAccountTemplate](#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate) +- [ServiceTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-ServiceTemplateSpec) +

Metadata is a structure similar to the metav1.ObjectMeta, but still parseable by controller-gen to create a suitable CRD for the user. The comment of PodTemplateSpec has an explanation of why we are @@ -3250,6 +3485,69 @@ the operator calls PgBouncer's PAUSE and RESUME comman +

+ +## PluginStatus + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

PluginStatus is the status of a loaded plugin

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the name of the plugin

+
version [Required]
+string +
+

Version is the version of the plugin loaded by the +latest reconciliation loop

+
capabilities [Required]
+[]string +
+

Capabilities are the list of capabilities of the +plugin

+
operatorCapabilities [Required]
+[]string +
+

OperatorCapabilities are the list of capabilities of the +plugin regarding the reconciler

+
walCapabilities [Required]
+[]string +
+

WALCapabilities are the list of capabilities of the +plugin regarding the WAL management

+
backupCapabilities [Required]
+[]string +
+

BackupCapabilities are the list of capabilities of the +plugin regarding the Backup management

+
+
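For a quick check of what the operator has loaded, this status field can be
inspected with a standard JSONPath query; the cluster name below is an
assumption:

```shell
# Print the name and version of every plugin reported in the cluster status
kubectl get clusters.postgresql.k8s.enterprisedb.io cluster-example \
  -o jsonpath='{range .status.pluginStatus[*]}{.name}{" "}{.version}{"\n"}{end}'
```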
## PodTemplateSpec @@ -3471,6 +3769,13 @@ Pooler name should never match with any cluster name within the same namespace.<

The configuration of the monitoring infrastructure of this pooler.

+serviceTemplate
+ServiceTemplateSpec + + +

Template for the Service to be created

+ + @@ -3804,6 +4109,13 @@ of replication slots

every updateInterval seconds (default 30).

+synchronizeReplicas
+SynchronizeReplicasConfiguration + + +

Configures the synchronization of the user defined physical replication slots

+ + @@ -4134,6 +4446,13 @@ standby, if available.

and volumeSnapshot. Defaults to: barmanObjectStore.

+pluginConfiguration
+BackupPluginConfiguration + + +

Configuration parameters passed to the plugin managing this backup

+ + online
bool @@ -4387,6 +4706,39 @@ service account

+
+ +## ServiceTemplateSpec + +**Appears in:** + +- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) + +

ServiceTemplateSpec is a structure allowing the user to set +a template for Service generation.

+ + + + + + + + + + + +
FieldDescription
metadata
+Metadata +
+

Standard object's metadata. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

+
spec
+core/v1.ServiceSpec +
+

Specification of the desired behavior of the service. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+
## SnapshotOwnerReference @@ -4464,6 +4816,29 @@ Size cannot be decreased.

+
+ +## SwitchReplicaClusterStatus + +**Appears in:** + +- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) + +

SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster

+ + + + + + + + +
FieldDescription
inProgress
+bool +
+

InProgress indicates if there is an ongoing procedure of switching a cluster to a replica cluster.

+
+
## SyncReplicaElectionConstraints @@ -4497,6 +4872,43 @@ if all the labels values match.

+
+ +## SynchronizeReplicasConfiguration + +**Appears in:** + +- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration) + +

SynchronizeReplicasConfiguration contains the configuration for the synchronization of user defined +physical replication slots

+ + + + + + + + + + + + + + +
FieldDescription
enabled [Required]
+bool +
+

When set to true, every replication slot that is on the primary is synchronized on each standby

+
excludePatterns
+[]string +
+

List of regular expression patterns to match the names of replication slots to be excluded (by default empty)

+
- [Required]
+synchronizeReplicasCache +
+ No description provided.
+
## TDEConfiguration diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx index 5df10fe24ca..35c66161607 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx @@ -71,6 +71,7 @@ operator by applying the following sections in this order: The **global default parameters** are: ```text +archive_mode = 'on' dynamic_shared_memory_type = 'posix' logging_collector = 'on' log_destination = 'csvlog' @@ -86,6 +87,7 @@ shared_memory_type = 'mmap' # for PostgreSQL >= 12 only wal_keep_size = '512MB' # for PostgreSQL >= 13 only wal_keep_segments = '32' # for PostgreSQL <= 12 only wal_level = 'logical' +wal_log_hints = 'on' wal_sender_timeout = '5s' wal_receiver_timeout = '5s' ``` @@ -116,7 +118,6 @@ The following parameters are **fixed** and exclusively controlled by the operato ```text archive_command = '/controller/manager wal-archive %p' -archive_mode = 'on' full_page_writes = 'on' hot_standby = 'true' listen_addresses = '*' @@ -127,8 +128,6 @@ ssl_ca_file = '/controller/certificates/client-ca.crt' ssl_cert_file = '/controller/certificates/server.crt' ssl_key_file = '/controller/certificates/server.key' unix_socket_directories = '/controller/run' -wal_level = 'logical' -wal_log_hints = 'on' ``` Since the fixed parameters are added at the end, they can't be overridden by the @@ -653,4 +652,3 @@ Users are not allowed to set the following configuration parameters in the - `unix_socket_directories` - `unix_socket_group` - `unix_socket_permissions` -- `wal_log_hints` diff --git a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx index 8aa82012334..14be10f32c4 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx @@ -205,33 +205,61 @@ store to fetch the WAL files. You can check the [sample YAML](../samples/cluster-example-replica-from-volume-snapshot.yaml) for it in the `samples/` subdirectory. +## Demoting a Primary to a Replica Cluster + +EDB Postgres for Kubernetes provides the functionality to demote a primary cluster to a +replica cluster. This action is typically planned when transitioning the +primary role from one data center to another. The process involves demoting the +current primary cluster (e.g., cluster-eu-south) to a replica cluster and +subsequently promoting the designated replica cluster (e.g., +`cluster-eu-central`) to primary when fully synchronized. +Provided you have defined an external cluster in the current primary `Cluster` +resource that points to the replica cluster that's been selected to become the +new primary, all you need to do is to enable replica mode and define the source +as follows: + +```yaml + replica: + enabled: true + source: cluster-eu-central +``` + ## Promoting the designated primary in the replica cluster -To promote the **designated primary** to **primary**, all we need to do is to +To promote a replica cluster (e.g. 
`cluster-eu-central`) to a primary cluster +and make the designated primary a real primary, all you need to do is to disable the replica mode in the replica cluster through the option -`.spec.replica.enabled` +`.spec.replica.enabled`: ```yaml replica: enabled: false - source: cluster-example + source: cluster-eu-south ``` -Once the replica mode is disabled, the replica cluster and the source cluster -will become two separate clusters, and the **designated primary** in the replica -cluster will be promoted to be that cluster's **primary**. We can verify the role -change using the cnp plugin, checking the status of the cluster which was -previously the replica: +If you have first demoted the `cluster-eu-south` and waited for +`cluster-eu-central` to be in sync, once `cluster-eu-central` starts as +primary, the `cluster-eu-south` cluster will seamlessly start as a replica +cluster, without the need of re-cloning. + +If you disable replica mode without prior demotion, the replica cluster and the +source cluster will become two separate clusters. + +When replica mode is disabled, the **designated primary** in the replica +cluster will be promoted to be that cluster's **primary**. + +You can verify the role change using the `cnp` plugin, checking the status of +the cluster which was previously the replica: ```shell -kubectl cnp -n status cluster-replica-example +kubectl cnp -n status cluster-eu-central ``` !!! Note - Disabling replication is an **irreversible** operation: once replication is - disabled and the **designated primary** is promoted to **primary**, the - replica cluster and the source cluster will become two independent clusters - definitively. + Disabling replication is an **irreversible** operation. Once replication is + disabled and the designated primary is promoted to primary, the replica cluster + and the source cluster become two independent clusters definitively. Ensure to + follow the demotion procedure correctly to avoid unintended consequences. ## Delayed replicas diff --git a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx index 19af9d44327..ff24b536e26 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx @@ -195,7 +195,7 @@ As you can imagine, the availability zone is just an example, but you could customize this behavior based on other labels that describe the node, such as storage, CPU, or memory. -## Replication slots for High Availability +## Replication slots [Replication slots](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS) are a native PostgreSQL feature introduced in 9.4 that provides an automated way @@ -207,9 +207,19 @@ standby is (temporarily) disconnected. A replication slot exists solely on the instance that created it, and PostgreSQL does not replicate it on the standby servers. As a result, after a failover or a switchover, the new primary does not contain the replication slot from -the old primary. This can create problems for -the streaming replication clients that were connected to the old -primary and have lost their slot. +the old primary. This can create problems for the streaming replication clients +that were connected to the old primary and have lost their slot. 
+ +EDB Postgres for Kubernetes provides a turn-key solution to synchronize the content of +physical replication slots from the primary to each standby, addressing two use +cases: + +- the replication slots automatically created for the High Availability of the + Postgres cluster (see ["Replication slots for High Availability" below](#replication-slots-for-high-availability) for details) +- [user-defined replication slots](#user-defined-replication-slots) created on + the primary + +### Replication slots for High Availability EDB Postgres for Kubernetes fills this gap by introducing the concept of cluster-managed replication slots, starting with high availability clusters. This feature @@ -227,13 +237,13 @@ In EDB Postgres for Kubernetes, we use the terms: content of the `pg_replication_slots` view in the primary, and updated at regular intervals using `pg_replication_slot_advance()`. -This feature, introduced in EDB Postgres for Kubernetes 1.18, is now enabled by default and -can be disabled via configuration. For details, please refer to the +This feature is enabled by default and can be disabled via configuration. +For details, please refer to the ["replicationSlots" section in the API reference](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration). Here follows a brief description of the main options: `.spec.replicationSlots.highAvailability.enabled` -: if true, the feature is enabled (`true` is the default since 1.21) +: if `true`, the feature is enabled (`true` is the default since 1.21) `.spec.replicationSlots.highAvailability.slotPrefix` : the prefix that identifies replication slots managed by the operator @@ -277,6 +287,63 @@ spec: size: 1Gi ``` +### User-Defined Replication slots + +Although EDB Postgres for Kubernetes doesn't support a way to declaratively define physical +replication slots, you can still [create your own slots via SQL](https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-REPLICATION). + +!!! Info rmation + At the moment, we don't have any plans to manage replication slots + in a declarative way, but it might change depending on the feedback + we receive from users. The reason is that replication slots exist + for a specific purpose and each should be managed by a specific application + the oversees the entire lifecycle of the slot on the primary. + +EDB Postgres for Kubernetes can manage the synchronization of any user managed physical +replication slots between the primary and standbys, similarly to what it does +for the HA replication slots explained above (the only difference is that you +need to create the replication slot). + +This feature is enabled by default (meaning that any replication slot is +synchronized), but you can disable it or further customize its behavior (for +example by excluding some slots using regular expressions) through the +`synchronizeReplicas` stanza. For example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + replicationSlots: + synchronizeReplicas: + enabled: true + excludePatterns: + - "^foo" +``` + +For details, please refer to the +["replicationSlots" section in the API reference](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration). +Here follows a brief description of the main options: + +`.spec.replicationSlots.synchronizeReplicas.enabled` +: When true or not specified, every user-defined replication slot on the + primary is synchronized on each standby. 
If changed to false, the operator will + remove any replication slot previously created by itself on each standby. + +`.spec.replicationSlots.synchronizeReplicas.excludePatterns` +: A list of regular expression patterns to match the names of user-defined + replication slots to be excluded from synchronization. This can be useful to + exclude specific slots based on naming conventions. + +!!! Warning + Users utilizing this feature should carefully monitor user-defined replication + slots to ensure they align with their operational requirements and do not + interfere with the failover process. + +### Synchronization frequency + You can also control the frequency with which a standby queries the `pg_replication_slots` view on the primary, and updates its local copy of the replication slots, like in this example: @@ -290,23 +357,12 @@ spec: instances: 3 # Reduce the frequency of standby HA slots updates to once every 5 minutes replicationSlots: - highAvailability: - enabled: true updateInterval: 300 storage: size: 1Gi ``` -Replication slots must be carefully monitored in your infrastructure. By default, -we provide the `pg_replication_slots` metric in our Prometheus exporter with -key information such as the name of the slot, the type, whether it is active, -the lag from the primary. - -!!! Seealso "Monitoring" - Please refer to the ["Monitoring" section](monitoring.md) for details on - how to monitor a EDB Postgres for Kubernetes deployment. - ### Capping the WAL size retained for replication slots When replication slots is enabled, you might end up running out of disk @@ -330,3 +386,14 @@ when replication slots support is enabled. For example: max_slot_wal_keep_size: "10GB" # ... ``` + +### Monitoring replication slots + +Replication slots must be carefully monitored in your infrastructure. By default, +we provide the `pg_replication_slots` metric in our Prometheus exporter with +key information such as the name of the slot, the type, whether it is active, +the lag from the primary. + +!!! Seealso "Monitoring" + Please refer to the ["Monitoring" section](monitoring.md) for details on + how to monitor a EDB Postgres for Kubernetes deployment. diff --git a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx index e68d22cae34..2c23ed4fe4c 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx @@ -13,6 +13,8 @@ Rolling upgrades are started when: - the user changes the `imageName` attribute of the cluster specification; +- the [image catalog](image_catalog.md) is updated with a new image for the major used by the cluster; + - a change in the PostgreSQL configuration requires a restart to be applied; diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx index f04675c44cb..bf370afd530 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx @@ -131,3 +131,8 @@ your PostgreSQL cluster. : [`cluster-restore-with-tablespaces.yaml`](../samples/cluster-restore-with-tablespaces.yaml) For a list of available options, see [API reference](pg4k.v1.md). 
+ +## Pooler configuration + +**Pooler with custom service config** +: [`pooler-external.yaml`](../samples/pooler-external.yaml) diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore-cr.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore-cr.yaml new file mode 100644 index 00000000000..3d033ade9dd --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore-cr.yaml @@ -0,0 +1,26 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + + storage: + size: 1Gi + storageClass: csi-hostpath-sc + walStorage: + size: 1Gi + storageClass: csi-hostpath-sc + + bootstrap: + recovery: + volumeSnapshots: + storage: + name: cluster-example-20231031161103 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + walStorage: + name: cluster-example-20231031161103-wal + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore.yaml new file mode 100644 index 00000000000..a9f14f917d1 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis-restore.yaml @@ -0,0 +1,43 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + imageName: registry.dev:5000/postgresql:16 + + storage: + size: 1Gi + storageClass: csi-hostpath-sc + walStorage: + size: 1Gi + storageClass: csi-hostpath-sc + + # Backup properties + # This assumes a local minio setup +# backup: +# barmanObjectStore: +# destinationPath: s3://backups/ +# endpointURL: http://minio:9000 +# s3Credentials: +# accessKeyId: +# name: minio +# key: ACCESS_KEY_ID +# secretAccessKey: +# name: minio +# key: ACCESS_SECRET_KEY +# wal: +# compression: gzip + + bootstrap: + recovery: + volumeSnapshots: + storage: + name: snapshot-0bc6095db42768c7a1fe897494a966f541ef5fb29b2eb8e9399d80bd0a32408a-2023-11-13-7.41.53 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + walStorage: + name: snapshot-a67084ba08097fd8c3e34c6afef8110091da67e5895f0379fd2df5b9f73ff524-2023-11-13-7.41.53 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis.yaml new file mode 100644 index 00000000000..0a5ae32f7d9 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-bis.yaml @@ -0,0 +1,29 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + imageName: registry.dev:5000/postgresql:16 + + backup: + volumeSnapshot: + className: csi-hostpath-groupsnapclass + #className: csi-hostpath-snapclass + groupSnapshot: true + + storage: + storageClass: csi-hostpath-sc + size: 1Gi + walStorage: + storageClass: csi-hostpath-sc + size: 1Gi + # tablespaces: + # first: + # storage: + # storageClass: csi-hostpath-sc + # size: 1Gi + # second: + # storage: + # storageClass: csi-hostpath-sc + # size: 1Gi diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-catalog.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-catalog.yaml new file mode 100644 index 00000000000..bbf9232c28b --- /dev/null +++ 
b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-catalog.yaml @@ -0,0 +1,24 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: ImageCatalog +metadata: + name: image-catalog-example +spec: + images: + - image: quay.io/enterprisedb/postgresql:16 + major: 16 + - image: quay.io/enterprisedb/postgresql:15 + major: 15 +--- +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + imageCatalogRef: + apiGroup: postgresql.k8s.enterprisedb.io + kind: ImageCatalog + name: image-catalog-example + major: 15 + storage: + size: 1Gi diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml index a1c8bb7d269..39a5794da7e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:16.1 + imageName: quay.io/enterprisedb/postgresql:16.2 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/pooler-external.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/pooler-external.yaml new file mode 100644 index 00000000000..227fdb61423 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/pooler-external.yaml @@ -0,0 +1,21 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Pooler +metadata: + name: pooler-example-rw +spec: + cluster: + name: cluster-example + instances: 3 + type: rw + serviceTemplate: + metadata: + labels: + app: pooler + spec: + type: LoadBalancer + pgbouncer: + poolMode: session + parameters: + max_client_conn: "1000" + default_pool_size: "10" + \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx index 970845a668b..02385d14111 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx @@ -61,7 +61,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: quay.io/enterprisedb/postgresql:16.1 + imageName: quay.io/enterprisedb/postgresql:16.2 affinity: enablePodAntiAffinity: true #default value diff --git a/product_docs/docs/postgres_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_for_kubernetes/1/security.mdx index b354c6eb880..02f92ffe2a8 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/security.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/security.mdx @@ -83,7 +83,8 @@ For OpenShift specificities on this matter, please consult the The above permissions are exclusively reserved for the operator's service account to interact with the Kubernetes API server. They are not directly accessible by the users of the operator that interact only with `Cluster`, - `Pooler`, `Backup`, and `ScheduledBackup` resources. + `Pooler`, `Backup`, `ScheduledBackup`, `ImageCatalog` and + `ClusterImageCatalog` resources. 
Below we provide some examples and, most importantly, the reasons why EDB Postgres for Kubernetes requires full or partial management of standard Kubernetes diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx index baafac3efb7..36059f2c09a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx @@ -176,7 +176,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 16.1 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 16.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx index 13f1850542a..4140eb0767a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx @@ -5,14 +5,15 @@ originalFilePath: 'src/tde.md' !!! Important TDE is available *only* for operands that support it: - EPAS versions 15 and newer. + EPAS and PG Extended, versions 15 and newer. Transparent Data Encryption, or TDE, is a technology used by several database vendors to **encrypt data at rest**, i.e. database files on disk. TDE does not however encrypt data in use. -TDE is included in EDB Postgres Advanced Server (EPAS), starting with version -15, and it is supported by the EDB Postgres for Kubernetes operator. +TDE is included in EDB Postgres Advanced Server and EDB Postgres Extended +Server from version 15, and is supported by the EDB Postgres for Kubernetes +operator. !!! Important Before you proceed, please take some time to familiarize with the @@ -23,6 +24,11 @@ Data encryption/decryption is entirely transparent to the user, as it is managed by the database without requiring any application changes or updated client drivers. +!!! Note + In the code samples shown below, the `epas` sub-section of `postgresql` in + the YAML manifests is used to activate TDE. The `epas` section can be used + to enable TDE for PG Extended images as well as for EPAS images. + EDB Postgres for Kubernetes provides 3 ways to use TDE: - using a secret containing the passphrase diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx index f826127e879..d2be118ac9d 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx @@ -221,7 +221,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: quay.io/enterprisedb/postgresql:16.1-3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -297,7 +297,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: quay.io/enterprisedb/postgresql:16.1-3 + Image Name: quay.io/enterprisedb/postgresql:16.2-3 ``` !!! 
Note diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx index 26385fee0ef..b75441ee1b2 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx @@ -16,7 +16,7 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up the WAL archive. !!! Info - Please refer to [`BarmanObjectStoreConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration) + Please refer to [`BarmanObjectStoreConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-barmanobjectstoreconfiguration) in the API reference for a full list of options. If required, you can choose to compress WAL files as soon as they From 89aea0d917354bef2330464ebb841656410add25 Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Thu, 25 Apr 2024 19:44:28 +0000 Subject: [PATCH 2/4] Misc corrections and rollbacks --- .../1/cluster_conf.mdx | 2 +- .../1/container_images.mdx | 4 - .../1/failure_modes.mdx | 6 +- .../docs/postgres_for_kubernetes/1/index.mdx | 2 + .../1/kubectl-plugin.mdx | 547 ++++++++++++++++-- .../1/wal_archiving.mdx | 2 +- 6 files changed, 491 insertions(+), 72 deletions(-) diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx index 0a515fb9465..8b550eb893d 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx @@ -50,7 +50,7 @@ EDB Postgres for Kubernetes relies on [ephemeral volumes](https://kubernetes.io/ for part of the internal activities. Ephemeral volumes exist for the sole duration of a pod's life, without persisting across pod restarts. -# Volume Claim Template for Temporary Storage +### Volume Claim Template for Temporary Storage The operator uses by default an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit field`. This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field. diff --git a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx index 689f6f2d8e6..6d57d72929f 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx @@ -43,10 +43,6 @@ for EDB Postgres for Kubernetes, and publishes them on ## Image tag requirements -Certainly! Here's an improved version: - -## Image Tag Requirements - To ensure the operator makes informed decisions, it must accurately detect the PostgreSQL major version. This detection can occur in two ways: diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx index 24771b9e34e..a1aab1641cf 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx @@ -8,7 +8,7 @@ PostgreSQL can face on a Kubernetes cluster during its lifetime. !!! Important In case the failure scenario you are experiencing is not covered by this - section, please immediately seek for [professional support](https://cloudnative-pg.io/support/). + section, please immediately contact EDB for support and assistance. !!! 
Seealso "Postgres instance manager" Please refer to the ["Postgres instance manager" section](instance_manager.md) @@ -175,8 +175,8 @@ In the case of undocumented failure, it might be necessary to intervene to solve the problem manually. !!! Important - In such cases, please do not perform any manual operation without - [professional support](https://cloudnative-pg.io/support/). + In such cases, please do not perform any manual operation without the + support and assistance of EDB engineering team. From version 1.11.0 of the operator, you can use the `k8s.enterprisedb.io/reconciliationLoop` annotation to temporarily disable the diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx index c9d73b9b8d0..7ddf1e5649b 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx @@ -80,6 +80,8 @@ and OpenShift. It is designed, developed, and supported by EDB and covers the full lifecycle of a highly available Postgres database clusters with a primary/standby architecture, using native streaming replication. +EDB Postgres for Kubernetes was made generally available on February 4, 2021. Earlier versions were made available to selected customers prior to the GA release. + !!! Note The operator has been renamed from Cloud Native PostgreSQL. Existing users diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx index e1982245b0a..b2b010faac1 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx @@ -34,52 +34,67 @@ them in your systems. #### Debian packages -For example, let's install the 1.18.1 release of the plugin, for an Intel based +For example, let's install the 1.22.2 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -$ wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.18.1/kubectl-cnp_1.18.1_linux_x86_64.deb +wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.2/kubectl-cnp_1.22.2_linux_x86_64.deb ``` Then, install from the local file using `dpkg`: ```sh -$ dpkg -i kubectl-cnp_1.18.1_linux_x86_64.deb +dpkg -i kubectl-cnp_1.22.2_linux_x86_64.deb +__OUTPUT__ (Reading database ... 16102 files and directories currently installed.) -Preparing to unpack kubectl-cnp_1.18.1_linux_x86_64.deb ... -Unpacking cnp (1.18.1) over (1.18.1) ... -Setting up cnp (1.18.1) ... +Preparing to unpack kubectl-cnp_1.22.2_linux_x86_64.deb ... +Unpacking cnp (1.22.2) over (1.22.2) ... +Setting up cnp (1.22.2) ... ``` #### RPM packages -As in the example for `.deb` packages, let's install the 1.18.1 release for an +As in the example for `.deb` packages, let's install the 1.22.2 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. 
-```sh -curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.18.1/kubectl-cnp_1.18.1_linux_x86_64.rpm --output cnp-plugin.rpm +``` sh +curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.2/kubectl-cnp_1.22.2_linux_x86_64.rpm \ + --output kube-plugin.rpm ``` Then install with `yum`, and you're ready to use: ```sh -$ yum --disablerepo=* localinstall cnp-plugin.rpm -yum --disablerepo=* localinstall cnp-plugin.rpm -Failed to set locale, defaulting to C.UTF-8 +yum --disablerepo=* localinstall kube-plugin.rpm +__OUTPUT__ Dependencies resolved. -==================================================================================================== - Package Architecture Version Repository Size -==================================================================================================== +======================================================================================================================== + Package Architecture Version Repository Size +======================================================================================================================== Installing: - cnpg x86_64 1.18.1-1 @commandline 14 M + kubectl-cnp x86_64 1.22.2-1 @commandline 17 M Transaction Summary -==================================================================================================== +======================================================================================================================== Install 1 Package -Total size: 14 M -Installed size: 43 M +Total size: 17 M +Installed size: 62 M Is this ok [y/N]: y +Downloading Packages: +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. +Running transaction + Preparing : 1/1 + Installing : kubectl-cnp-1.22.2-1.x86_64 1/1 + Verifying : kubectl-cnp-1.22.2-1.x86_64 1/1 + +Installed: + kubectl-cnp-1.22.2-1.x86_64 + +Complete! ``` ### Supported Architectures @@ -102,6 +117,29 @@ operating system and architectures: - arm 5/6/7 - arm64 +### Configuring auto-completion + +To configure [auto-completion](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_completion/) for the plugin, a helper shell script needs to be +installed into your current PATH. Assuming the latter contains `/usr/local/bin`, +this can be done with the following commands: + +```shell +cat > kubectl_complete-cnp <..` format (e.g. `1.22.2`). The default empty value installs the version of the operator that matches the version of the plugin. 
- `--watch-namespace`: comma separated string containing the namespaces to watch (by default all namespaces) @@ -140,7 +175,7 @@ will install the operator, is as follows: ```shell kubectl cnp install generate \ -n king \ - --version 1.17 \ + --version 1.22.2 \ --replicas 3 \ --watch-namespace "albert, bb, freddie" \ > operator.yaml @@ -149,9 +184,9 @@ kubectl cnp install generate \ The flags in the above command have the following meaning: - `-n king` install the CNP operator into the `king` namespace -- `--version 1.17` install the latest patch version for minor version 1.17 +- `--version 1.22.2` install operator version 1.22.2 - `--replicas 3` install the operator with 3 replicas -- `--watch-namespaces "albert, bb, freddie"` have the operator watch for +- `--watch-namespace "albert, bb, freddie"` have the operator watch for changes in the `albert`, `bb` and `freddie` namespaces only ### Status @@ -187,7 +222,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -232,7 +267,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:16.2 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -722,6 +757,89 @@ items: "apiVersion": "postgresql.k8s.enterprisedb.io/v1", ``` +### Logs + +The `kubectl cnp logs` command allows to follow the logs of a collection +of pods related to EDB Postgres for Kubernetes in a single go. + +It has at the moment one available sub-command: `cluster`. + +#### Cluster logs + +The `cluster` sub-command gathers all the pod logs for a cluster in a single +stream or file. +This means that you can get all the pod logs in a single terminal window, with a +single invocation of the command. + +As in all the cnp plugin sub-commands, you can get instructions and help with +the `-h` flag: + +`kubectl cnp logs cluster -h` + +The `logs` command will display logs in JSON-lines format, unless the +`--timestamps` flag is used, in which case, a human readable timestamp will be +prepended to each line. In this case, lines will no longer be valid JSON, +and tools such as `jq` may not work as desired. + +If the `logs cluster` sub-command is given the `-f` flag (aka `--follow`), it +will follow the cluster pod logs, and will also watch for any new pods created +in the cluster after the command has been invoked. +Any new pods found, including pods that have been restarted or re-created, +will also have their pods followed. +The logs will be displayed in the terminal's standard-out. +This command will only exit when the cluster has no more pods left, or when it +is interrupted by the user. + +If `logs` is called without the `-f` option, it will read the logs from all +cluster pods until the time of invocation and display them in the terminal's +standard-out, then exit. +The `-o` or `--output` flag can be provided, to specify the name +of the file where the logs should be saved, instead of displaying over +standard-out. +The `--tail` flag can be used to specify how many log lines will be retrieved +from each pod in the cluster. By default, the `logs cluster` sub-command will +display all the logs from each pod in the cluster. 
If combined with the "follow" +flag `-f`, the number of logs specified by `--tail` will be retrieved until the +current time, and and from then the new logs will be followed. + +NOTE: unlike other `cnp` plugin commands, the `-f` is used to denote "follow" +rather than specify a file. This keeps with the convention of `kubectl logs`, +which takes `-f` to mean the logs should be followed. + +Usage: + +```shell +kubectl cnp logs cluster [flags] +``` + +Using the `-f` option to follow: + +```shell +kubectl cnp report cluster cluster-example -f +``` + +Using `--tail` option to display 3 lines from each pod and the `-f` option +to follow: + +```shell +kubectl cnp report cluster cluster-example -f --tail 3 +``` + +``` json +{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} +{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} +… +… +``` + +With the `-o` option omitted, and with `--output` specified: + +``` sh +kubectl cnp logs cluster cluster-example --output my-cluster.log + +Successfully written logs to "my-cluster.log" +``` + ### Destroy The `kubectl cnp destroy` command helps remove an instance and all the @@ -826,11 +944,16 @@ kubectl cnp fio -n Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details. -### Requesting a new base backup +### Requesting a new physical backup The `kubectl cnp backup` command requests a new physical base backup for an existing Postgres cluster by creating a new `Backup` resource. +!!! Info + From release 1.21, the `backup` command accepts a new flag, `-m` + to specify the backup method. + To request a backup using volume snapshots, set `-m volumeSnapshot` + The following example requests an on-demand backup for a given cluster: ```shell @@ -844,10 +967,17 @@ kubectl cnp backup cluster-example backup/cluster-example-20230121002300 created ``` -By default, new created backup will use the backup target policy defined -in cluster to choose which instance to run on. You can also use `--backup-target` -option to override this policy. please refer to [Backup and Recovery](backup_recovery.md) -for more information about backup target. +By default, a newly created backup will use the backup target policy defined +in the cluster to choose which instance to run on. +However, you can override this policy with the `--backup-target` option. + +In the case of volume snapshot backups, you can also use the `--online` option +to request an online/hot backup or an offline/cold one: additionally, you can +also tune online backups by explicitly setting the `--immediate-checkpoint` and +`--wait-for-archive` options. + +The ["Backup" section](./backup.md#backup) contains more information about +the configuration settings. ### Launching psql @@ -862,7 +992,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```shell kubectl cnp psql cluster-example -psql (15.3) +psql (16.2 (Debian 16.2-1.pgdg110+1)) Type "help" for help. postgres=# @@ -873,7 +1003,7 @@ select to work against a replica by using the `--replica` option: ```shell kubectl cnp psql --replica cluster-example -psql (15.3) +psql (16.2 (Debian 16.2-1.pgdg110+1)) Type "help" for help. 
@@ -901,44 +1031,335 @@ kubectl cnp psql cluster-example -- -U postgres ### Snapshotting a Postgres cluster -The `kubectl cnp snapshot` creates consistent snapshots of a Postgres -`Cluster` by: +!!! Warning + The `kubectl cnp snapshot` command has been removed. + Please use the [`backup` command](#requesting-a-new-physical-backup) to request + backups using volume snapshots. -1. choosing a replica Pod to work on -2. fencing the replica -3. taking the snapshot -4. unfencing the replica +### Using pgAdmin4 for evaluation/demonstration purposes only -!!! Warning - A cluster already having a fenced instance cannot be snapshotted. +[pgAdmin](https://www.pgadmin.org/) stands as the most popular and feature-rich +open-source administration and development platform for PostgreSQL. +For more information on the project, please refer to the official +[documentation](https://www.pgadmin.org/docs/). -At the moment, this command can be used only for clusters having at least one -replica: that replica will be shut down by the fencing procedure to ensure the -snapshot to be consistent (cold backup). As the development of -declarative support for Kubernetes' `VolumeSnapshot` API continues, -this limitation will be removed, allowing you to take online backups -as business continuity requires. +Given that the pgAdmin Development Team maintains official Docker container +images, you can install pgAdmin in your environment as a standard +Kubernetes deployment. !!! Important - Even if the procedure will shut down a replica, the primary - Pod will not be involved. + Deployment of pgAdmin in Kubernetes production environments is beyond the + scope of this document and, more broadly, of the EDB Postgres for Kubernetes project. -The `kubectl cnp snapshot` command requires the cluster name: +However, **for the purposes of demonstration and evaluation**, EDB Postgres for Kubernetes +offers a suitable solution. The `cnp` plugin implements the `pgadmin4` +command, providing a straightforward method to connect to a given database +`Cluster` and navigate its content in a local environment such as `kind`. -```shell -kubectl cnp snapshot cluster-example +For example, you can install a demo deployment of pgAdmin4 for the +`cluster-example` cluster as follows: -waiting for cluster-example-3 to be fenced -waiting for VolumeSnapshot cluster-example-3-1682539624 to be ready to use -unfencing pod cluster-example-3 +```sh +kubectl cnp pgadmin4 cluster-example ``` -The `VolumeSnapshot` resource will be created with an empty -`VolumeSnapshotClass` reference. That resource is intended by be used by the -`VolumeSnapshotClass` configured as default. +This command will produce: -A specific `VolumeSnapshotClass` can be requested via the `-c` option: +```output +ConfigMap/cluster-example-pgadmin4 created +Deployment/cluster-example-pgadmin4 created +Service/cluster-example-pgadmin4 created +Secret/cluster-example-pgadmin4 created -```shell -kubectl cnp snapshot cluster-example -c longhorn +[...] +``` + +After deploying pgAdmin, forward the port using kubectl and connect +through your browser by following the on-screen instructions. + +![Screenshot of desktop installation of pgAdmin](images/pgadmin4.png) + +As usual, you can use the `--dry-run` option to generate the YAML file: + +```sh +kubectl cnp pgadmin4 --dry-run cluster-example +``` + +pgAdmin4 can be installed in either desktop or server mode, with the default +being server. 
+
+In `server` mode, authentication is required using a randomly generated password,
+and users must manually specify the database to connect to.
+
+On the other hand, `desktop` mode initiates a pgAdmin web interface without
+requiring authentication. It automatically connects to the `app` database as the
+`app` user, making it ideal for quick demos, such as on a local deployment using
+`kind`:
+
+```sh
+kubectl cnp pgadmin4 --mode desktop cluster-example
+```
+
+After concluding your demo, ensure the termination of the pgAdmin deployment by
+executing:
+
+```sh
+kubectl cnp pgadmin4 --dry-run cluster-example | kubectl delete -f -
+```
+
+!!! Warning
+    Never deploy pgAdmin in production using the plugin.
+
+### Logical Replication Publications
+
+The `cnp publication` command group is designed to streamline the creation and
+removal of [PostgreSQL logical replication publications](https://www.postgresql.org/docs/current/logical-replication-publication.html).
+Be aware that these commands are primarily intended for assisting in the
+creation of logical replication publications, particularly on remote PostgreSQL
+databases.
+
+!!! Warning
+    It is crucial to have a solid understanding of both the capabilities and
+    limitations of PostgreSQL's native logical replication system before using
+    these commands.
+    In particular, be mindful of the [logical replication restrictions](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+
+#### Creating a new publication
+
+To create a logical replication publication, use the `cnp publication create`
+command. The basic structure of this command is as follows:
+
+```sh
+kubectl cnp publication create \
+  --publication <publication_name> \
+  [--external-cluster <external_cluster_name>]
+  <local_cluster> [options]
+```
+
+There are two primary use cases:
+
+- With `--external-cluster`: Use this option to create a publication on an
+  external cluster (i.e. defined in the `externalClusters` stanza). The commands
+  will be issued from the `<local_cluster>`, but the publication will be for the
+  data in `<external_cluster_name>`.
+
+- Without `--external-cluster`: Use this option to create a publication in the
+  `<local_cluster>` PostgreSQL `Cluster` (by default, the `app` database).
+
+!!! Warning
+    When connecting to an external cluster, ensure that the specified user has
+    sufficient permissions to execute the `CREATE PUBLICATION` command.
+
+You have several options, similar to the [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html)
+command, to define the group of tables to replicate. Notable options include:
+
+- If you specify the `--all-tables` option, you create a publication `FOR ALL TABLES`.
+- Alternatively, you can specify multiple occurrences of:
+  - `--table`: Add a specific table (with an expression) to the publication.
+  - `--schema`: Include all tables in the specified database schema (available
+    from PostgreSQL 15).
+
+The `--dry-run` option enables you to preview the SQL commands that the plugin
+will execute.
+
+For additional information and detailed instructions, type the following
+command:
+
+```sh
+kubectl cnp publication create --help
+```
+
+##### Example
+
+Given a `source-cluster` and a `destination-cluster`, we would like to create a
+publication for the data on `source-cluster`.
+The `destination-cluster` has an entry in the `externalClusters` stanza pointing
+to `source-cluster`.
+
+We can run:
+
+``` sh
+kubectl cnp publication create destination-cluster \
+  --external-cluster=source-cluster --all-tables
+```
+
+which will create a publication for all tables on `source-cluster`, running
+the SQL commands on the `destination-cluster`.
+
+Or instead, we can run:
+
+``` sh
+kubectl cnp publication create source-cluster \
+  --publication=app --all-tables
+```
+
+which will create a publication named `app` for all the tables in the
+`source-cluster`, running the SQL commands on the source cluster.
+
+!!! Info
+    There are two sample files that have been provided for illustration and inspiration:
+    [logical-source](../samples/cluster-example-logical-source.yaml) and
+    [logical-destination](../samples/cluster-example-logical-destination.yaml).
+
+#### Dropping a publication
+
+The `cnp publication drop` command seamlessly complements the `create` command
+by offering similar key options, including the publication name, cluster name,
+and an optional external cluster. You can drop a `PUBLICATION` with the
+following command structure:
+
+```sh
+kubectl cnp publication drop \
+  --publication <publication_name> \
+  [--external-cluster <external_cluster_name>]
+  <local_cluster> [options]
+```
+
+To access further details and precise instructions, use the following command:
+
+```sh
+kubectl cnp publication drop --help
+```
+
+### Logical Replication Subscriptions
+
+The `cnp subscription` command group is a dedicated set of commands designed
+to simplify the creation and removal of
+[PostgreSQL logical replication subscriptions](https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+These commands are specifically crafted to aid in the establishment of logical
+replication subscriptions, especially when dealing with remote PostgreSQL
+databases.
+
+!!! Warning
+    Before using these commands, it is essential to have a comprehensive
+    understanding of both the capabilities and limitations of PostgreSQL's
+    native logical replication system.
+    In particular, be mindful of the [logical replication restrictions](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+
+In addition to subscription management, we provide a helpful command for
+synchronizing all sequences from the source cluster. While its applicability
+may vary, this command can be particularly useful in scenarios involving major
+upgrades or data import from remote servers.
+
+#### Creating a new subscription
+
+To create a logical replication subscription, use the `cnp subscription create`
+command. The basic structure of this command is as follows:
+
+```sh
+kubectl cnp subscription create \
+  --subscription <subscription_name> \
+  --publication <publication_name> \
+  --external-cluster <external_cluster_name> \
+  <local_cluster> [options]
+```
+
+This command configures a subscription directed towards the specified
+publication in the designated external cluster, as defined in the
+`externalClusters` stanza of the `<local_cluster>`.
+
+For additional information and detailed instructions, type the following
+command:
+
+```sh
+kubectl cnp subscription create --help
+```
+
+##### Example
+
+As in the section on publications, we have a `source-cluster` and a
+`destination-cluster`, and we have already created a publication called
+`app`.
+
+The following command:
+
+``` sh
+kubectl cnp subscription create destination-cluster \
+  --external-cluster=source-cluster \
+  --publication=app --subscription=app
+```
+
+will create a subscription for `app` on the destination cluster.
+
+!!! Warning
+    Prioritize testing subscriptions in a non-production environment to ensure
+    their effectiveness and identify any potential issues before implementing them
+    in a production setting.
+
+!!! Info
+    There are two sample files that have been provided for illustration and inspiration:
+    [logical-source](../samples/cluster-example-logical-source.yaml) and
+    [logical-destination](../samples/cluster-example-logical-destination.yaml).
+
+#### Dropping a subscription
+
+The `cnp subscription drop` command seamlessly complements the `create` command.
+You can drop a `SUBSCRIPTION` with the following command structure:
+
+```sh
+kubectl cnp subscription drop \
+  --subscription <subscription_name> \
+  <local_cluster> [options]
+```
+
+To access further details and precise instructions, use the following command:
+
+```sh
+kubectl cnp subscription drop --help
+```
+
+#### Synchronizing sequences
+
+One notable constraint of PostgreSQL logical replication, implemented through
+publications and subscriptions, is the lack of sequence synchronization. This
+becomes particularly relevant when utilizing logical replication for live
+database migration, especially to a higher version of PostgreSQL. A crucial
+step in this process involves updating sequences before transitioning
+applications to the new database (*cutover*).
+
+To address this limitation, the `cnp subscription sync-sequences` command
+offers a solution. This command establishes a connection with the source
+database, retrieves all relevant sequences, and subsequently updates local
+sequences with matching identities (based on database schema and sequence
+name).
+
+You can use the command as shown below:
+
+```sh
+kubectl cnp subscription sync-sequences \
+  --subscription <subscription_name> \
+  <local_cluster>
+```
+
+For comprehensive details and specific instructions, utilize the following
+command:
+
+```sh
+kubectl cnp subscription sync-sequences --help
+```
+
+##### Example
+
+As in the previous sections for publication and subscription, we have
+a `source-cluster` and a `destination-cluster`. The publication and the
+subscription, both called `app`, are already present.
+
+The following command will synchronize the sequences involved in the
+`app` subscription from the source cluster into the destination cluster.
+
+``` sh
+kubectl cnp subscription sync-sequences destination-cluster \
+  --subscription=app
+```
+
+!!! Warning
+    Prioritize testing subscriptions in a non-production environment to
+    guarantee their effectiveness and detect any potential issues before deploying
+    them in a production setting.
+
+## Integration with K9s
+
+The `cnp` plugin can be easily integrated in [K9s](https://k9scli.io/), a
+popular terminal-based UI to interact with Kubernetes clusters.
+
+See [`k9s/plugins.yml`](../samples/k9s/plugins.yml) for details.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
index b75441ee1b2..26385fee0ef 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
@@ -16,7 +16,7 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md)
 to set up the WAL archive.
 
 !!! Info
-    Please refer to [`BarmanObjectStoreConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-barmanobjectstoreconfiguration)
+    Please refer to [`BarmanObjectStoreConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
     in the API reference for a full list of options.
 
If required, you can choose to compress WAL files as soon as they From 1b7363473d0e0b691799d362d12bd1406c61235e Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Thu, 25 Apr 2024 18:59:09 +0000 Subject: [PATCH 3/4] Remove interactive demo - Katacoda is truly dead --- .../docs/postgres_for_kubernetes/1/index.mdx | 1 - .../1/interactive_demo.mdx | 536 ------------------ .../postgres_for_kubernetes/1/quickstart.mdx | 10 +- scripts/source/process-cnp-docs.sh | 4 - 4 files changed, 2 insertions(+), 549 deletions(-) delete mode 100644 product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx index 7ddf1e5649b..26179ee202d 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx @@ -14,7 +14,6 @@ navigation: - architecture - installation_upgrade - quickstart - - interactive_demo - '#Configuration' - postgresql_conf - operator_conf diff --git a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx b/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx deleted file mode 100644 index ad1f860d26f..00000000000 --- a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx +++ /dev/null @@ -1,536 +0,0 @@ ---- -title: "Installation, Configuration and Deployment Demo" -description: "Walk through the process of installing, configuring and deploying the EDB Postgres for Kubernetes Operator via a browser-hosted Kubernetes environment" -navTitle: Install, Configure, Deploy -platform: ubuntu -tags: - - postgresql - - cloud-native-postgresql-operator - - kubernetes - - k3d - - live-demo -katacodaPanel: - scenario: ubuntu:2004 - initializeCommand: clear; echo -e \\\\033[1mPreparing k3d and kubectl...\\\\n\\\\033[0m; snap install kubectl --classic; wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash; clear; echo -e \\\\033[2mk3d is ready\\ - enjoy Kubernetes\\!\\\\033[0m; - codelanguages: shell, yaml -showInteractiveBadge: true ---- - -Want to see what it takes to get the EDB Postgres for Kubernetes Operator up and running? This section will demonstrate the following: - -1. Installing the EDB Postgres for Kubernetes Operator -2. Deploying a three-node PostgreSQL cluster -3. Installing and using the kubectl-cnp plugin -4. Testing failover to verify the resilience of the cluster - -It will take roughly 5-10 minutes to work through. - -!!!interactive This demo is interactive - You can follow along right in your browser by clicking the button below. Once the environment initializes, you'll see a terminal open at the bottom of the screen. - - - -Once [k3d](https://k3d.io/) is ready, we need to start a cluster: - -```shell -k3d cluster create -__OUTPUT__ -INFO[0000] Prep: Network -INFO[0000] Created network 'k3d-k3s-default' -INFO[0000] Created image volume k3d-k3s-default-images -INFO[0000] Starting new tools node... -INFO[0001] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.6.0' -INFO[0001] Creating node 'k3d-k3s-default-server-0' -INFO[0001] Pulling image 'docker.io/rancher/k3s:v1.27.4-k3s1' -INFO[0003] Starting Node 'k3d-k3s-default-tools' -INFO[0005] Creating LoadBalancer 'k3d-k3s-default-serverlb' -INFO[0006] Pulling image 'ghcr.io/k3d-io/k3d-proxy:5.6.0' -INFO[0011] Using the k3d-tools node to gather environment information -INFO[0011] HostIP: using network gateway 172.17.0.1 address -INFO[0011] Starting cluster 'k3s-default' -INFO[0011] Starting servers... 
-INFO[0011] Starting Node 'k3d-k3s-default-server-0' -INFO[0016] All agents already running. -INFO[0016] Starting helpers... -INFO[0016] Starting Node 'k3d-k3s-default-serverlb' -INFO[0023] Injecting records for hostAliases (incl. host.k3d.internal) and for 2 network members into CoreDNS configmap... -INFO[0025] Cluster 'k3s-default' created successfully! -INFO[0025] You can now use it like this: -kubectl cluster-info -``` - -This will create the Kubernetes cluster, and you will be ready to use it. -Verify that it works with the following command: - -```shell -kubectl get nodes -__OUTPUT__ -NAME STATUS ROLES AGE VERSION -k3d-k3s-default-server-0 Ready control-plane,master 17s v1.27.4+k3s1 -``` - -You will see one node called `k3d-k3s-default-server-0`. If the status isn't yet "Ready", wait for a few seconds and run the command above again. - -## Install EDB Postgres for Kubernetes - -Now that the Kubernetes cluster is running, you can proceed with EDB Postgres for Kubernetes installation as described in the ["Installation and upgrades"](installation_upgrade.md) section: - -```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.21.0.yaml -__OUTPUT__ -namespace/postgresql-operator-system created -customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created -customresourcedefinition.apiextensions.k8s.io/clusters.postgresql.k8s.enterprisedb.io created -customresourcedefinition.apiextensions.k8s.io/poolers.postgresql.k8s.enterprisedb.io created -customresourcedefinition.apiextensions.k8s.io/scheduledbackups.postgresql.k8s.enterprisedb.io created -serviceaccount/postgresql-operator-manager created -clusterrole.rbac.authorization.k8s.io/postgresql-operator-manager created -clusterrolebinding.rbac.authorization.k8s.io/postgresql-operator-manager-rolebinding created -configmap/postgresql-operator-default-monitoring created -service/postgresql-operator-webhook-service created -deployment.apps/postgresql-operator-controller-manager created -mutatingwebhookconfiguration.admissionregistration.k8s.io/postgresql-operator-mutating-webhook-configuration created -validatingwebhookconfiguration.admissionregistration.k8s.io/postgresql-operator-validating-webhook-configuration created -``` - -And then verify that it was successfully installed: - -```shell -kubectl get deploy -n postgresql-operator-system postgresql-operator-controller-manager -__OUTPUT__ -NAME READY UP-TO-DATE AVAILABLE AGE -postgresql-operator-controller-manager 1/1 1 1 52s -``` - -## Deploy a PostgreSQL cluster - -As with any other deployment in Kubernetes, to deploy a PostgreSQL cluster -you need to apply a configuration file that defines your desired `Cluster`. - -The [`cluster-example.yaml`](../samples/cluster-example.yaml) sample file -defines a simple `Cluster` using the default storage class to allocate -disk space: - -```yaml -cat < cluster-example.yaml -# Example of PostgreSQL cluster -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example -spec: - instances: 3 - - # Example of rolling update strategy: - # - unsupervised: automated update of the primary once all - # replicas have been upgraded (default) - # - supervised: requires manual supervision to perform - # the switchover of the primary - primaryUpdateStrategy: unsupervised - - # Require 1Gi of space - storage: - size: 1Gi -EOF -``` - -!!! Note "There's more" - For more detailed information about the available options, please refer - to the ["API Reference" section](pg4k.v1.md). 
- -In order to create the 3-node PostgreSQL cluster, you need to run the following command: - -```shell -kubectl apply -f cluster-example.yaml -__OUTPUT__ -cluster.postgresql.k8s.enterprisedb.io/cluster-example created -``` - -You can check that the pods are being created with the `get pods` command. It'll take a bit to initialize, so if you run that -immediately after applying the cluster configuration you'll see the status as `Init:` or `PodInitializing`: - -```shell -kubectl get pods -__OUTPUT__ -NAME READY STATUS RESTARTS AGE -cluster-example-1-initdb-sdr25 0/1 PodInitializing 0 20s -``` - -...give it a minute, and then check on it again: - -```shell -kubectl get pods -__OUTPUT__ -NAME READY STATUS RESTARTS AGE -cluster-example-1 1/1 Running 0 47s -cluster-example-2 1/1 Running 0 24s -cluster-example-3 1/1 Running 0 8s -``` - -Now we can check the status of the cluster: - - -```shell -kubectl get cluster cluster-example -o yaml -__OUTPUT__ -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}} - creationTimestamp: "2023-10-18T19:53:06Z" - generation: 1 - name: cluster-example - namespace: default - resourceVersion: "1201" - uid: 9d712b83-f2ea-4835-8de1-c2cee75bd3c7 -spec: - affinity: - podAntiAffinityType: preferred - topologyKey: "" - bootstrap: - initdb: - database: app - encoding: UTF8 - localeCType: C - localeCollate: C - owner: app - enableSuperuserAccess: true - failoverDelay: 0 - imageName: quay.io/enterprisedb/postgresql:15.3 - instances: 3 - logLevel: info - maxSyncReplicas: 0 - minSyncReplicas: 0 - monitoring: - customQueriesConfigMap: - - key: queries - name: postgresql-operator-default-monitoring - disableDefaultQueries: false - enablePodMonitor: false - postgresGID: 26 - postgresUID: 26 - postgresql: - parameters: - archive_mode: "on" - archive_timeout: 5min - dynamic_shared_memory_type: posix - log_destination: csvlog - log_directory: /controller/log - log_filename: postgres - log_rotation_age: "0" - log_rotation_size: "0" - log_truncate_on_rotation: "false" - logging_collector: "on" - max_parallel_workers: "32" - max_replication_slots: "32" - max_worker_processes: "32" - shared_memory_type: mmap - shared_preload_libraries: "" - wal_keep_size: 512MB - wal_receiver_timeout: 5s - wal_sender_timeout: 5s - syncReplicaElectionConstraint: - enabled: false - primaryUpdateMethod: restart - primaryUpdateStrategy: unsupervised - resources: {} - startDelay: 30 - stopDelay: 30 - storage: - resizeInUseVolumes: true - size: 1Gi - switchoverDelay: 40000000 -status: - certificates: - clientCASecret: cluster-example-ca - expirations: - cluster-example-ca: 2024-01-16 19:48:06 +0000 UTC - cluster-example-replication: 2024-01-16 19:48:06 +0000 UTC - cluster-example-server: 2024-01-16 19:48:06 +0000 UTC - replicationTLSSecret: cluster-example-replication - serverAltDNSNames: - - cluster-example-rw - - cluster-example-rw.default - - cluster-example-rw.default.svc - - cluster-example-r - - cluster-example-r.default - - cluster-example-r.default.svc - - cluster-example-ro - - cluster-example-ro.default - - cluster-example-ro.default.svc - serverCASecret: cluster-example-ca - serverTLSSecret: cluster-example-server - cloudNativePostgresqlCommitHash: c42ca1c2 - 
cloudNativePostgresqlOperatorHash: 1d51c15adffb02c81dbc4e8752ddb68f709699c78d9c3384ed9292188685971b - conditions: - - lastTransitionTime: "2023-10-18T19:54:30Z" - message: Cluster is Ready - reason: ClusterIsReady - status: "True" - type: Ready - - lastTransitionTime: "2023-10-18T19:54:30Z" - message: velero addon is disabled - reason: Disabled - status: "False" - type: k8s.enterprisedb.io/velero - - lastTransitionTime: "2023-10-18T19:54:30Z" - message: external-backup-adapter addon is disabled - reason: Disabled - status: "False" - type: k8s.enterprisedb.io/externalBackupAdapter - - lastTransitionTime: "2023-10-18T19:54:30Z" - message: external-backup-adapter-cluster addon is disabled - reason: Disabled - status: "False" - type: k8s.enterprisedb.io/externalBackupAdapterCluster - - lastTransitionTime: "2023-10-18T19:54:31Z" - message: kasten addon is disabled - reason: Disabled - status: "False" - type: k8s.enterprisedb.io/kasten - configMapResourceVersion: - metrics: - postgresql-operator-default-monitoring: "860" - currentPrimary: cluster-example-1 - currentPrimaryTimestamp: "2023-10-18T19:53:49.065241Z" - healthyPVC: - - cluster-example-1 - - cluster-example-2 - - cluster-example-3 - instanceNames: - - cluster-example-1 - - cluster-example-2 - - cluster-example-3 - instances: 3 - instancesReportedState: - cluster-example-1: - isPrimary: true - timeLineID: 1 - cluster-example-2: - isPrimary: false - timeLineID: 1 - cluster-example-3: - isPrimary: false - timeLineID: 1 - instancesStatus: - healthy: - - cluster-example-1 - - cluster-example-2 - - cluster-example-3 - latestGeneratedNode: 3 - licenseStatus: - isImplicit: true - isTrial: true - licenseExpiration: "2023-11-17T19:53:06Z" - licenseStatus: Implicit trial license - repositoryAccess: false - valid: true - managedRolesStatus: {} - phase: Cluster in healthy state - poolerIntegrations: - pgBouncerIntegration: {} - pvcCount: 3 - readService: cluster-example-r - readyInstances: 3 - secretsResourceVersion: - applicationSecretVersion: "832" - clientCaSecretVersion: "828" - replicationSecretVersion: "830" - serverCaSecretVersion: "828" - serverSecretVersion: "829" - superuserSecretVersion: "831" - targetPrimary: cluster-example-1 - targetPrimaryTimestamp: "2023-10-18T19:53:06.981792Z" - timelineID: 1 - topology: - instances: - cluster-example-1: {} - cluster-example-2: {} - cluster-example-3: {} - nodesUsed: 1 - successfullyExtracted: true - writeService: cluster-example-rw -``` - -!!! Note - By default, the operator will install the latest available minor version - of the latest major version of PostgreSQL when the operator was released. - You can override this by setting [the `imageName` key in the `spec` section of - the `Cluster` definition](pg4k.v1/#clusterspec). - -!!! Important - The immutable infrastructure paradigm requires that you always - point to a specific version of the container image. - Never use tags like `latest` or `13` in a production environment - as it might lead to unpredictable scenarios in terms of update - policies and version consistency in the cluster. 
- -## Install the kubectl-cnp plugin - -EDB Postgres for Kubernetes provides [a plugin for kubectl](kubectl-plugin) to manage a cluster in Kubernetes, along with a script to install it: - -```shell -curl -sSfL \ - https://github.com/EnterpriseDB/kubectl-cnp/raw/main/install.sh | \ - sudo sh -s -- -b /usr/local/bin -__OUTPUT__ -EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.21.0 for v1.21.0/linux/x86_64 -EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp -``` - -The `cnp` command is now available in kubectl: - -```shell -kubectl cnp status cluster-example -__OUTPUT__ -Cluster Summary -Name: cluster-example -Namespace: default -System ID: 7291389121501601807 -PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 -Primary instance: cluster-example-1 -Primary start time: 2023-10-18 19:53:49 +0000 UTC (uptime 2m32s) -Status: Cluster in healthy state -Instances: 3 -Ready instances: 3 -Current Write LSN: 0/6054B60 (Timeline: 1 - WAL File: 000000010000000000000006) - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99 - -Continuous Backup status -Not configured - -Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -cluster-example-2 0/6054B60 0/6054B60 0/6054B60 0/6054B60 00:00:00 00:00:00 00:00:00 streaming async 0 -cluster-example-3 0/6054B60 0/6054B60 0/6054B60 0/6054B60 00:00:00 00:00:00 00:00:00 streaming async 0 - -Unmanaged Replication Slot Status -No unmanaged replication slots found - -Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version Node ----- ------------- ----------- ---------------- ------ --- --------------- ---- -cluster-example-1 29 MB 0/6054B60 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-2 29 MB 0/6054B60 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-3 29 MB 0/6054B60 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0 -``` - -!!! Note "There's more" - See [the Cloud Native PostgreSQL Plugin page](kubectl-plugin/) for more commands and options. - -## Testing failover - -As our status checks show, we're running two replicas - if something happens to the primary instance of PostgreSQL, the cluster will fail over to one of them. Let's demonstrate this by killing the primary pod: - -```shell -kubectl delete pod --wait=false cluster-example-1 -__OUTPUT__ -pod "cluster-example-1" deleted -``` - -This simulates a hard shutdown of the server - a scenario where something has gone wrong. - -Now if we check the status... 
-```shell -kubectl cnp status cluster-example -__OUTPUT__ -Cluster Summary -Name: cluster-example -Namespace: default -System ID: 7291389121501601807 -PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 -Primary instance: cluster-example-2 -Primary start time: 2023-10-18 19:57:07 +0000 UTC (uptime 5s) -Status: Failing over Failing over from cluster-example-1 to cluster-example-2 -Instances: 3 -Ready instances: 2 -Current Write LSN: 0/7001000 (Timeline: 2 - WAL File: 000000020000000000000007) - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99 - -Continuous Backup status -Not configured - -Streaming Replication status -Not available yet - -Unmanaged Replication Slot Status -No unmanaged replication slots found - -Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version Node ----- ------------- ----------- ---------------- ------ --- --------------- ---- -cluster-example-2 29 MB 0/7001000 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-1 - - - pod not available BestEffort - k3d-k3s-default-server-0 -``` - -...the failover process has begun, with the second pod promoted to primary. Once the failed pod has restarted, it will become a replica of the new primary: - -```shell -kubectl cnp status cluster-example -__OUTPUT__ -Cluster Summary -Name: cluster-example -Namespace: default -System ID: 7291389121501601807 -PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 -Primary instance: cluster-example-2 -Primary start time: 2023-10-18 19:57:07 +0000 UTC (uptime 1m14s) -Status: Cluster in healthy state -Instances: 3 -Ready instances: 3 -Current Write LSN: 0/7004D98 (Timeline: 2 - WAL File: 000000020000000000000007) - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99 -cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99 - -Continuous Backup status -Not configured - -Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -cluster-example-1 0/7004D98 0/7004D98 0/7004D98 0/7004D98 00:00:00 00:00:00 00:00:00 streaming async 0 -cluster-example-3 0/7004D98 0/7004D98 0/7004D98 0/7004D98 00:00:00 00:00:00 00:00:00 streaming async 0 - -Unmanaged Replication Slot Status -No unmanaged replication slots found - -Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version Node ----- ------------- ----------- ---------------- ------ --- --------------- ---- -cluster-example-2 29 MB 0/7004D98 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-1 29 MB 0/7004D98 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0 -cluster-example-3 29 MB 0/7004D98 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0 -``` - - -### Further reading - -This is all it takes to get a PostgreSQL cluster up and running, but of course 
there's a lot more possible - and certainly much more that is prudent before you should ever deploy in a production environment! - -- Design goals and possibilities offered by the CloudNativePG Operator: check out the [Architecture](architecture) and [Use cases](use_cases) sections. - -- Configuring a secure and reliable system: read through the [Security](security), [Failure Modes](failure_modes) and [Backup and Recovery](backup_recovery) sections. - - diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx index eb19791ad1f..6d9b01f5d00 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx @@ -1,6 +1,8 @@ --- title: 'Quickstart' originalFilePath: 'src/quickstart.md' +redirects: + - ../interactive_demo/ --- This section describes how to test a PostgreSQL cluster on your laptop/computer @@ -8,14 +10,6 @@ using EDB Postgres for Kubernetes on a local Kubernetes cluster in [Kind](https: [Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/). - -!!! Tip "Live demonstration" - Don't want to install anything locally just yet? Try a demonstration directly in your browser: - - [EDB Postgres for Kubernetes Operator Interactive Quickstart](interactive_demo) - - - Red Hat OpenShift Container Platform users can test the certified operator for EDB Postgres for Kubernetes on the [Red Hat OpenShift Local](https://developers.redhat.com/products/openshift-local/overview) (formerly Red Hat CodeReady Containers). diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh index 7e8419b1d4a..7f666edb5b7 100755 --- a/scripts/source/process-cnp-docs.sh +++ b/scripts/source/process-cnp-docs.sh @@ -30,10 +30,6 @@ cd $SOURCE_CHECKOUT/docs-import/docs # grab key bit of source for use in docs cp $SOURCE_CHECKOUT/docs-import/config/manager/default-monitoring.yaml $SOURCE_CHECKOUT/docs-import/docs/src/ -node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \ - -f "src/**/quickstart.md" \ - -p cnp/add-quickstart-content - node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \ -f "src/**/*.md" \ -p "cnp/add-frontmatters" \ From 878c1472b10af90ae5ac19bb84de8b5c018f9e9e Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Thu, 25 Apr 2024 20:26:19 +0000 Subject: [PATCH 4/4] Release notes for PG4K 1.18.12, 1.21.5, 1.22.3, 1.23.0 --- .../1/rel_notes/1_18_12_rel_notes.mdx | 22 +++++++++++++++++++ .../1/rel_notes/1_21_5_rel_notes.mdx | 12 ++++++++++ .../1/rel_notes/1_22_3_rel_notes.mdx | 12 ++++++++++ .../1/rel_notes/1_23_0_rel_notes.mdx | 12 ++++++++++ .../1/rel_notes/index.mdx | 8 +++++++ 5 files changed, 66 insertions(+) create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_12_rel_notes.mdx create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_5_rel_notes.mdx create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_3_rel_notes.mdx create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_23_0_rel_notes.mdx diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_12_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_12_rel_notes.mdx new file mode 100644 index 00000000000..a429ce20374 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_12_rel_notes.mdx @@ -0,0 +1,22 @@ +--- +title: "EDB Postgres for Kubernetes 1.18.12 release notes" +navTitle: "Version 1.18.12" +--- + 
+Released: 24 Apr 2024 + +EDB Postgres for Kubernetes version 1.18.12 is an LTS release of EDB Postgres for Kubernetes; there is no corresponding upstream release of CloudNativePG. + +This release of EDB Postgres for Kubernetes includes the following: + +| Type | Description | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------ | +| Enhancement | Added upgrade process from 1.18.x LTS to 1.22.x LTS | +| Enhancement | Documentation for Kubernetes 1.29.x or above ([#3729](https://github.com/cloudnative-pg/cloudnative-pg/pull/3729)) | +| Bug fix | Properly handle LSN sorting when is empty on a replica ([#4283](https://github.com/cloudnative-pg/cloudnative-pg/pull/4283)) | +| Bug fix | Avoids stopping reconciliation loop when there is no instance status available ([#4132](https://github.com/cloudnative-pg/cloudnative-pg/pull/4132)) | +| Bug fix | Waits for elected replica to be in streaming mode before a switchover ([#4288](https://github.com/cloudnative-pg/cloudnative-pg/pull/4288)) | +| Bug fix | Allow backup hooks to be called while using Velero backup | +| Bug fix | Waits for the Restic init container to be completed | +| Bug fix | Ensure pods with no ownership are deleted during cluster restore ([#4141](https://github.com/cloudnative-pg/cloudnative-pg/pull/4141)) | +| Security | Updated all Go dependencies to fix any latest security issues | diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_5_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_5_rel_notes.mdx new file mode 100644 index 00000000000..6134ee0fe34 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_5_rel_notes.mdx @@ -0,0 +1,12 @@ +--- +title: "EDB Postgres for Kubernetes 1.21.5 release notes" +navTitle: "Version 1.21.5" +--- + +Released: 23 Apr 2024 + +This release of EDB Postgres for Kubernetes includes the following: + +| Type | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Upstream merge | Merged with community CloudNativePG 1.21.5. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/). | diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_3_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_3_rel_notes.mdx new file mode 100644 index 00000000000..58a233181e6 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_3_rel_notes.mdx @@ -0,0 +1,12 @@ +--- +title: "EDB Postgres for Kubernetes 1.22.3 release notes" +navTitle: "Version 1.22.3" +--- + +Released: 24 Apr 2024 + +This release of EDB Postgres for Kubernetes includes the following: + +| Type | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Upstream merge | Merged with community CloudNativePG 1.22.3. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/). 
| diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_23_0_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_23_0_rel_notes.mdx new file mode 100644 index 00000000000..f143b0c5594 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_23_0_rel_notes.mdx @@ -0,0 +1,12 @@ +--- +title: "EDB Postgres for Kubernetes 1.23.0 release notes" +navTitle: "Version 1.23.0" +--- + +Released: 24 Apr 2024 + +This release of EDB Postgres for Kubernetes includes the following: + +| Type | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Upstream merge | Merged with community CloudNativePG 1.23.0. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.23/release_notes/v1.23/). | diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx index 9653bb2319a..5e1ef89fc17 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx @@ -4,9 +4,12 @@ navTitle: "Release notes" redirects: - ../release_notes navigation: +- 1_23_0_rel_notes +- 1_22_3_rel_notes - 1_22_2_rel_notes - 1_22_1_rel_notes - 1_22_0_rel_notes +- 1_21_5_rel_notes - 1_21_4_rel_notes - 1_21_3_rel_notes - 1_21_2_rel_notes @@ -26,6 +29,7 @@ navigation: - 1_19_2_rel_notes - 1_19_1_rel_notes - 1_19_0_rel_notes +- 1_18_12_rel_notes - 1_18_11_rel_notes - 1_18_10_rel_notes - 1_18_9_rel_notes @@ -91,9 +95,12 @@ The EDB Postgres for Kubernetes documentation describes the major version of EDB | Version | Release date | Upstream merges | | -------------------------- | ------------ | ------------------------------------------------------------------------------------------- | +| [1.23.0](1_23_0_rel_notes) | 24 Apr 2024 | Upstream [1.23.0](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.23/) | +| [1.22.3](1_22_3_rel_notes) | 24 Apr 2024 | Upstream [1.22.3](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/) | | [1.22.2](1_22_2_rel_notes) | 22 Mar 2024 | Upstream [1.22.2](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/) | | [1.22.1](1_22_1_rel_notes) | 02 Feb 2024 | Upstream [1.22.1](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/) | | [1.22.0](1_22_0_rel_notes) | 22 Dec 2023 | Upstream [1.22.0](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/) | +| [1.21.5](1_21_5_rel_notes) | 24 Apr 2024 | Upstream [1.21.5](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/) | | [1.21.4](1_21_4_rel_notes) | 22 Mar 2024 | Upstream [1.21.4](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/) | | [1.21.3](1_21_3_rel_notes) | 02 Feb 2024 | Upstream [1.21.3](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/) | | [1.21.2](1_21_2_rel_notes) | 22 Dec 2023 | Upstream [1.21.2](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/) | @@ -113,6 +120,7 @@ The EDB Postgres for Kubernetes documentation describes the major version of EDB | [1.19.2](1_19_2_rel_notes) | 27 Apr 2023 | Upstream [1.19.2](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) | | [1.19.1](1_19_1_rel_notes) | 20 Mar 2023 | Upstream [1.19.1](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) | | [1.19.0](1_19_0_rel_notes) | 14 Feb 2023 | 
Upstream [1.19.0](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) | +| [1.18.12](1_18_12_rel_notes) | 24 Apr 2024 | None | | [1.18.11](1_18_11_rel_notes) | 22 Mar 2024 | None | | [1.18.10](1_18_10_rel_notes) | 02 Feb 2024 | None | | [1.18.9](1_18_9_rel_notes) | 22 Dec 2023 | None |