From 967ef9778dd72f51aa189533aca2534f6943c1c9 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 20 Nov 2024 16:43:11 +0000
Subject: [PATCH] PG4K import for v1.24.2
---
.../docs/postgres_for_kubernetes/1/addons.mdx | 6 +
.../1/applications.mdx | 3 +
.../1/architecture.mdx | 17 +
.../docs/postgres_for_kubernetes/1/backup.mdx | 13 +
.../1/backup_barmanobjectstore.mdx | 9 +-
.../1/backup_volumesnapshot.mdx | 8 +
.../1/benchmarking.mdx | 4 +
.../postgres_for_kubernetes/1/bootstrap.mdx | 99 +-
.../1/certificates.mdx | 10 +
.../1/cluster_conf.mdx | 2 +-
.../1/connection_pooling.mdx | 17 +
.../1/container_images.mdx | 4 +
.../1/database_import.mdx | 5 +
.../1/declarative_database_management.mdx | 63 +
.../1/declarative_hibernation.mdx | 1 +
.../1/declarative_role_management.mdx | 2 +
.../postgres_for_kubernetes/1/evaluation.mdx | 35 +-
.../postgres_for_kubernetes/1/failover.mdx | 4 +
.../1/failure_modes.mdx | 14 +-
.../docs/postgres_for_kubernetes/1/faq.mdx | 1 +
.../postgres_for_kubernetes/1/fencing.mdx | 1 +
.../1/image_catalog.mdx | 1 +
.../docs/postgres_for_kubernetes/1/index.mdx | 2 -
.../1/installation_upgrade.mdx | 114 +-
.../1/instance_manager.mdx | 6 +
.../1/kubectl-plugin.mdx | 512 +-
.../1/kubernetes_upgrade.mdx | 6 +
.../1/labels_annotations.mdx | 3 +
.../1/license_keys.mdx | 9 +-
.../postgres_for_kubernetes/1/logging.mdx | 22 +-
.../postgres_for_kubernetes/1/monitoring.mdx | 22 +
.../1/object_stores.mdx | 6 +
.../postgres_for_kubernetes/1/openshift.mdx | 58 +
.../1/operator_capability_levels.mdx | 34 +-
.../1/operator_conf.mdx | 70 +-
.../1/pg4k.v1/index.mdx | 5241 +++++++++++++++++
.../1/{pg4k.v1.mdx => pg4k.v1/v1.24.2.mdx} | 456 +-
.../postgres_for_kubernetes/1/postgis.mdx | 4 +
.../1/postgresql_conf.mdx | 11 +
.../1/preview_version.mdx | 1 +
...egistry.mdx => private_edb_registries.mdx} | 13 +-
.../postgres_for_kubernetes/1/quickstart.mdx | 45 +-
.../postgres_for_kubernetes/1/recovery.mdx | 17 +
.../1/replica_cluster.mdx | 10 +
.../postgres_for_kubernetes/1/replication.mdx | 395 +-
.../1/resource_management.mdx | 1 +
.../1/rolling_update.mdx | 1 +
.../postgres_for_kubernetes/1/samples.mdx | 10 +
.../1/samples/cluster-example-epas.yaml | 8 +-
.../1/samples/cluster-example-pge.yaml | 10 +
.../1/samples/cluster-example-tde.yaml | 6 +-
.../1/samples/database-example-fail.yaml | 9 +
.../1/samples/database-example-icu.yaml | 16 +
.../1/samples/database-example.yaml | 9 +
.../1/samples/monitoring/prometheusrule.yaml | 2 +-
.../postgres_for_kubernetes/1/scheduling.mdx | 5 +
.../1/service_management.mdx | 6 +
.../1/ssl_connections.mdx | 2 +
.../postgres_for_kubernetes/1/storage.mdx | 13 +
.../postgres_for_kubernetes/1/tablespaces.mdx | 3 +
.../docs/postgres_for_kubernetes/1/tde.mdx | 5 +
.../1/troubleshooting.mdx | 12 +
.../1/wal_archiving.mdx | 7 +-
63 files changed, 6926 insertions(+), 575 deletions(-)
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/declarative_database_management.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/index.mdx
rename product_docs/docs/postgres_for_kubernetes/1/{pg4k.v1.mdx => pg4k.v1/v1.24.2.mdx} (94%)
rename product_docs/docs/postgres_for_kubernetes/1/{private_edb_registry.mdx => private_edb_registries.mdx} (96%)
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-pge.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/database-example-fail.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/database-example-icu.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/database-example.yaml
diff --git a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
index 6b283f9e25b..17707c9488a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
@@ -11,6 +11,7 @@ per-cluster basis. These add-ons are:
3. [Velero](#velero)
!!! Info
+
If you are planning to use Velero in OpenShift, please refer to the
    [OADP section](openshift.md#oadp-for-velero) in the OpenShift documentation.
@@ -51,6 +52,7 @@ Recovery simply relies on the operator to reconcile the cluster from an
existing PVC group.
!!! Important
+
The External Backup Adapter is not a tool to perform backups. It simply
provides a generic interface that any third-party backup tool in the Kubernetes
space can use. Such tools are responsible for safely storing the PVC
@@ -327,6 +329,7 @@ Here is a full example of YAML content to be placed in either:
for the `external-backup-adapter-cluster` add-on
!!! Hint
+
Copy the content below and paste it inside the `ConfigMap` or `Secret` that
you use to configure the operator or the annotation in the `Cluster`, making
sure you use the `|` character that [YAML reserves for literals](https://yaml.org/spec/1.2.2/#812-literal-style),
@@ -446,6 +449,7 @@ ahead replica instance to be the designated backup and will add Kasten-specific
backup hooks through annotations and labels to that instance.
!!! Important
+
The operator will refuse to shut down a primary instance to take a cold
backup unless the Cluster is annotated with
`k8s.enterprisedb.io/snapshotAllowColdBackupOnPrimary: enabled`
@@ -510,6 +514,7 @@ These [annotations](https://velero.io/docs/latest/backup-hooks/) are used by
Velero to run the commands to prepare the Postgres instance to be backed up.
!!! Important
+
The operator will refuse to shut down a primary instance to take a cold
backup unless the Cluster is annotated with
`k8s.enterprisedb.io/snapshotAllowColdBackupOnPrimary: enabled`
@@ -556,6 +561,7 @@ This command will create a standard Velero backup using the configured object
storage and the configured Snapshot API.
!!! Important
+
    By default, the Velero add-on excludes only a few resources from the backup
    operation, namely pods and PVCs of the instances that have not been selected
    (as you recall, the operator tries to back up the PVCs of the first replica).
diff --git a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
index 84cf029a86c..f0d464e541d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
@@ -10,6 +10,7 @@ For more information on services and how to manage them, please refer to the
["Service management"](service_management.md) section.
!!! Hint
+
It is highly recommended using those services in your applications,
and avoiding connecting directly to a specific PostgreSQL instance, as the latter
can change during the cluster lifetime.
@@ -23,6 +24,7 @@ For the credentials to connect to PostgreSQL, you can
use the secrets generated by the operator.
!!! Seealso "Connection Pooling"
+
Please refer to the ["Connection Pooling" section](connection_pooling.md) for
information about how to take advantage of PgBouncer as a connection pooler,
and create an access layer between your applications and the PostgreSQL clusters.
@@ -83,4 +85,5 @@ The `-superuser` ones are supposed to be used only for administrative purposes,
and correspond to the `postgres` user.
!!! Important
+
Superuser access over the network is disabled by default.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
index c65095dd272..89913a756df 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/architecture.md'
---
!!! Hint
+
For a deeper understanding, we recommend reading our article on the CNCF
blog post titled ["Recommended Architectures for PostgreSQL in Kubernetes"](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/),
which provides valuable insights into best practices and design
@@ -53,6 +54,7 @@ Replicas are usually called *standby servers* and can also be used for
read-only workloads, thanks to the *Hot Standby* feature.
!!! Important
+
**We recommend against storage-level replication with PostgreSQL**, although
EDB Postgres for Kubernetes allows you to adopt that strategy. For more information, please refer
to the talk given by Chris Milsted and Gabriele Bartolini at KubeCon NA 2022 entitled
@@ -75,6 +77,7 @@ This means that **each data center is active at any time** and can run workloads
simultaneously.
!!! Note
+
Most of the public Cloud Providers' managed Kubernetes services already
provide 3 or more availability zones in each region.
@@ -107,6 +110,7 @@ managing them via declarative configuration. This setup is ideal for disaster
recovery (DR), read-only operations, or cross-region availability.
!!! Important
+
Each operator deployment can only manage operations within its local
Kubernetes cluster. For operations across Kubernetes clusters, such as
controlled switchover or unexpected failover, coordination must be handled
@@ -138,6 +142,7 @@ the [replica cluster feature](replica_cluster.md)).
![Example of a Kubernetes architecture with only 2 data centers](./images/k8s-architecture-2-az.png)
!!! Hint
+
    If you are at an early stage of your Kubernetes journey, please share this
document with your infrastructure team. The two data centers setup might
be simply the result of a "lift-and-shift" transition to Kubernetes
@@ -171,6 +176,7 @@ within EDB Postgres for Kubernetes' scope, as the operator can only function wit
Kubernetes cluster.
!!! Important
+
EDB Postgres for Kubernetes provides all the necessary primitives and probes to
coordinate PostgreSQL active/passive topologies across different Kubernetes
clusters through a higher-level operator or management tool.
@@ -186,6 +192,7 @@ This approach ensures optimal performance and resource allocation for your
database operations.
!!! Hint
+
As a general rule of thumb, deploy Postgres nodes in multiples of
three—ideally with one node per availability zone. Three nodes is
an optimal number because it ensures that a PostgreSQL cluster with three
@@ -199,6 +206,7 @@ taints help prevent any non-`postgres` workloads from being scheduled on that
node.
!!! Important
+
This methodology is the most straightforward way to ensure that PostgreSQL
workloads are isolated from other workloads in terms of both computing
resources and, when using locally attached disks, storage. While different
@@ -279,6 +287,7 @@ Kubernetes cluster, with the following specifications:
within the same Kubernetes cluster / region
!!! Important
+
You can configure the above services through the `managed.services` section
in the `Cluster` configuration. This can be done by reducing the number of
services and selecting the type (default is `ClusterIP`). For more details,
@@ -298,16 +307,19 @@ automatically updates the `-rw` service to point to the promoted primary,
making sure that traffic from the applications is seamlessly redirected.
!!! Seealso "Replication"
+
Please refer to the ["Replication" section](replication.md) for more
information about how EDB Postgres for Kubernetes relies on PostgreSQL replication,
including synchronous settings.
!!! Seealso "Connecting from an application"
+
Please refer to the ["Connecting from an application" section](applications.md) for
information about how to connect to EDB Postgres for Kubernetes from a stateless
application within the same Kubernetes cluster.
!!! Seealso "Connection Pooling"
+
Please refer to the ["Connection Pooling" section](connection_pooling.md) for
information about how to take advantage of PgBouncer as a connection pooler,
and create an access layer between your applications and the PostgreSQL clusters.
@@ -329,6 +341,7 @@ service to another instance of the cluster.
### Read-only workloads
!!! Important
+
Applications must be aware of the limitations that
[Hot Standby](https://www.postgresql.org/docs/current/hot-standby.html)
presents and familiar with the way PostgreSQL operates when dealing with
@@ -348,6 +361,7 @@ Applications can also access any PostgreSQL instance through the
## Deployments across Kubernetes clusters
!!! Info
+
EDB Postgres for Kubernetes supports deploying PostgreSQL across multiple Kubernetes
clusters through a feature that allows you to define a distributed PostgreSQL
topology using replica clusters, as described in this section.
@@ -403,12 +417,14 @@ This is typically triggered by:
cluster by promoting the PostgreSQL replica cluster.
!!! Warning
+
EDB Postgres for Kubernetes cannot perform any cross-cluster automated failover, as it
does not have authority beyond a single Kubernetes cluster. Such operations
must be performed manually or delegated to a multi-cluster/federated
cluster-aware authority.
!!! Important
+
EDB Postgres for Kubernetes allows you to control the distributed topology via
declarative configuration, enabling you to automate these procedures as part of
your Infrastructure as Code (IaC) process, including GitOps.
@@ -422,6 +438,7 @@ You can also define replica clusters with a lower number of replicas, and then
increase this number when the cluster is promoted to primary.
!!! Seealso "Replica clusters"
+
Please refer to the ["Replica Clusters" section](replica_cluster.md) for
more detailed information on how physical replica clusters operate and how to
define a distributed topology with read-only clusters across different
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
index 9a37c5f9a64..2b30fa46da5 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
@@ -10,6 +10,7 @@ organizations all over the world achieve their disaster recovery goals with
Postgres.
!!! Note
+
There's another way to backup databases in PostgreSQL, through the
`pg_dump` utility - which relies on logical backups instead of physical ones.
However, logical backups are not suitable for business continuity use cases
@@ -35,11 +36,13 @@ On the other hand, EDB Postgres for Kubernetes supports two ways to store physic
the underlying storage class
!!! Important
+
Before choosing your backup strategy with EDB Postgres for Kubernetes, it is important that
    you take some time to familiarize yourself with some basic concepts, like WAL archive,
hot and cold backups.
!!! Important
+
Please refer to the official Kubernetes documentation for a list of all
the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html)
that provide snapshotting capabilities.
@@ -56,6 +59,7 @@ is fundamental for the following reasons:
time from the first available base backup in your system
!!! Warning
+
WAL archive alone is useless. Without a physical base backup, you cannot
restore a PostgreSQL cluster.
@@ -72,6 +76,7 @@ When you [configure a WAL archive](wal_archiving.md), EDB Postgres for Kubernete
out-of-the-box an RPO <= 5 minutes for disaster recovery, even across regions.
!!! Important
+
    Our recommendation is to always set up the WAL archive in production.
There are known use cases - normally involving staging and development
environments - where none of the above benefits are needed and the WAL
@@ -157,6 +162,7 @@ Scheduled backups are the recommended way to configure your backup strategy in
EDB Postgres for Kubernetes. They are managed by the `ScheduledBackup` resource.
!!! Info
+
Please refer to [`ScheduledBackupSpec`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
in the API reference for a full list of options.
@@ -165,6 +171,7 @@ which includes seconds, as expressed in
the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
!!! Warning
+
Beware that this format accepts also the `seconds` field, and it is
different from the `crontab` format in Unix/Linux systems.
@@ -190,6 +197,7 @@ In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds
are not included.
!!! Hint
+
    Backup frequency might impact your recovery time objective (RTO) after a
disaster which requires a full or Point-In-Time recovery operation. Our
advice is that you regularly test your backups by recovering them, and then
@@ -217,6 +225,7 @@ In case you want to issue a backup as soon as the ScheduledBackup resource is cr
you can set `.spec.immediate: true`.
!!! Note
+
`.spec.backupOwnerReference` indicates which ownerReference should be put inside
the created backup resources.
@@ -227,6 +236,7 @@ you can set `.spec.immediate: true`.
## On-demand backups
!!! Info
+
Please refer to [`BackupSpec`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
in the API reference for a full list of options.
@@ -302,6 +312,7 @@ Events:
```
!!!Important
+
This feature will not backup the secrets for the superuser and the
application user. The secrets are supposed to be backed up as part of
the standard backup procedures for the Kubernetes cluster.
@@ -321,6 +332,7 @@ By default, backups will run on the most aligned replica of a `Cluster`. If
no replicas are available, backups will run on the primary instance.
!!! Info
+
Although the standby might not always be up to date with the primary,
in the time continuum from the first available backup to the last
archived WAL this is normally irrelevant. The base backup indeed
@@ -345,6 +357,7 @@ spec:
```
!!! Warning
+
Beware of setting the target to primary when performing a cold backup
with volume snapshots, as this will shut down the primary for
the time needed to take the snapshot, impacting write operations.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
index 1ec2df60615..61370517dea 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
@@ -24,6 +24,7 @@ as it is composed of a community PostgreSQL image and the latest
`barman-cli-cloud` package.
!!! Important
+
Always ensure that you are running the latest version of the operands
in your system to take advantage of the improvements introduced in
Barman cloud (as well as improve the security aspects of your cluster).
@@ -46,6 +47,7 @@ provider, please refer to [Appendix A - Common object stores](object_stores.md).
## Retention policies
!!! Important
+
Retention policies are not currently available on volume snapshots.
EDB Postgres for Kubernetes can manage the automated deletion of backup files from
@@ -77,6 +79,7 @@ spec:
```
!!! Note "There's more ..."
+
The **recovery window retention policy** is focused on the concept of
*Point of Recoverability* (`PoR`), a moving point in time determined by
`current time - recovery window`. The *first valid backup* is the first
@@ -99,9 +102,9 @@ algorithms via `barman-cloud-backup` (for backups) and
- snappy
The compression settings for backups and WALs are independent. See the
-[DataBackupConfiguration](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration) and
-[WALBackupConfiguration](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration) sections in
-the API reference.
+[DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and
+[WALBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration) sections in
+the barman-cloud API reference.
It is important to note that archival time, restore time, and size change
between the algorithms, so the compression algorithm should be chosen according
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
index 3a085374575..8cbe384cfbd 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/backup_volumesnapshot.md'
---
!!! Warning
+
As noted in the [backup document](backup.md), a cold snapshot explicitly
set to target the primary will result in the primary being fenced for
the duration of the backup, rendering the cluster read-only during that
@@ -53,6 +54,7 @@ volumes of a given storage class, and managed as `VolumeSnapshot` and
`VolumeSnapshotContent` resources.
!!! Important
+
It is your responsibility to verify with the third party vendor
that volume snapshots are supported. EDB Postgres for Kubernetes only interacts
with the Kubernetes API on this matter and we cannot support issues
@@ -64,6 +66,7 @@ EDB Postgres for Kubernetes allows you to configure a given Postgres cluster for
Snapshot backups through the `backup.volumeSnapshot` stanza.
!!! Info
+
Please refer to [`VolumeSnapshotConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration)
in the API reference for a full list of options.
@@ -99,6 +102,7 @@ As you can see, the `backup` section contains both the `volumeSnapshot` stanza
`barmanObjectStore` one (controlling the [WAL archive](wal_archiving.md)).
!!! Info
+
Once you have defined the `barmanObjectStore`, you can decide to use
both volume snapshot and object store backup strategies simultaneously
to take physical backups.
@@ -108,6 +112,7 @@ The `volumeSnapshot.className` option allows you to reference the default
defined in your PostgreSQL cluster.
!!! Info
+
In case you are using a different storage class for `PGDATA` and
WAL files, you can specify a separate `VolumeSnapshotClass` for
that volume through the `walClassName` option (which defaults to
@@ -126,6 +131,7 @@ By default, EDB Postgres for Kubernetes requests an online/hot backup on volume
terminating the backup procedure
!!! Important
+
The default values are suitable for most production environments. Hot
backups are consistent and can be used to perform snapshot recovery, as we
ensure WAL retention from the start of the backup through a temporary
@@ -231,6 +237,7 @@ In case a `VolumeSnapshot` is deleted, the `deletionPolicy` specified in the
- if set to `Delete`, the `VolumeSnapshotContent` object is removed as well
!!! Warning
+
`VolumeSnapshotContent` objects do not keep all the information regarding the
backup and the cluster they refer to (like the annotations and labels that
are contained in the `VolumeSnapshot` object). Although possible, restoring
@@ -252,6 +259,7 @@ EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc`
volume snapshot class.
!!! Important
+
If you are interested in testing the example, please read
["Volume Snapshots" for the Amazon Elastic Block Store (EBS) CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/examples/kubernetes/snapshot)
for detailed instructions on the installation process for the storage class and the snapshot class.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
index 27d5daa558c..dcdb2aea22d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
@@ -11,6 +11,7 @@ Benchmarking is focused on two aspects:
- the **storage**, by relying on [fio](https://fio.readthedocs.io/en/latest/fio_doc.html)
!!! IMPORTANT
+
`pgbench` and `fio` must be run in a staging or pre-production environment.
Do not use these plugins in a production environment, as it might have
catastrophic consequences on your databases and the other
@@ -35,6 +36,7 @@ kubectl cnp pgbench \
```
!!! IMPORTANT
+
Please refer to the [`pgbench` documentation](https://www.postgresql.org/docs/current/pgbench.html)
for information about the specific options to be used in your jobs.
@@ -50,6 +52,7 @@ kubectl cnp pgbench \
```
!!! Note
+
This will generate a database with 100000000 records, taking approximately 13GB
of space on disk.
@@ -117,6 +120,7 @@ Through the `--dry-run` flag you can generate the manifest of the job for later
modification/execution.
!!! Note
+
The kubectl plugin command `fio` will create a deployment with predefined
fio job values using a ConfigMap. If you want to provide custom job values, we
recommend generating a manifest using the `--dry-run` flag and providing your
diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
index 650b6c544b3..f6c2a4225fd 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/bootstrap.md'
---
!!! Note
+
When referring to "PostgreSQL cluster" in this section, the same
concepts apply to both PostgreSQL and EDB Postgres Advanced, unless
differently stated.
@@ -23,12 +24,14 @@ For more detailed information about this feature, please refer to the
["Importing Postgres databases"](database_import.md) section.
!!! Important
+
Bootstrapping from an existing cluster opens up the possibility
to create a **replica cluster**, that is an independent PostgreSQL
cluster which is in continuous recovery, synchronized with the source
and that accepts read-only connections.
!!! Warning
+
EDB Postgres for Kubernetes requires both the `postgres` user and database to
    always exist. Using the local Unix Domain Socket, it needs to connect
as `postgres` user to the `postgres` database via `peer` authentication in
@@ -36,6 +39,7 @@ For more detailed information about this feature, please refer to the
**DO NOT DELETE** the `postgres` user or the `postgres` database!!!
!!! Info
+
EDB Postgres for Kubernetes is gradually introducing support for
[Kubernetes' native `VolumeSnapshot` API](https://github.com/cloudnative-pg/cloudnative-pg/issues/2081)
for both incremental and differential copy in backup and recovery
@@ -67,6 +71,7 @@ storage that the EDB Postgres for Kubernetes operator provides, please refer to
["Recovery" section](recovery.md) for guidance on each method.
!!! Seealso "API reference"
+
    Please refer to the [API reference for the `bootstrap` section](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
for more information.
@@ -87,6 +92,7 @@ cases include:
backup.
!!! Info
+
Ongoing development will extend the functionality of `externalClusters` to
accommodate additional use cases, such as logical replication and foreign
servers in future releases.
@@ -106,6 +112,7 @@ method or the `recovery` one. An external cluster needs to have:
- the catalog of physical base backups for the Postgres cluster
!!! Note
+
A recovery object store is normally an AWS S3, or an Azure Blob Storage,
or a Google Cloud Storage source that is managed by Barman Cloud.
@@ -120,6 +127,7 @@ continuously fed from the source, either via streaming, via WAL shipping
through the PostgreSQL's `restore_command`, or any of the two.
!!! Seealso "API reference"
+
    Please refer to the [API reference for the `externalClusters` section](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ExternalCluster)
for more information.
@@ -201,6 +209,7 @@ data. Applications should connect to the cluster with the user that owns
the application database.
!!! Important
+
If you need to create additional users, please refer to
["Declarative database role management"](declarative_role_management.md).
@@ -245,6 +254,7 @@ walSegmentSize
option in `initdb` (default: not set - defined by PostgreSQL as 16 megabytes).
!!! Note
+
The only two locale options that EDB Postgres for Kubernetes implements during
    the `initdb` bootstrap refer to the `LC_COLLATE` and `LC_CTYPE` subcategories.
The remaining locale subcategories can be configured directly in the PostgreSQL
@@ -273,6 +283,7 @@ spec:
```
!!! Warning
+
EDB Postgres for Kubernetes supports another way to customize the behavior of the
`initdb` invocation, using the `options` subsection. However, given that there
are options that can break the behavior of the operator (such as `--auth` or
@@ -302,12 +313,14 @@ queries, executed in the following order:
Objects in each list will be processed sequentially.
!!! Warning
+
Use the `postInit`, `postInitTemplate`, and `postInitApplication` options
with extreme care, as queries are run as a superuser and can disrupt the entire
cluster. An error in any of those queries will interrupt the bootstrap phase,
leaving the cluster incomplete and requiring manual intervention.
!!! Important
+
Ensure the existence of entries inside the ConfigMaps or Secrets specified
in `postInitSQLRefs`, `postInitTemplateSQLRefs`, and
`postInitApplicationSQLRefs`, otherwise the bootstrap will fail. Errors in any
@@ -366,6 +379,7 @@ spec:
```
!!! Note
+
Within SQL scripts, each SQL statement is executed in a single exec on the
server according to the [PostgreSQL semantics](https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-MULTI-STATEMENT).
Comments can be included, but internal commands like `psql` cannot.
@@ -401,6 +415,7 @@ spec:
```
!!! Important
+
EDB Postgres Advanced requires a valid license key (trial or production) to start.
## Bootstrap from another cluster
@@ -415,6 +430,7 @@ The source cluster must be defined in the `externalClusters` section, identified
by `name` (our recommendation is to use the same `name` of the origin cluster).
!!! Important
+
By default the `recovery` method strictly uses the `name` of the
cluster in the `externalClusters` section to locate the main folder
of the backup data within the object store, which is normally reserved
@@ -430,44 +446,62 @@ to the ["Recovery" section](recovery.md).
### Bootstrap from a live cluster (`pg_basebackup`)
-The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as
-an exact physical copy of an existing and **binary compatible** PostgreSQL
-instance (*source*), through a valid *streaming replication* connection.
-The source instance can be either a primary or a standby PostgreSQL server.
+The `pg_basebackup` bootstrap mode allows you to create a new cluster
+(*target*) as an exact physical copy of an existing and **binary-compatible**
+PostgreSQL instance (*source*) managed by EDB Postgres for Kubernetes, using a valid
+*streaming replication* connection. The source instance can either be a primary
+or a standby PostgreSQL server. It’s crucial to thoroughly review the
+requirements section below, as the pros and cons of PostgreSQL physical
+replication fully apply.
-The primary use case for this method is represented by **migrations** to EDB Postgres for Kubernetes,
-either from outside Kubernetes or within Kubernetes (e.g., from another operator).
+The primary use cases for this method include:
+
+- Reporting and business intelligence clusters that need to be regenerated
+ periodically (daily, weekly)
+- Test databases containing live data that require periodic regeneration
+ (daily, weekly, monthly) and anonymization
+- Rapid spin-up of a standalone replica cluster
+- Physical migrations of EDB Postgres for Kubernetes clusters to different namespaces or
+ Kubernetes clusters
+
+!!! Important
+
+ Avoid using this method, based on physical replication, to migrate an
+ existing PostgreSQL cluster outside of Kubernetes into EDB Postgres for Kubernetes unless you
+ are completely certain that all requirements are met and the operation has been
+ thoroughly tested. The EDB Postgres for Kubernetes community does not endorse this approach
+ for such use cases and recommends using logical import instead. It is
+ exceedingly rare that all requirements for physical replication are met in a
+ way that seamlessly works with EDB Postgres for Kubernetes.
!!! Warning
- The current implementation creates a *snapshot* of the origin PostgreSQL
- instance when the cloning process terminates and immediately starts
- the created cluster. See ["Current limitations"](#current-limitations) below for details.
-Similar to the case of the `recovery` bootstrap method, once the clone operation
-completes, the operator will take ownership of the target cluster, starting from
-the first instance. This includes overriding some configuration parameters, as
-required by EDB Postgres for Kubernetes, resetting the superuser password, creating
-the `streaming_replica` user, managing the replicas, and so on. The resulting
-cluster will be completely independent of the source instance.
+ In its current implementation, this method clones the source PostgreSQL
+ instance, thereby creating a *snapshot*. Once the cloning process has finished,
+ the new cluster is immediately started.
+ Refer to ["Current limitations"](#current-limitations) for more details.
+
+Similar to the `recovery` bootstrap method, once the cloning operation is
+complete, the operator takes full ownership of the target cluster, starting
+from the first instance. This includes overriding certain configuration
+parameters as required by EDB Postgres for Kubernetes, resetting the superuser password,
+creating the `streaming_replica` user, managing replicas, and more. The
+resulting cluster operates independently from the source instance.
!!! Important
- Configuring the network between the target instance and the source instance
- goes beyond the scope of EDB Postgres for Kubernetes documentation, as it depends
- on the actual context and environment.
-The streaming replication client on the target instance, which will be
-transparently managed by `pg_basebackup`, can authenticate itself on the source
-instance in any of the following ways:
+ Configuring the network connection between the target and source instances
+ lies outside the scope of EDB Postgres for Kubernetes documentation, as it depends heavily on
+ the specific context and environment.
-1. via [username/password](#usernamepassword-authentication)
-2. via [TLS client certificate](#tls-certificate-authentication)
+The streaming replication client on the target instance, managed transparently
+by `pg_basebackup`, can authenticate on the source instance using one of the
+following methods:
-The latter is the recommended one if you connect to a source managed
-by EDB Postgres for Kubernetes or configured for TLS authentication.
-The first option is, however, the most common form of authentication to a
-PostgreSQL server in general, and might be the easiest way if the source
-instance is on a traditional environment outside Kubernetes.
-Both cases are explained below.
+1. [Username/password](#usernamepassword-authentication)
+2. [TLS client certificate](#tls-certificate-authentication)
+
+Both authentication methods are detailed below.
#### Requirements
@@ -488,6 +522,7 @@ The following requirements apply to the `pg_basebackup` bootstrap method:
using a role with `REPLICATION LOGIN` privileges
!!! Seealso
+
For further information, please refer to the
["Planning" section for Warm Standby](https://www.postgresql.org/docs/current/warm-standby.html#STANDBY-PLANNING),
the
@@ -521,6 +556,7 @@ Enter the password at the prompt and save it for later, as you
will need to add it to a secret in the target instance.
!!! Note
+
Although the name is not important, we will use `streaming_replica`
for the sake of simplicity. Feel free to change it as you like,
provided you adapt the instructions in the following sections.
@@ -592,6 +628,7 @@ The following example clones an existing PostgreSQL cluster (`cluster-example`)
in the same Kubernetes cluster.
!!! Note
+
This example can be easily adapted to cover an instance that resides
outside the Kubernetes cluster.
@@ -644,6 +681,7 @@ If the new cluster is created as a replica cluster (with replica mode enabled),
database configuration will be skipped.
!!! Important
+
While the `Cluster` is in recovery mode, no changes to the database,
including the catalog, are permitted. This restriction includes any role
overrides, which are deferred until the `Cluster` transitions to primary.
@@ -691,9 +729,10 @@ instance using a second connection (see the `--wal-method=stream` option for
Once the backup is completed, the new instance will be started on a new timeline
and diverge from the source.
For this reason, it is advised to stop all write operations to the source database
-before migrating to the target database in Kubernetes.
+before migrating to the target database.
!!! Important
+
Before you attempt a migration, you must test both the procedure
and the applications. In particular, it is fundamental that you run the migration
procedure as many times as needed to systematically measure the downtime of your
diff --git a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
index 829e682b172..99fd3f6b96a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
@@ -12,6 +12,7 @@ To set up a cluster, the operator requires:
- A streaming replication client certificate generated by the client CA
!!! Note
+
You can find all the secrets used by the cluster and their expiration dates
in the cluster's status.
@@ -30,6 +31,7 @@ You can also choose a hybrid approach, where only part of the certificates is
generated outside CNP.
!!! Note
+
The operator and instances verify server certificates against the CA only,
disregarding the DNS name. This approach is due to the typical absence of DNS
names in user-provided certificates for the `-rw` service used for
@@ -43,11 +45,13 @@ managed continuously by the operator, with automatic renewal 7 days before
expiration (within a 90-day validity period).
!!! Info
+
You can adjust this default behavior by configuring the
`CERTIFICATE_DURATION` and `EXPIRING_CHECK_THRESHOLD` environment variables.
For detailed guidance, refer to the [Operator Configuration](operator_conf.md).
!!! Important
+
Certificate renewal does not cause any downtime for the PostgreSQL server,
as a simple reload operation is sufficient. However, any user-managed
certificates not controlled by EDB Postgres for Kubernetes must be re-issued following the
@@ -108,16 +112,19 @@ the following parameters:
- `serverCASecret` – The name of a secret containing the `ca.crt` key.
!!! Note
+
The operator still creates and manages the two secrets related to client
certificates.
!!! Note
+
The operator and instances verify server certificates against the CA only,
disregarding the DNS name. This approach is due to the typical absence of DNS
names in user-provided certificates for the `-rw` service used for
communication within the cluster.
!!! Note
+
If you want ConfigMaps and secrets to be reloaded by instances, you can add
a label with the key `k8s.enterprisedb.io/reload` to it. Otherwise you must reload the
instances using the `kubectl cnp reload` subcommand.
@@ -248,14 +255,17 @@ the following parameters:
to use to verify client certificate.
!!! Note
+
The operator still creates and manages the two secrets related to server
certificates.
!!! Note
+
As the cluster isn't in control of the client CA secret key, you can no
longer generate client certificates using `kubectl cnp certificate`.
!!! Note
+
If you want ConfigMaps and secrets to be automatically reloaded by
instances, you can add a label with the key `k8s.enterprisedb.io/reload` to it. Otherwise,
you must reload the instances using the `kubectl cnp reload` subcommand.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
index 8b550eb893d..0a515fb9465 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
@@ -50,7 +50,7 @@ EDB Postgres for Kubernetes relies on [ephemeral volumes](https://kubernetes.io/
for part of the internal activities. Ephemeral volumes exist for the sole
duration of a pod's life, without persisting across pod restarts.
-### Volume Claim Template for Temporary Storage
+# Volume Claim Template for Temporary Storage
The operator uses by default an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit` field.
This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
index 3943ef00288..63303bd1a8b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
@@ -47,6 +47,7 @@ spec:
```
!!! Important
+
The pooler name can't be the same as any cluster name in the same namespace.
This example creates a `Pooler` resource called `pooler-example-rw`
@@ -62,6 +63,7 @@ and accepting up to 1000 connections each. The default pool size is 10
user/database pairs toward PostgreSQL.
!!! Important
+
The `Pooler` resource sets only the `*` fallback database in PgBouncer. This setting means that
    all parameters in the connection strings passed from the client are
relayed to the PostgreSQL server. For details, see ["Section \[databases\]"
@@ -71,6 +73,7 @@ EDB Postgres for Kubernetes also creates a secret with the same name as the pool
the configuration files used with PgBouncer.
!!! Seealso "API reference"
+
For details, see [`PgBouncerSpec`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec)
in the API reference.
@@ -85,11 +88,13 @@ resources are currently independent. Deleting the cluster doesn't imply the
deletion of the pooler, and vice versa.
!!! Important
+
Once you know how a pooler works, you have full freedom in terms of
possible architectures. You can have clusters without poolers, clusters with
a single pooler, or clusters with several poolers, that is, one per application.
!!! Important
+
When the operator is upgraded, the pooler pods will undergo a rolling
upgrade. This is necessary to ensure that the instance manager within the
pooler pods is also upgraded.
@@ -144,6 +149,7 @@ Internally, the implementation relies on PgBouncer's `auth_user` and
any pooler associated to it
!!! Important
+
If you specify your own secrets, the operator doesn't automatically
integrate the pooler.
@@ -180,6 +186,7 @@ GRANT EXECUTE ON FUNCTION public.user_search(text)
```
!!! Important
+
Given that `user_search` is a `SECURITY DEFINER` function, you need to
create it through a role with `SUPERUSER` privileges, such as the `postgres`
user.
@@ -226,6 +233,7 @@ spec:
```
!!! Note
+
Explicitly set `.spec.template.spec.containers` to `[]` when not modified,
as it's a required field for a `PodSpec`. If `.spec.template.spec.containers`
isn't set, the Kubernetes api-server returns the following error when trying to
@@ -302,6 +310,7 @@ The operator by default adds a `ServicePort` with the following data:
```
!!! Warning
+
Specifying a `ServicePort` with the name `pgbouncer` or the port `5432` will prevent the default `ServicePort` from being added.
    This is because `ServicePort` entries with the same `name` or `port` are not allowed on Kubernetes and result in errors.
@@ -315,6 +324,7 @@ using the `rw` service) or servers (if using the `ro` service with multiple
replicas).
!!! Warning
+
If your infrastructure spans multiple availability zones with high latency
across them, be aware of network hops. Consider, for example, the case of your
application running in zone 2, connecting to PgBouncer running in zone 3, and
@@ -326,6 +336,7 @@ The operator manages most of the [configuration options for PgBouncer](https://w
allowing you to modify only a subset of them.
!!! Warning
+
You are responsible for correctly setting the value of each option, as the
operator doesn't validate them.
@@ -383,6 +394,7 @@ PgBouncer instance reloads the updated configuration without disrupting the
service.
!!! Warning
+
Every PgBouncer pod has the same configuration, aligned
with the parameters in the specification. A mistake in these
parameters might disrupt the operability of the whole pooler.
@@ -403,6 +415,7 @@ Like the EDB Postgres for Kubernetes instance, the exporter runs on port
Go runtime (with the prefix `go_*`).
!!! Info
+
You can inspect the exported metrics on a pod running PgBouncer. For instructions, see
[How to inspect the exported metrics](monitoring.md/#how-to-inspect-the-exported-metrics).
Make sure that you use the correct IP and the `9127` port.
@@ -578,6 +591,7 @@ A `PodMonitor` correctly pointing to a pooler can be created by the operator by
`.spec.monitoring.enablePodMonitor` to `true` in the `Pooler` resource. The default is `false`.
!!! Important
+
Any change to `PodMonitor` created automatically is overridden by the
operator at the next reconciliation cycle. If you need to customize it, you can
do so as shown in the following example.
@@ -635,10 +649,12 @@ When the `paused` option is reset to `false`, the operator invokes the
service defined in the `Pooler` resource.
!!! Seealso "PAUSE"
+
For more information, see
[`PAUSE` in the PgBouncer documentation](https://www.pgbouncer.org/usage.html#pause-db).
!!! Important
+
In future versions, the switchover operation will be fully integrated
with the PgBouncer pooler and take advantage of the `PAUSE`/`RESUME`
features to reduce the perceived downtime by client applications.
@@ -665,6 +681,7 @@ the specific use case for the single PostgreSQL cluster, the adopted criteria
is to explicitly list the options that can be configured by users.
!!! Note
+
The adopted solution likely addresses the majority of
use cases. It leaves room for the future implementation of a separate
    operator for PgBouncer to complete the gamut with more advanced and customized
diff --git a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
index 43442a066c0..e2d1d8f3f55 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
@@ -27,12 +27,14 @@ with the following requirements:
- `du` (optional, for `kubectl cnp status`)
!!! Important
+
Only [PostgreSQL versions supported by the PGDG](https://postgresql.org/) are allowed.
No entry point and/or command is required in the image definition, as
EDB Postgres for Kubernetes overrides it with its instance manager.
!!! Warning
+
Application Container Images will be used by EDB Postgres for Kubernetes
in a **Primary with multiple/optional Hot Standby Servers Architecture**
only.
@@ -68,7 +70,9 @@ Examples of accepted image tags:
- `16.0`
!!! Warning
+
`latest` is not considered a valid tag for the image.
!!! Note
+
    Image tag requirements do not apply to images defined in a catalog.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
index 2dc53155226..0dc7372ae13 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
@@ -24,6 +24,7 @@ As a result, the instructions in this section are suitable for both:
to version 15.x)
!!! Warning
+
When performing major upgrades of PostgreSQL you are responsible for making
sure that applications are compatible with the new version and that the
upgrade path of the objects contained in the database (including extensions) is
@@ -33,6 +34,7 @@ In both cases, the operation is performed on a consistent **snapshot** of the
origin database.
!!! Important
+
For this reason we suggest to stop write operations on the source before
the final import in the `Cluster` resource, as changes done to the source
database after the start of the backup will not be in the destination cluster -
@@ -62,6 +64,7 @@ The first import method is available via the `microservice` type, while the
latter by the `monolith` type.
!!! Warning
+
It is your responsibility to ensure that the destination cluster can
access the source cluster with a superuser or a user having enough
privileges to take a logical backup with `pg_dump`. Please refer to the
@@ -127,6 +130,7 @@ spec:
```
!!! Warning
+
The example above deliberately uses a source database running a version of
PostgreSQL that is not supported anymore by the Community, and consequently by
EDB Postgres for Kubernetes.
@@ -261,6 +265,7 @@ configuration, then runs `initdb --sync-only` to ensure that data is
permanently written on disk.
!!! Important
+
WAL archiving, if requested, and WAL level will be honored after the
database import process has completed. Similarly, replicas will be cloned
after the bootstrap phase, when the actual cluster resource starts.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_database_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_database_management.mdx
new file mode 100644
index 00000000000..1ddbf4fd3d3
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_database_management.mdx
@@ -0,0 +1,63 @@
+---
+title: 'Declarative Database Management'
+originalFilePath: 'src/declarative_database_management.md'
+---
+
+Declarative database management enables users to control the lifecycle of
+databases via a new Custom Resource Definition (CRD) called `Database`.
+
+A `Database` object is managed by the instance manager of the cluster's
+primary instance. This feature is not supported in replica clusters,
+as replica clusters lack a primary instance to manage the `Database` object.
+
+### Example: Simple Database Declaration
+
+Below is an example of a basic `Database` configuration:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+ name: db-one
+spec:
+ name: one
+ owner: app
+ cluster:
+ name: cluster-example
+```
+
+Once the reconciliation cycle is completed successfully, the `Database`
+status will show an `applied` field set to `true` and an empty `message` field.
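+
+As a minimal illustration, assuming the `db-one` object above has been
+reconciled successfully, the relevant portion of its status might look like
+the following (the actual status may include additional fields):
+
+```yaml
+# Illustrative excerpt of the status of a reconciled Database object
+status:
+  applied: true
+  message: ""
+```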
+
+### Database Deletion and Reclaim Policies
+
+A finalizer named `k8s.enterprisedb.io/deleteDatabase` is automatically added
+to each `Database` object to control its deletion process.
+
+By default, the `databaseReclaimPolicy` is set to `retain`, which means
+that if the `Database` object is deleted, the actual PostgreSQL database
+is retained for manual management by an administrator.
+
+Alternatively, if the `databaseReclaimPolicy` is set to `delete`,
+the PostgreSQL database will be automatically deleted when the `Database`
+object is removed.
+
+### Example: Database with Delete Reclaim Policy
+
+The following example illustrates a `Database` object with a `delete`
+reclaim policy:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+ name: db-one-with-delete-reclaim-policy
+spec:
+ databaseReclaimPolicy: delete
+ name: two
+ owner: app
+ cluster:
+ name: cluster-example
+```
+
+In this case, when the `Database` object is deleted, the corresponding PostgreSQL database will also be removed automatically.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
index e1b3c541101..957e100ac31 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
@@ -16,6 +16,7 @@ The declarative hibernation feature enables saving CPU power by removing the
database Pods, while keeping the database PVCs.
!!! Note
+
Declarative hibernation is different from the existing implementation
of [imperative hibernation via the `cnp` plugin](kubectl-plugin.md#cluster-hibernation).
Imperative hibernation shuts down all Postgres instances in the High
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
index f6d4ede40a4..0a50756b22b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
@@ -153,6 +153,7 @@ never expires, mirroring the behavior of PostgreSQL. Specifically:
allowing `VALID UNTIL NULL` in the `ALTER ROLE` SQL statement)
!!! Warning
+
New roles created without `passwordSecret` will have a `NULL` password
inside PostgreSQL.
@@ -250,6 +251,7 @@ petrarca could not perform UPDATE_MEMBERSHIPS on role petrarca: role "poets" do
```
!!! Important
+
In terms of backward compatibility, declarative role management is designed
to ignore roles that exist in the database but are not included in the spec.
The lifecycle of these roles will continue to be managed within PostgreSQL,
diff --git a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
index c4700e73644..c5cb758fde3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
@@ -5,41 +5,10 @@ originalFilePath: 'src/evaluation.md'
EDB Postgres for Kubernetes is available for a free evaluation.
-The process is different between Community PostgreSQL and EDB Postgres Advanced Server.
+Use your EDB account to evaluate EDB Postgres for Kubernetes. If you don't have an account, [register](https://www.enterprisedb.com/accounts/register) for one. Then follow the [installation guide](installation_upgrade.md) to install the operator, using the repository access token obtained with your EDB account.
## Evaluating using PostgreSQL
-By default, EDB Postgres for Kubernetes installs the latest available version of Community PostgreSQL.
-
-No license key is required. The operator automatically generates an implicit trial license for the cluster that lasts for 30 days. This trial license is ideal for evaluation, proof of concept, integration with CI/CD pipelines, and so on.
+By default, EDB Postgres for Kubernetes installs the latest available version of Community PostgreSQL.
PostgreSQL container images are available at [quay.io/enterprisedb/postgresql](https://quay.io/repository/enterprisedb/postgresql).
-
-## Evaluating using EDB Postgres Advanced Server
-
-There are two ways to obtain the EDB Postgres Advanced Server image for evaluation purposes. The easiest is through the EDB Image Repository, where all you’ll need is an EDB account to auto generate a repository access token. The other way is to download the image through [quay.io](http://quay.io) and request a trial license key from EDB support.
-
-### EDB Image Repository
-
-You can use EDB Postgres for Kubernetes with EDB Postgres Advanced Server. You can access the image by obtaining a repository access token to EDB's image repositories.
-
-### Obtaining your access token
-
-You can request a repository access token from the [EDB Repositories Download](https://www.enterprisedb.com/repos-downloads) page. You will also need to be signed into your EDB account. If you don't have an EDB Account, you can [register for one](https://www.enterprisedb.com/accounts/register) on the EDB site.
-
-### Quay Image Repository
-
-If you want to use the Quay image repository, you’ll need a trial license key for access to use the images. To request a trial license key for EDB Postgres Kubernetes please contact your sales representative or you can contact our EDB Technical Support Team by email at [techsupport@enterprisedb.com](mailto:techsupport@enterprisedb.com) or file a ticket on our support portal . Please allow 24 hours for your license to be generated and delivered to you and if you need any additional support please do not hesitate to contact us.
-
-Once you have your license key, EDB Postgres Advanced container images will be available at
-
-You can then use EDB Postgres Advanced Server by setting in the `spec` section of the `Cluster` deployment configuration file:
-
-- `imageName` to point to the quay.io/enterprisedb/edb-postgres-advanced repository
-- `licenseKey` to your license key (in the form of a string)
-
-To see how `imageName` and `licenseKey` is set, refer to the [cluster-full-example](/postgres_for_kubernetes/latest/samples/cluster-example-full.yaml) file from the [configuration samples](/postgres_for_kubernetes/latest/samples/) section.
-
-## Further Information
-
-Refer to [License and License keys](license_keys.md) for terms and more details.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
index 0b714acd590..c7dda46ca02 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
@@ -27,6 +27,7 @@ controller will initiate the failover process, in two steps:
the primary, and become a replica node.
!!! Important
+
The two-phase procedure helps ensure the WAL receivers can stop in an orderly
fashion, and that the failing primary will not start streaming WALs again upon
restart. These safeguards prevent timeline discrepancies between the new primary
@@ -41,6 +42,7 @@ During the time the failing primary is being shut down:
*immediate shutdown* is initiated.
!!! Info
+
"Fast" mode does not wait for PostgreSQL clients to disconnect and will
terminate an online backup in progress. All active transactions are rolled back
and clients are forcibly disconnected, then the server is shut down.
@@ -64,12 +66,14 @@ Failover may result in the service being impacted and/or data being lost:
with no data loss.
!!! Note
+
The timeout that controls fast shutdown is set by `.spec.switchoverDelay`,
as in the case of a switchover. Increasing the time for fast shutdown is safer
from an RPO point of view, but possibly delays the return to normal operation -
negatively affecting RTO.
!!! Warning
+
As already mentioned in the ["Instance Manager" section](instance_manager.md)
when explaining the switchover process, the `.spec.switchoverDelay` option
affects the RPO and RTO of your PostgreSQL database. Setting it to a low value,
diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
index 5777b5aeb72..f5feaa5bec3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
@@ -7,10 +7,12 @@ This section provides an overview of the major failure scenarios that
PostgreSQL can face on a Kubernetes cluster during its lifetime.
!!! Important
+
In case the failure scenario you are experiencing is not covered by this
- section, please immediately contact EDB for support and assistance.
+ section, please immediately seek [professional support](https://cloudnative-pg.io/support/).
!!! Seealso "Postgres instance manager"
+
Please refer to the ["Postgres instance manager" section](instance_manager.md)
for more information on the liveness and readiness probes implemented by
EDB Postgres for Kubernetes.
@@ -35,6 +37,7 @@ kubectl delete -n [namespace] pvc/[cluster-name]-[serial] pod/[cluster-name]-[se
```
!!! Note
+
If you specified a dedicated WAL volume, it will also have to be deleted during this process.
```sh
@@ -70,6 +73,7 @@ The operator is notified of the deletion. A new pod belonging to the
or starting from a physical backup of the *primary* otherwise.
!!! Important
+
In case of deliberate deletion of a pod, `PodDisruptionBudget` policies
will not be enforced.
@@ -91,6 +95,7 @@ kubectl delete pod [primary pod] --grace-period=1
```
!!! Warning
+
Never use `--grace-period=0` in your failover simulation tests, as this
might produce misleading results with your PostgreSQL cluster. A grace
period of 0 guarantees that the pod is immediately removed from the
@@ -133,6 +138,7 @@ The `PodDisruptionBudget` may prevent the pod from being evicted if there
is at least another pod that is not ready.
!!! Note
+
Single instance clusters prevent node drain when `reusePVC` is
set to `false`. Refer to the [Kubernetes Upgrade section](kubernetes_upgrade.md).
@@ -175,8 +181,9 @@ In the case of undocumented failure, it might be necessary to intervene
to solve the problem manually.
!!! Important
- In such cases, please do not perform any manual operation without the
- support and assistance of EDB engineering team.
+
+ In such cases, please do not perform any manual operation without
+ [professional support](https://cloudnative-pg.io/support/).
You can use the `k8s.enterprisedb.io/reconciliationLoop` annotation to temporarily disable
the reconciliation loop for a specific PostgreSQL cluster, as shown below:
@@ -194,6 +201,7 @@ The `k8s.enterprisedb.io/reconciliationLoop` must be used with extreme care
and for the sole duration of the extraordinary/emergency operation.
!!! Warning
+
Please make sure that you use this annotation only for a limited period of
time and you remove it when the emergency has finished. Leaving this annotation
in a cluster will prevent the operator from issuing any self-healing operation,
diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
index 51afbe712ae..34575bcd9df 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
@@ -153,6 +153,7 @@ publication on GitHub:
Feel free to report any relevant missing entry as a PR.
!!! Info
+
The [Data on Kubernetes Community](https://dok.community)
(which includes some of our maintainers) is working on an independent and
vendor neutral project to list the operators called
diff --git a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
index c35e05abb82..685b734c40b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
@@ -97,6 +97,7 @@ This consists of an initial fast shutdown with a timeout set to
set to 1
!!! Warning
+
If a **primary instance** is fenced, its postmaster process
is shut down but no failover is performed, interrupting the operativity of
the applications. When the fence will be lifted, the primary instance will be
diff --git a/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx b/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx
index 50b1497e534..ec2ab862fba 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/image_catalog.mdx
@@ -13,6 +13,7 @@ Both share a common structure, comprising a list of images, each equipped with
a `major` field indicating the major version of the image.
!!! Warning
+
The operator places trust in the user-defined major version and refrains
from conducting any PostgreSQL version detection. It is the user's
responsibility to ensure alignment between the declared major version in
diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
index 17ef3d12d5a..061f2332bed 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
@@ -79,8 +79,6 @@ and OpenShift. It is designed, developed, and supported by EDB and covers the
full lifecycle of a highly available Postgres database clusters with a
primary/standby architecture, using native streaming replication.
-EDB Postgres for Kubernetes was made generally available on February 4, 2021. Earlier versions were made available to selected customers prior to the GA release.
-
!!! Note
The operator has been renamed from Cloud Native PostgreSQL. Existing users
diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
index 33b94841bed..4bd4fb2b53c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
@@ -4,31 +4,93 @@ originalFilePath: 'src/installation_upgrade.md'
---
!!! Seealso "OpenShift"
+
For instructions on how to install Cloud Native PostgreSQL on Red Hat
OpenShift Container Platform, please refer to the ["OpenShift"](openshift.md)
section.
!!! Warning
+
OLM (via [operatorhub.io](https://operatorhub.io/)) is no longer supported
as an installation method for EDB Postgres for Kubernetes.
## Installation on Kubernetes
+### Obtaining an EDB subscription token
+
+!!! Important
+
+ You must obtain an EDB subscription token to install EDB Postgres for Kubernetes. Without a token, you will not be able to access the EDB private software repositories.
+
+Installing EDB Postgres for Kubernetes requires an EDB Repos 2.0 token to gain access to the EDB private software repositories.
+
+You can obtain the token by visiting your [EDB Account Profile](https://www.enterprisedb.com/accounts/profile). You will have to sign in if you are not already logged in.
+
+Your account profile page displays the token next to the **Repos 2.0 Token** label. By default, the token is obscured; click the "Show" button (an eye icon) to reveal it.
+
+Your token entitles you to access one of two repositories: standard or enterprise.
+
+- `standard` - Includes the operator and the EDB Postgres Extended operand images.
+- `enterprise` - Includes the operator and the EDB Postgres Advanced and EDB Postgres Extended operand images.
+
+Set the value that matches your subscription in the `EDB_SUBSCRIPTION_PLAN` environment variable:
+
+```shell
+EDB_SUBSCRIPTION_PLAN=enterprise
+```
+
+Then set your Repos 2.0 token in the `EDB_SUBSCRIPTION_TOKEN` environment variable:
+
+```shell
+EDB_SUBSCRIPTION_TOKEN=
+```
+
+!!! Warning
+
+ The token is sensitive information. Please ensure that you don't expose it to unauthorized users.
+
+You can now proceed with the installation.
+
### Using the Helm Chart
The operator can be installed using the provided [Helm chart](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
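+
+As a minimal sketch (the repository URL, chart name, and release name below are
+assumptions based on the chart repository linked above; refer to the chart's
+README for the authoritative instructions, including how to provide your EDB
+subscription token):
+
+```sh
+helm repo add edb https://enterprisedb.github.io/edb-postgres-for-kubernetes-charts/
+helm upgrade --install edb-pg4k \
+  --namespace postgresql-operator-system --create-namespace \
+  edb/edb-postgres-for-kubernetes
+```
+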
### Directly using the operator manifest
-The operator can be installed like any other resource in Kubernetes,
-through a YAML manifest applied via `kubectl`.
+#### Install the EDB pull secret
+
+Before installing EDB Postgres for Kubernetes, you need to create a pull secret for EDB software in the `postgresql-operator-system` namespace.
-You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.24.1.yaml)
-for this minor release as follows:
+The pull secret needs to be saved in the namespace where the operator will reside. Create the `postgresql-operator-system` namespace using this command:
+
+```shell
+kubectl create namespace postgresql-operator-system
+```
+
+To create the pull secret itself, run the following command:
+
+```shell
+kubectl create secret -n postgresql-operator-system docker-registry edb-pull-secret \
+ --docker-server=docker.enterprisedb.com \
+ --docker-username=k8s_$EDB_SUBSCRIPTION_PLAN \
+ --docker-password=$EDB_SUBSCRIPTION_TOKEN
+```
+
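+You can optionally verify that the secret has been created before moving on:
+
+```sh
+kubectl get secret edb-pull-secret -n postgresql-operator-system
+```
+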
+#### Install the operator
+
+Now that the pull secret has been added to the namespace, the operator can be installed like any other resource in Kubernetes,
+through a YAML manifest applied via `kubectl`.
+
+There are two different manifests available depending on your subscription plan:
+
+- Standard: The [latest standard operator manifest](https://get.enterprisedb.io/pg4k/pg4k-standard-1.24.1.yaml).
+- Enterprise: The [latest enterprise operator manifest](https://get.enterprisedb.io/pg4k/pg4k-enterprise-1.24.1.yaml).
+
+You can install the manifest for the latest version of the operator by running:
```sh
kubectl apply --server-side -f \
- https://get.enterprisedb.io/cnp/postgresql-operator-1.24.1.yaml
+ https://get.enterprisedb.io/pg4k/pg4k-$EDB_SUBSCRIPTION_PLAN-1.24.1.yaml
```
You can verify that with:
@@ -55,6 +117,7 @@ Please refer to ["`cnp` plugin"](./kubectl-plugin.md#generation-of-installation-
for a more comprehensive example.
!!! Warning
+
If you are deploying EDB Postgres for Kubernetes on GKE and get an error (`... failed to
call webhook...`), be aware that by default traffic between worker nodes
and control plane is blocked by the firewall except for a few specific
@@ -66,17 +129,16 @@ for a more comprehensive example.
one of the allowed ones, or open the webhooks' port (`9443`) on the
firewall.
-
## Details about the deployment
In Kubernetes, the operator is by default installed in the `postgresql-operator-system`
namespace as a Kubernetes `Deployment`. The name of this deployment
depends on the installation method.
-When installed through the manifest or the `cnp` plugin, it is called
-`postgresql-operator-controller-manager` by default. When installed via Helm, the default name
-is `postgresql-operator-cloudnative-pg`.
+When installed through the manifest or the `cnp` plugin, by default, it is called `postgresql-operator-controller-manager`.
+When installed via Helm, the deployment name is by default derived from the Helm release name, with the suffix `-edb-postgres-for-kubernetes` appended (e.g., `<release-name>-edb-postgres-for-kubernetes`).
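+
+If in doubt, you can list the deployments in the operator namespace (assuming
+the default `postgresql-operator-system` namespace mentioned above) to find the
+actual name:
+
+```sh
+kubectl get deployments -n postgresql-operator-system
+```
+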
!!! Note
+
With Helm you can customize the name of the deployment via the
`fullnameOverride` field in the [*"values.yaml"* file](https://helm.sh/docs/chart_template_guide/values_files/).
@@ -108,6 +170,7 @@ the actual PostgreSQL clusters are running (this might even include the control
plane for self-managed Kubernetes installations).
!!! Seealso "Operator configuration"
+
You can change the default behavior of the operator by overriding
some default options. For more information, please refer to the
["Operator configuration"](operator_conf.md) section.
@@ -115,6 +178,7 @@ plane for self-managed Kubernetes installations).
## Upgrades
!!! Important
+
Please carefully read the [release notes](rel_notes)
before performing an upgrade as some versions might require
extra steps.
@@ -129,18 +193,20 @@ by applying the manifest of the newer version for plain Kubernetes
installations, or using the native package manager of the used distribution
(please follow the instructions in the above sections).
-The second step is automatically executed after having updated the controller,
-by default triggering a rolling update of every deployed PostgreSQL instance to
-use the new instance manager. The rolling update procedure culminates with a
-switchover, which is controlled by the `primaryUpdateStrategy` option, by
-default set to `unsupervised`. When set to `supervised`, users need to complete
-the rolling update by manually promoting a new instance through the `cnp`
-plugin for `kubectl`.
+The second step is automatically triggered after updating the controller. By
+default, this initiates a rolling update of every deployed PostgreSQL cluster,
+upgrading one instance at a time to use the new instance manager. The rolling
+update concludes with a switchover, which is governed by the
+`primaryUpdateStrategy` option. The default value, `unsupervised`, completes
+the switchover automatically. If set to `supervised`, the user must manually
+promote the new primary instance using the `cnp` plugin for `kubectl`.
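+
+For example, with `supervised` you complete the rollout by promoting one of the
+already-updated replicas yourself through the plugin (cluster and instance
+names below are hypothetical):
+
+```sh
+kubectl cnp promote cluster-example cluster-example-2
+```
+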
!!! Seealso "Rolling updates"
+
This process is discussed in-depth on the [Rolling Updates](rolling_update.md) page.
!!! Important
+
In case `primaryUpdateStrategy` is set to the default value of `unsupervised`,
an upgrade of the operator will trigger a switchover on your PostgreSQL cluster,
causing a (normally negligible) downtime.
@@ -150,6 +216,21 @@ the instance manager. This approach does not require a restart of the
PostgreSQL instance, thereby avoiding a switchover within the cluster. This
feature, which is disabled by default, is described in detail below.
+### Spread Upgrades
+
+By default, all PostgreSQL clusters are rolled out simultaneously, which may
+lead to a spike in resource usage, especially when managing multiple clusters.
+EDB Postgres for Kubernetes provides two configuration options at the [operator level](operator_conf.md)
+that allow you to introduce delays between cluster roll-outs or even between
+instances within the same cluster, helping to distribute resource usage over
+time, as shown in the example after this list:
+
+- `CLUSTERS_ROLLOUT_DELAY`: Defines the number of seconds to wait between
+ roll-outs of different PostgreSQL clusters (default: `0`).
+- `INSTANCES_ROLLOUT_DELAY`: Defines the number of seconds to wait between
+ roll-outs of individual instances within the same PostgreSQL cluster (default:
+ `0`).
+
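+As a sketch, assuming a default manifest installation where the operator
+configuration lives in a `ConfigMap` in the `postgresql-operator-system`
+namespace (the ConfigMap name below is an assumption; check the
+[operator configuration](operator_conf.md) page for the exact name used by your
+installation), the delays could be set as follows:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgresql-operator-controller-manager-config
+  namespace: postgresql-operator-system
+data:
+  # Wait 60 seconds between roll-outs of different PostgreSQL clusters
+  CLUSTERS_ROLLOUT_DELAY: "60"
+  # Wait 10 seconds between roll-outs of instances of the same cluster
+  INSTANCES_ROLLOUT_DELAY: "10"
+```
+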
### In-place updates of the instance manager
By default, EDB Postgres for Kubernetes issues a rolling update of the cluster
@@ -211,6 +292,7 @@ only the operator itself.
### Upgrading to 1.24 from a previous minor version
!!! Warning
+
Every time you are upgrading to a higher minor release, make sure you
go through the release notes and upgrade instructions of all the
intermediate minor releases. For example, if you want to move
diff --git a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
index b9591c06d6c..27d812784f9 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
@@ -29,6 +29,7 @@ The liveness probe controls when to restart the container once
the startup probe interval has elapsed.
!!! Important
+
The liveness and readiness probes will report a failure if the probe command
fails three times with a 10-second interval between each check.
@@ -47,6 +48,7 @@ which defaults to 3600 seconds. The correct value for your cluster is
related to the time needed by PostgreSQL to start.
!!! Warning
+
If `.spec.startDelay` is too low, the liveness probe will start working
before the PostgreSQL startup is complete, and the Pod could be restarted
prematurely.
@@ -75,6 +77,7 @@ The shutdown procedure is composed of two steps:
seconds.
!!! Important
+
In order to avoid any data loss in the Postgres cluster, which impacts
the database RPO, don't delete the Pod where the primary instance is running.
In this case, perform a switchover to another instance first.
@@ -91,6 +94,7 @@ the time given to the former primary to shut down gracefully and archive all
the WAL files. By default it is set to `3600` (1 hour).
!!! Warning
+
The `.spec.switchoverDelay` option affects the RPO and RTO of your
PostgreSQL database. Setting it to a low value might favor RTO over RPO
but lead to data loss at cluster level and/or backup level. On the contrary,
@@ -115,10 +119,12 @@ provides details on checking the disk space used by WAL segments and standard
metrics on disk usage exported to Prometheus.
!!! Important
+
In a production system, it is critical to monitor the database
continuously. Exhausted disk storage can lead to a database server shutdown.
!!! Note
+
The detection of exhausted storage relies on a storage class that
accurately reports disk size and usage. This may not be the case in simulated
Kubernetes environments like Kind or with test storage class implementations
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
index 47b0c615ef8..8b0692bdf16 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
@@ -11,6 +11,7 @@ The plugin also works with `oc` in an OpenShift environment.
You can install the `cnp` plugin using a variety of methods.
!!! Note
+
For air-gapped systems, installation via package managers, using previously
downloaded files, may be a good option.
@@ -34,53 +35,53 @@ them in your systems.
#### Debian packages
-For example, let's install the 1.22.2 release of the plugin, for an Intel based
+For example, let's install the 1.24.1 release of the plugin, for an Intel based
64 bit server. First, we download the right `.deb` file.
-``` sh
-wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.1/kubectl-cnp_1.22.2_linux_x86_64.deb
+```sh
+wget https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.24.1/kubectl-cnp_1.24.1_linux_x86_64.deb \
+ --output-document kube-plugin.deb
```
-Then, install from the local file using `dpkg`:
+Then, with superuser privileges, install from the local file using `dpkg`:
-``` sh
-dpkg -i kubectl-cnp_1.22.2_linux_x86_64.deb
-(Reading database ... 702524 files and directories currently installed.)
-Preparing to unpack kubectl-cnp_1.22.2_linux_x86_64.deb ...
-Unpacking cnp (1.22.2) over (1.22.2) ...
-Setting up cnp (1.22.2) ..
+```console
+$ sudo dpkg -i kube-plugin.deb
+Selecting previously unselected package cnp.
+(Reading database ... 6688 files and directories currently installed.)
+Preparing to unpack kube-plugin.deb ...
+Unpacking kubectl-cnp (1.24.1) ...
+Setting up kubectl-cnp (1.24.1) ...
```
#### RPM packages
-As in the example for `.deb` packages, let's install the 1.22.2 release for an
+As in the example for `.deb` packages, let's install the 1.24.1 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.
-``` sh
-curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.22.2/kubectl-cnp_1.22.2_linux_x86_64.rpm \
+```sh
+curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.24.1/kubectl-cnp_1.24.1_linux_x86_64.rpm \
--output kube-plugin.rpm
```
-Then install with `yum`, and you're ready to use:
+Then, with superuser privileges, install with `yum`, and you're ready to use it:
-``` sh
-yum --disablerepo=* localinstall kube-plugin.rpm
-yum --disablerepo=* localinstall kube-plugin.rpm
-__OUTPUT__
+```console
+$ sudo yum --disablerepo=* localinstall kube-plugin.rpm
Failed to set locale, defaulting to C.UTF-8
Dependencies resolved.
====================================================================================================
Package Architecture Version Repository Size
====================================================================================================
Installing:
- cnp x86_64 1.22.2-1 @commandline 17 M
+ cnp x86_64 1.24.1-1 @commandline 20 M
Transaction Summary
====================================================================================================
Install 1 Package
-Total size: 14 M
-Installed size: 43 M
+Total size: 20 M
+Installed size: 78 M
Is this ok [y/N]: y
```
@@ -89,20 +90,20 @@ Is this ok [y/N]: y
EDB Postgres for Kubernetes Plugin is currently built for the following
operating system and architectures:
-* Linux
- * amd64
- * arm 5/6/7
- * arm64
- * s390x
- * ppc64le
-* macOS
- * amd64
- * arm64
-* Windows
- * 386
- * amd64
- * arm 5/6/7
- * arm64
+- Linux
+ - amd64
+ - arm 5/6/7
+ - arm64
+ - s390x
+ - ppc64le
+- macOS
+ - amd64
+ - arm64
+- Windows
+ - 386
+ - amd64
+ - arm 5/6/7
+ - arm64
### Configuring auto-completion
@@ -110,7 +111,7 @@ To configure auto-completion for the plugin, a helper shell script needs to be
installed into your current PATH. Assuming the latter contains `/usr/local/bin`,
this can be done with the following commands:
-```shell
+```sh
cat > kubectl_complete-cnp <
```
!!! Note
+
The plugin automatically detects if the standard output channel is connected to a terminal.
In such cases, it may add ANSI colors to the command output. To disable colors, use the
`--color=never` option with the command.
@@ -149,28 +152,28 @@ installation namespace, namespaces to watch, and so on.
For details and available options, run:
-```shell
+```sh
kubectl cnp install generate --help
```
The main options are:
-- `-n`: specifies the namespace in which to install the operator (default:
- `cnp-system`).
-- `--control-plane`: if set to true, the operator deployment will include a
- toleration and affinity for `node-role.kubernetes.io/control-plane`.
-- `--replicas`: sets the number of replicas in the deployment.
-- `--watch-namespace`: specifies a comma-separated list of namespaces to watch
- (default: all namespaces).
-- `--version`: defines the minor version of the operator to be installed, such
- as `1.23`. If a minor version is specified, the plugin installs the latest
- patch version of that minor version. If no version is supplied, the plugin
- installs the latest `MAJOR.MINOR.PATCH` version of the operator.
+- `-n`: specifies the namespace in which to install the operator (default:
+ `postgresql-operator-system`).
+- `--control-plane`: if set to true, the operator deployment will include a
+ toleration and affinity for `node-role.kubernetes.io/control-plane`.
+- `--replicas`: sets the number of replicas in the deployment.
+- `--watch-namespace`: specifies a comma-separated list of namespaces to watch
+ (default: all namespaces).
+- `--version`: defines the minor version of the operator to be installed, such
+ as `1.23`. If a minor version is specified, the plugin installs the latest
+ patch version of that minor version. If no version is supplied, the plugin
+ installs the latest `MAJOR.MINOR.PATCH` version of the operator.
An example of the `generate` command, which will generate a YAML manifest that
will install the operator, is as follows:
-```shell
+```sh
kubectl cnp install generate \
-n king \
--version 1.23 \
@@ -180,45 +183,47 @@ kubectl cnp install generate \
```
The flags in the above command have the following meaning:
-- `-n king` install the cnp operator into the `king` namespace
-- `--version 1.23` install the latest patch version for minor version 1.23
-- `--replicas 3` install the operator with 3 replicas
-- `--watch-namespace "albert, bb, freddie"` have the operator watch for
- changes in the `albert`, `bb` and `freddie` namespaces only
+
+- `-n king` install the PG4K operator into the `king` namespace
+- `--version 1.23` install the latest patch version for minor version 1.23
+- `--replicas 3` install the operator with 3 replicas
+- `--watch-namespace "albert, bb, freddie"` have the operator watch for
+ changes in the `albert`, `bb` and `freddie` namespaces only
### Status
The `status` command provides an overview of the current status of your
cluster, including:
-* **general information**: name of the cluster, PostgreSQL's system ID, number of
- instances, current timeline and position in the WAL
-* **backup**: point of recoverability, and WAL archiving status as returned by
- the `pg_stat_archiver` view from the primary - or designated primary in the
- case of a replica cluster
-* **streaming replication**: information taken directly from the `pg_stat_replication`
- view on the primary instance
-* **instances**: information about each Postgres instance, taken directly by each
- instance manager; in the case of a standby, the `Current LSN` field corresponds
- to the latest write-ahead log location that has been replayed during recovery
- (replay LSN).
+- **general information**: name of the cluster, PostgreSQL's system ID, number of
+ instances, current timeline and position in the WAL
+- **backup**: point of recoverability, and WAL archiving status as returned by
+ the `pg_stat_archiver` view from the primary - or designated primary in the
+ case of a replica cluster
+- **streaming replication**: information taken directly from the `pg_stat_replication`
+ view on the primary instance
+- **instances**: information about each Postgres instance, taken directly by each
+ instance manager; in the case of a standby, the `Current LSN` field corresponds
+ to the latest write-ahead log location that has been replayed during recovery
+ (replay LSN).
!!! Important
+
The status information above is taken at different times and at different
locations, resulting in slightly inconsistent returned values. For example,
the `Current Write LSN` location in the main header, might be different
from the `Current LSN` field in the instances status as it is taken at
two different time intervals.
-```shell
+```sh
kubectl cnp status sandbox
```
-```shell
+```output
Cluster Summary
Name: default/sandbox
System ID: 7423474350493388827
-PostgreSQL Image: quay.io/enterprisedb/postgresql:17.0
+PostgreSQL Image: docker.enterprisedb.com/k8s_enterprise/edb-postgres-extended:16.4
Primary instance: sandbox-1
Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 1m14s)
Status: Cluster in healthy state
@@ -248,15 +253,15 @@ sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.1 k
If you require more detailed status information, use the `--verbose` option (or
`-v` for short). The level of detail increases each time the flag is repeated:
-```shell
+```sh
kubectl cnp status sandbox --verbose
```
-```shell
+```output
Cluster Summary
Name: default/sandbox
System ID: 7423474350493388827
-PostgreSQL Image: quay.io/enterprisedb/postgresql:17.0
+PostgreSQL Image: docker.enterprisedb.com/k8s_enterprise/edb-postgres-extended:16.4
Primary instance: sandbox-1
Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 2m4s)
Status: Cluster in healthy state
@@ -311,13 +316,13 @@ The command also supports output in `yaml` and `json` format.
The meaning of this command is to `promote` a pod in the cluster to primary, so you
can start with maintenance work or test a switch-over situation in your cluster
-```shell
+```sh
kubectl cnp promote cluster-example cluster-example-2
```
Or you can use the instance node number to promote
-```shell
+```sh
kubectl cnp promote cluster-example 2
```
@@ -329,19 +334,19 @@ a TLS authentication certificate.
To get a certificate, you need to provide a name for the secret to store
the credentials, the cluster name, and a user for this certificate
-```shell
+```sh
kubectl cnp certificate cluster-cert --cnp-cluster cluster-example --cnp-user appuser
```
-After the secret is created, you can get it using `kubectl`
+After the secret is created, you can get it using `kubectl`:
-```shell
+```sh
kubectl get secret cluster-cert
```
And you can view its content in plain text using the following command:
-```shell
+```sh
kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]'
```
@@ -349,16 +354,16 @@ kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]'
The `kubectl cnp restart` command can be used in two cases:
-- requesting the operator to orchestrate a rollout restart
- for a certain cluster. This is useful to apply
- configuration changes to cluster dependent objects, such as ConfigMaps
- containing custom monitoring queries.
+- requesting the operator to orchestrate a rollout restart
+ for a certain cluster. This is useful to apply
+ configuration changes to cluster dependent objects, such as ConfigMaps
+ containing custom monitoring queries.
-- request a single instance restart, either in-place if the instance is
- the cluster's primary or deleting and recreating the pod if
- it is a replica.
+- request a single instance restart, either in-place if the instance is
+ the cluster's primary or deleting and recreating the pod if
+ it is a replica.
-```shell
+```sh
# this command will restart a whole cluster in a rollout fashion
kubectl cnp restart [clusterName]
@@ -371,6 +376,7 @@ a switchover, the switchover will take precedence over the in-place restart. A
common case for this will be a minor upgrade of PostgreSQL image.
!!! Note
+
If you want ConfigMaps and Secrets to be **automatically** reloaded
by instances, you can add a label with key `k8s.enterprisedb.io/reload` to it.
@@ -382,7 +388,7 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu
The following command will reload all configurations for a given cluster:
-```shell
+```sh
kubectl cnp reload [cluster_name]
```
@@ -392,8 +398,8 @@ The `kubectl cnp maintenance` command helps to modify one or more clusters
across namespaces and set the maintenance window values. It will change
the following fields:
-* .spec.nodeMaintenanceWindow.inProgress
-* .spec.nodeMaintenanceWindow.reusePVC
+- .spec.nodeMaintenanceWindow.inProgress
+- .spec.nodeMaintenanceWindow.reusePVC
It accepts `set` and `unset` as arguments, setting `inProgress` to `true` in
case of `set` and to `false` in case of `unset`.
@@ -407,13 +413,13 @@ all the cluster in the list.
If you want to put all the PostgreSQL clusters in your Kubernetes cluster into
maintenance, you just need to run the following command:
-```shell
+```sh
kubectl cnp maintenance set --all-namespaces
```
And you'll get the list of all the clusters to update
-```shell
+```output
The following are the new values for the clusters
Namespace Cluster Name Maintenance reusePVC
--------- ------------ ----------- --------
@@ -438,21 +444,23 @@ The `operator` sub-command requests the operator to provide information
regarding the operator deployment, configuration and events.
!!! Important
+
All confidential information in Secrets and ConfigMaps is REDACTED.
The Data map will show the **keys** but the values will be empty.
The flag `-S` / `--stopRedaction` will defeat the redaction and show the
values. Use only at your own risk, this will share private data.
!!! Note
+
By default, operator logs are not collected, but you can enable operator
log collection with the `--logs` flag
-* **deployment information**: the operator Deployment and operator Pod
-* **configuration**: the Secrets and ConfigMaps in the operator namespace
-* **events**: the Events in the operator namespace
-* **webhook configuration**: the mutating and validating webhook configurations
-* **webhook service**: the webhook service
-* **logs**: logs for the operator Pod (optional, off by default) in JSON-lines format
+- **deployment information**: the operator Deployment and operator Pod
+- **configuration**: the Secrets and ConfigMaps in the operator namespace
+- **events**: the Events in the operator namespace
+- **webhook configuration**: the mutating and validating webhook configurations
+- **webhook service**: the webhook service
+- **logs**: logs for the operator Pod (optional, off by default) in JSON-lines format
The command will generate a ZIP file containing various manifest in YAML format
(by default, but settable to JSON with the `-o` flag).
@@ -460,37 +468,38 @@ Use the `-f` flag to name a result file explicitly. If the `-f` flag is not used
default time-stamped filename is created for the zip file.
!!! Note
+
The report plugin obeys `kubectl` conventions, and will look for objects constrained
- by namespace. The CNP Operator will generally not be installed in the same
+ by namespace. The PG4K Operator will generally not be installed in the same
namespace as the clusters.
E.g. the default installation namespace is postgresql-operator-system
-```shell
+```sh
kubectl cnp report operator -n
```
results in
-```shell
+```output
Successfully written report to "report_operator_.zip" (format: "yaml")
```
With the `-f` flag set:
-```shell
+```sh
kubectl cnp report operator -n -f reportRedacted.zip
```
Unzipping the file will produce a time-stamped top-level folder to keep the
directory tidy:
-```shell
+```sh
unzip reportRedacted.zip
```
will result in:
-```shell
+```output
Archive: reportRedacted.zip
creating: report_operator_/
creating: report_operator_/manifests/
@@ -506,7 +515,7 @@ Archive: reportRedacted.zip
If you activated the `--logs` option, you'd see an extra subdirectory:
-```shell
+```output
Archive: report_operator_.zip
creating: report_operator_/operator-logs/
@@ -514,19 +523,20 @@ Archive: report_operator_.zip
```
!!! Note
+
The plugin will try to get the PREVIOUS operator's logs, which is helpful
when investigating restarted operators.
In all cases, it will also try to get the CURRENT operator logs. If current
and previous logs are available, it will show them both.
-``` json
+```output
====== Begin of Previous Log =====
-2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
====== End of Previous Log =====
-2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
+2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}}
2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"}
```
@@ -535,7 +545,7 @@ and `====== End …` guards, with no content inside.
You can verify that the confidential information is REDACTED by default:
-```shell
+```sh
cd report_operator_/manifests/
head postgresql-operator-ca-secret.yaml
```
@@ -554,18 +564,18 @@ metadata:
With the `-S` (`--stopRedaction`) option activated, secrets are shown:
-```shell
+```sh
kubectl cnp report operator -n -f reportNonRedacted.zip -S
```
You'll get a reminder that you're about to view confidential information:
-```shell
+```output
WARNING: secret Redaction is OFF. Use it with caution
Successfully written report to "reportNonRedacted.zip" (format: "yaml")
```
-```shell
+```sh
unzip reportNonRedacted.zip
head postgresql-operator-ca-secret.yaml
```
@@ -585,12 +595,12 @@ metadata:
The `cluster` sub-command gathers the following:
-* **cluster resources**: the cluster information, same as `kubectl get cluster -o yaml`
-* **cluster pods**: pods in the cluster namespace matching the cluster name
-* **cluster jobs**: jobs, if any, in the cluster namespace matching the cluster name
-* **events**: events in the cluster namespace
-* **pod logs**: logs for the cluster Pods (optional, off by default) in JSON-lines format
-* **job logs**: logs for the Pods created by jobs (optional, off by default) in JSON-lines format
+- **cluster resources**: the cluster information, same as `kubectl get cluster -o yaml`
+- **cluster pods**: pods in the cluster namespace matching the cluster name
+- **cluster jobs**: jobs, if any, in the cluster namespace matching the cluster name
+- **events**: events in the cluster namespace
+- **pod logs**: logs for the cluster Pods (optional, off by default) in JSON-lines format
+- **job logs**: logs for the Pods created by jobs (optional, off by default) in JSON-lines format
The `cluster` sub-command accepts the `-f` and `-o` flags, as the `operator` does.
If the `-f` flag is not used, a default timestamped report name will be used.
@@ -598,12 +608,13 @@ Note that the cluster information does not contain configuration Secrets / Confi
so the `-S` is disabled.
!!! Note
+
By default, cluster logs are not collected, but you can enable cluster
log collection with the `--logs` flag
Usage:
-```shell
+```sh
kubectl cnp report cluster [flags]
```
@@ -611,17 +622,17 @@ Note that, unlike the `operator` sub-command, for the `cluster` sub-command you
need to provide the cluster name, and very likely the namespace, unless the cluster
is in the default one.
-```shell
+```sh
kubectl cnp report cluster example -f report.zip -n example_namespace
```
and then:
-```shell
+```sh
unzip report.zip
```
-```shell
+```output
Archive: report.zip
creating: report_cluster_example_/
creating: report_cluster_example_/manifests/
@@ -633,21 +644,21 @@ Archive: report.zip
Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP.
-```shell
+```sh
kubectl cnp report cluster example -n example_namespace --logs
```
will result in:
-```shell
+```output
Successfully written report to "report_cluster_example_.zip" (format: "yaml")
```
-```shell
+```sh
unzip report_cluster_.zip
```
-```shell
+```output
Archive: report_cluster_example_.zip
creating: report_cluster_example_/
creating: report_cluster_example_/manifests/
@@ -670,6 +681,7 @@ Install Plan, and add them automatically to the zip under the `openshift`
sub-folder.
!!! Note
+
the namespace becomes very important on OpenShift. The default namespace
for OpenShift in CNP is "openshift-operators". Many (most) clients will use
a different namespace for the CNP operator.
@@ -686,14 +698,14 @@ Successfully written report to "report_operator_.zip" (format: "yaml"
You can find the OpenShift-related files in the `openshift` sub-folder:
-```shell
+```sh
unzip report_operator_.zip
cd report_operator_/
cd openshift
head clusterserviceversions.yaml
```
-```text
+```yaml
apiVersion: operators.coreos.com/v1alpha1
items:
- apiVersion: operators.coreos.com/v1alpha1
@@ -726,7 +738,7 @@ the `-h` flag:
`kubectl cnp logs cluster -h`
The `logs` command will display logs in JSON-lines format, unless the
-`--timestamps` flag is used, in which case, a human readable timestamp will be
+`--timestamps` flag is used, in which case, a human-readable timestamp will be
prepended to each line. In this case, lines will no longer be valid JSON,
and tools such as `jq` may not work as desired.
@@ -749,7 +761,7 @@ The `--tail` flag can be used to specify how many log lines will be retrieved
from each pod in the cluster. By default, the `logs cluster` sub-command will
display all the logs from each pod in the cluster. If combined with the "follow"
flag `-f`, the number of logs specified by `--tail` will be retrieved until the
-current time, and and from then the new logs will be followed.
+current time, and from then the new logs will be followed.
NOTE: unlike other `cnp` plugin commands, the `-f` is used to denote "follow"
rather than specify a file. This keeps with the convention of `kubectl logs`,
@@ -757,24 +769,24 @@ which takes `-f` to mean the logs should be followed.
Usage:
-```shell
+```sh
kubectl cnp logs cluster [flags]
```
Using the `-f` option to follow:
-```shell
+```sh
kubectl cnp report cluster cluster-example -f
```
Using `--tail` option to display 3 lines from each pod and the `-f` option
to follow:
-```shell
+```sh
kubectl cnp report cluster cluster-example -f --tail 3
```
-``` json
+```output
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
{"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"}
…
@@ -783,8 +795,8 @@ kubectl cnp report cluster cluster-example -f --tail 3
With the `-o` option omitted, and with `--output` specified:
-``` sh
-kubectl cnp logs cluster cluster-example --output my-cluster.log
+```console
+$ kubectl cnp logs cluster cluster-example --output my-cluster.log
Successfully written logs to "my-cluster.log"
```
@@ -797,7 +809,7 @@ into a human-readable output, and attempts to sort the entries by timestamp.
It can be used in combination with `kubectl cnp logs cluster`, as
shown in the following example:
-``` sh
+```console
$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting EDB Postgres for Kubernetes Instance Manager
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -807,10 +819,10 @@ $ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty
```
Alternatively, it can be used in combination with other commands that produce
-cnp logs in JSON format, such as `stern`, or `kubectl logs`, as in the
+PG4K logs in JSON format, such as `stern`, or `kubectl logs`, as in the
following example:
-``` sh
+```console
$ kubectl logs cluster-example-1 | kubectl cnp logs pretty
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting EDB Postgres for Kubernetes Instance Manager
2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -824,7 +836,7 @@ to display logs for specific pods or loggers, or to filter logs by severity
level.
Here's an example:
-``` sh
+```console
$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --pods cluster-example-1 --loggers postgres --log-level info
2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process
2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"...
@@ -841,7 +853,7 @@ mode. The sub-command will add a group separator line, `---`, at the end of
each sorted group. The size of the grouping can be configured via the
`--sorting-group-size` flag (default: 1000), as illustrated in the following example:
-``` sh
+```console
$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --sorting-group-size=3
2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting EDB Postgres for Kubernetes Instance Manager
2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL
@@ -858,6 +870,7 @@ To explore all available options, use the `-h` flag for detailed explanations
of the supported flags and their usage.
!!! Info
+
You can also increase the verbosity of the log by adding more `-v` options.
### Destroy
@@ -875,14 +888,14 @@ detached PVCs.
Usage:
-```
+```sh
kubectl cnp destroy [CLUSTER_NAME] [INSTANCE_ID]
```
The following example removes the `cluster-example-2` pod and the associated
PVCs:
-```
+```sh
kubectl cnp destroy cluster-example 2
```
@@ -901,36 +914,37 @@ instance.
You can hibernate a cluster with:
-```
+```sh
kubectl cnp hibernate on
```
This will:
-1. shutdown every PostgreSQL instance
-2. detach the PVCs containing the data of the primary instance, and annotate
- them with the latest database status and the latest cluster configuration
-3. delete the `Cluster` resource, including every generated resource - except
- the aforementioned PVCs
+1. shut down every PostgreSQL instance
+2. detach the PVCs containing the data of the primary instance, and annotate
+ them with the latest database status and the latest cluster configuration
+3. delete the `Cluster` resource, including every generated resource - except
+ the aforementioned PVCs
When hibernated, an EDB Postgres for Kubernetes cluster is represented by just a group of
PVCs, in which the one containing the `PGDATA` is annotated with the latest
available status, including content from `pg_controldata`.
!!! Warning
+
A cluster having fenced instances cannot be hibernated, as fencing is
part of the hibernation procedure too.
In case of error the operator will not be able to revert the procedure. You can
still force the operation with:
-```
+```sh
kubectl cnp hibernate on cluster-example --force
```
A hibernated cluster can be resumed with:
-```
+```sh
kubectl cnp hibernate off
```
@@ -938,7 +952,7 @@ Once the cluster has been hibernated, it's possible to show the last
configuration and the status that PostgreSQL had after it was shut down.
That can be done with:
-```
+```sh
kubectl cnp hibernate status
```
@@ -947,7 +961,7 @@ kubectl cnp hibernate status
Pgbench can be run against an existing PostgreSQL cluster with the following
command:
-```
+```sh
kubectl cnp pgbench -- --time 30 --client 1 --jobs 1
```
@@ -958,7 +972,7 @@ details.
fio can be run on an existing storage class with the following command:
-```
+```sh
kubectl cnp fio -n
```
@@ -969,27 +983,22 @@ Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details.
The `kubectl cnp backup` command requests a new physical backup for
an existing Postgres cluster by creating a new `Backup` resource.
-!!! Info
- From release 1.21, the `backup` command accepts a new flag, `-m`
- to specify the backup method.
- To request a backup using volume snapshots, set `-m volumeSnapshot`
-
The following example requests an on-demand backup for a given cluster:
-```shell
+```sh
kubectl cnp backup [cluster_name]
```
or, if using volume snapshots:
-```shell
+```sh
kubectl cnp backup [cluster_name] -m volumeSnapshot
```
The created backup will be named after the request time:
-```shell
-kubectl cnp backup cluster-example
+```console
+$ kubectl cnp backup cluster-example
backup/cluster-example-20230121002300 created
```
@@ -1002,7 +1011,7 @@ to request an online/hot backup or an offline/cold one: additionally, you can
also tune online backups by explicitly setting the `--immediate-checkpoint` and
`--wait-for-archive` options.
-The ["Backup" section](./backup.md) contains more information about
+The ["Backup" section](./backup.md#backup) contains more information about
the configuration settings.
### Launching psql
@@ -1012,11 +1021,12 @@ process (psql) connected to an existing Postgres cluster, as if you were running
it from the actual pod. This means that you will be using the `postgres` user.
!!! Important
+
As you will be connecting as `postgres` user, in production environments this
method should be used with extreme care, by authorized personnel only.
-```shell
-kubectl cnp psql cluster-example
+```console
+$ kubectl cnp psql cluster-example
psql (17.0 (Debian 17.0-1.pgdg110+1))
Type "help" for help.
@@ -1027,8 +1037,9 @@ postgres=#
By default, the command will connect to the primary instance. The user can
select to work against a replica by using the `--replica` option:
-```shell
-kubectl cnp psql --replica cluster-example
+```console
+$ kubectl cnp psql --replica cluster-example
+
psql (17.0 (Debian 17.0-1.pgdg110+1))
Type "help" for help.
@@ -1046,11 +1057,12 @@ This command will start `kubectl exec`, and the `kubectl` executable must be
reachable in your `PATH` variable for this to work correctly.
!!!Note
+
When connecting to instances running on OpenShift, you must explicitly
pass a username to the `psql` command, because of a [security measure built into
OpenShift](https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids):
-```shell
+```sh
kubectl cnp psql cluster-example -- -U postgres
```
!!!
@@ -1058,6 +1070,7 @@ kubectl cnp psql cluster-example -- -U postgres
### Snapshotting a Postgres cluster
!!! Warning
+
The `kubectl cnp snapshot` command has been removed.
Please use the [`backup` command](#requesting-a-new-physical-backup) to request
backups using volume snapshots.
@@ -1074,8 +1087,9 @@ images, you can install pgAdmin in your environment as a standard
Kubernetes deployment.
!!! Important
+
Deployment of pgAdmin in Kubernetes production environments is beyond the
- scope of this document and, more broadly, of the CloudNativePG project.
+ scope of this document and, more broadly, of the EDB Postgres for Kubernetes project.
However, **for the purposes of demonstration and evaluation**, EDB Postgres for Kubernetes
offers a suitable solution. The `cnp` plugin implements the `pgadmin4`
@@ -1134,6 +1148,7 @@ kubectl cnp pgadmin4 --dry-run cluster-example | kubectl delete -f -
```
!!! Warning
+
Never deploy pgAdmin in production using the plugin.
### Logical Replication Publications
@@ -1145,6 +1160,7 @@ creation of logical replication publications, particularly on remote PostgreSQL
databases.
!!! Warning
+
It is crucial to have a solid understanding of both the capabilities and
limitations of PostgreSQL's native logical replication system before using
these commands.
@@ -1164,26 +1180,27 @@ kubectl cnp publication create \
There are two primary use cases:
-- With `--external-cluster`: Use this option to create a publication on an
- external cluster (i.e. defined in the `externalClusters` stanza). The commands
- will be issued from the ``, but the publication will be for the
- data in ``.
+- With `--external-cluster`: Use this option to create a publication on an
+ external cluster (i.e. defined in the `externalClusters` stanza). The commands
+ will be issued from the ``, but the publication will be for the
+ data in ``.
-- Without `--external-cluster`: Use this option to create a publication in the
- `` PostgreSQL `Cluster` (by default, the `app` database).
+- Without `--external-cluster`: Use this option to create a publication in the
+ `` PostgreSQL `Cluster` (by default, the `app` database).
!!! Warning
+
When connecting to an external cluster, ensure that the specified user has
sufficient permissions to execute the `CREATE PUBLICATION` command.
You have several options, similar to the [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html)
command, to define the group of tables to replicate. Notable options include:
-- If you specify the `--all-tables` option, you create a publication `FOR ALL TABLES`.
-- Alternatively, you can specify multiple occurrences of:
- - `--table`: Add a specific table (with an expression) to the publication.
- - `--schema`: Include all tables in the specified database schema (available
- from PostgreSQL 15).
+- If you specify the `--all-tables` option, you create a publication `FOR ALL TABLES`.
+- Alternatively, you can specify multiple occurrences of:
+ - `--table`: Add a specific table (with an expression) to the publication.
+ - `--schema`: Include all tables in the specified database schema (available
+ from PostgreSQL 15).
The `--dry-run` option enables you to preview the SQL commands that the plugin
will execute.
@@ -1204,7 +1221,7 @@ to `source-cluster`.
We can run:
-``` sh
+```sh
kubectl cnp publication create destination-cluster \
--external-cluster=source-cluster --all-tables
```
@@ -1214,7 +1231,7 @@ the SQL commands on the `destination-cluster`.
Or instead, we can run:
-``` sh
+```sh
kubectl cnp publication create source-cluster \
--publication=app --all-tables
```
@@ -1223,6 +1240,7 @@ which will create a publication named `app` for all the tables in the
`source-cluster`, running the SQL commands on the source cluster.
!!! Info
+
There are two sample files that have been provided for illustration and inspiration:
[logical-source](../samples/cluster-example-logical-source.yaml) and
[logical-destination](../samples/cluster-example-logical-destination.yaml).
@@ -1257,6 +1275,7 @@ replication subscriptions, especially when dealing with remote PostgreSQL
databases.
!!! Warning
+
Before using these commands, it is essential to have a comprehensive
understanding of both the capabilities and limitations of PostgreSQL's
native logical replication system.
@@ -1299,7 +1318,7 @@ As in the section on publications, we have a `source-cluster` and a
The following command:
-``` sh
+```sh
kubectl cnp subscription create destination-cluster \
--external-cluster=source-cluster \
--publication=app --subscription=app
@@ -1308,11 +1327,13 @@ kubectl cnp subscription create destination-cluster \
will create a subscription for `app` on the destination cluster.
!!! Warning
+
Prioritize testing subscriptions in a non-production environment to ensure
their effectiveness and identify any potential issues before implementing them
in a production setting.
!!! Info
+
There are two sample files that have been provided for illustration and inspiration:
[logical-source](../samples/cluster-example-logical-source.yaml) and
[logical-destination](../samples/cluster-example-logical-destination.yaml).
@@ -1373,12 +1394,13 @@ subscription, both called `app`, are already present.
The following command will synchronize the sequences involved in the
`app` subscription, from the source cluster into the destination cluster.
-``` sh
+```sh
kubectl cnp subscription sync-sequences destination-cluster \
--subscription=app
```
!!! Warning
+
Prioritize testing subscriptions in a non-production environment to
guarantee their effectiveness and detect any potential issues before deploying
them in a production setting.
@@ -1388,4 +1410,120 @@ kubectl cnp subscription sync-sequences destination-cluster \
The `cnp` plugin can be easily integrated in [K9s](https://k9scli.io/), a
popular terminal-based UI to interact with Kubernetes clusters.
-See [`k9s/plugins.yml`](../samples/k9s/plugins.yml) for details.
+See [`k9s/plugins.yml`](samples/k9s/plugins.yml) for details.
+
+## Permissions required by the plugin
+
+The plugin requires a set of Kubernetes permissions that depends on the command
+to execute. These permissions may affect resources and sub-resources like Pods,
+PDBs, PVCs, and enable actions like `get`, `delete`, `patch`. The following
+table contains the full details:
+
+| Command | Resource Permissions |
+| :-------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| backup | clusters: get<br/>backups: create |
+| certificate | clusters: get<br/>secrets: get,create |
+| destroy | pods: get,delete<br/>jobs: delete,list<br/>PVCs: list,delete,update |
+| fencing | clusters: get,patch<br/>pods: get |
+| fio | PVCs: create<br/>configmaps: create<br/>deployment: create |
+| hibernate | clusters: get,patch,delete<br/>pods: list,get,delete<br/>pods/exec: create<br/>jobs: list<br/>PVCs: get,list,update,patch,delete |
+| install | none |
+| logs | clusters: get<br/>pods: list<br/>pods/log: get |
+| maintenance | clusters: get,patch,list |
+| pgadmin4 | clusters: get<br/>configmaps: create<br/>deployments: create<br/>services: create<br/>secrets: create |
+| pgbench | clusters: get<br/>jobs: create |
+| promote | clusters: get<br/>clusters/status: patch<br/>pods: get |
+| psql | pods: get,list<br/>pods/exec: create |
+| publication | clusters: get<br/>pods: get,list<br/>pods/exec: create |
+| reload | clusters: get,patch |
+| report cluster | clusters: get<br/>pods: list<br/>pods/log: get<br/>jobs: list<br/>events: list<br/>PVCs: list |
+| report operator | configmaps: get<br/>deployments: get<br/>events: list<br/>pods: list<br/>pods/log: get<br/>secrets: get<br/>services: get<br/>mutatingwebhookconfigurations: list[^1]<br/>validatingwebhookconfigurations: list[^1]<br/>If OLM is present on the K8s cluster, also:<br/>clusterserviceversions: list<br/>installplans: list<br/>subscriptions: list |
+| restart | clusters: get,patch<br/>pods: get,delete |
+| status | clusters: get<br/>pods: list<br/>pods/exec: create<br/>pods/proxy: create<br/>PDBs: list |
+| subscription | clusters: get<br/>pods: get,list<br/>pods/exec: create |
+| version | none |
+
+[^1]: These permissions are cluster scoped and therefore require a `ClusterRole` resource.
+
+///Footnotes Go Here///
+
+Additionally, assigning the `list` permission on the `clusters` resource
+enables autocompletion for multiple commands.
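+
+For example, a dedicated role granting just that permission could look like
+the following (the role name is illustrative):
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: cnp-autocomplete
+rules:
+  - verbs:
+      - list
+    apiGroups:
+      - postgresql.k8s.enterprisedb.io
+    resources:
+      - clusters
+```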
+
+### Role examples
+
+It is possible to create roles with restricted permissions.
+The following example creates a role that only has access to the cluster logs:
+
+```yaml
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: cnp-log
+rules:
+ - verbs:
+ - get
+ apiGroups:
+ - postgresql.k8s.enterprisedb.io
+ resources:
+ - clusters
+ - verbs:
+ - list
+ apiGroups:
+ - ''
+ resources:
+ - pods
+ - verbs:
+ - get
+ apiGroups:
+ - ''
+ resources:
+ - pods/log
+```
+
+The next example shows a role with the minimal permissions required to get
+the cluster status using the plugin's `status` command:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: cnp-status
+rules:
+ - verbs:
+ - get
+ apiGroups:
+ - postgresql.k8s.enterprisedb.io
+ resources:
+ - clusters
+ - verbs:
+ - list
+ apiGroups:
+ - ''
+ resources:
+ - pods
+ - verbs:
+ - create
+ apiGroups:
+ - ''
+ resources:
+ - pods/exec
+ - verbs:
+ - create
+ apiGroups:
+ - ''
+ resources:
+ - pods/proxy
+ - verbs:
+ - list
+ apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+```
+
+!!! Important
+
+    Keeping the verbs restricted per `resources` and per `apiGroups` helps
+    prevent inadvertently granting broader permissions than intended.
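+
+A role by itself grants nothing until it is bound to a subject. A minimal
+`RoleBinding` for the `cnp-log` role above could look like this (the subject
+name is a placeholder):
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: cnp-log-binding
+subjects:
+  # replace with the user or service account that runs the plugin
+  - kind: User
+    name: alice
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: Role
+  name: cnp-log
+  apiGroup: rbac.authorization.k8s.io
+```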
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
index 63733dfd2fd..bdbfa33b330 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
@@ -44,6 +44,7 @@ local to the Kubernetes worker node running the PostgreSQL database. Node-local
storage, or simply *local storage*, is employed to enhance performance.
!!! Note
+
If your database files reside on shared storage accessible over the
network, the default self-healing behavior of the operator can efficiently
handle scenarios where volumes are reused by pods on different nodes after a
@@ -97,6 +98,7 @@ on draining the node during development activities.
## Node Maintenance Window
!!! Important
+
While EDB Postgres for Kubernetes will continue supporting the node maintenance window,
it is currently recommended to transition to direct control of pod disruption
budgets, as explained in the previous section. This section is retained
@@ -109,6 +111,7 @@ to avoid standard self-healing procedures to kick in, while, for example,
enlarging the partition on the physical node or updating the node itself.
!!! Warning
+
Limit the duration of the maintenance window to the shortest
amount of time possible. In this phase, some of the expected
behaviors of Kubernetes are either disabled or running with
@@ -140,12 +143,14 @@ does **not** apply to clusters with only one instance and
reusePVC disabled: see section below.
!!! Note
+
When performing the `kubectl drain` command, you will need
to add the `--delete-emptydir-data` option.
Don't be afraid: it refers to another volume internally used
by the operator - not the PostgreSQL data directory.
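+
+For example, a drain invocation could look like this (the node name is a
+placeholder):
+
+```sh
+kubectl drain worker-node-1 \
+  --ignore-daemonsets \
+  --delete-emptydir-data
+```
+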
!!! Important
+
`PodDisruptionBudget` management can be disabled by setting the
`.spec.enablePDB` field to `false`. In that case, the operator won't
create `PodDisruptionBudgets` and will delete them if they were
@@ -154,6 +159,7 @@ reusePVC disabled: see section below.
### Single instance clusters with `reusePVC` set to `false`
!!! Important
+
We recommend to always create clusters with more
than one instance in order to guarantee high availability.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
index cf7c1cef737..0e268c1413d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
@@ -9,6 +9,7 @@ can be linked together and put in relationship through *labels* and
*annotations*.
!!! info
+
For more information, see the Kubernetes documentation on
[annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) and
[labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
@@ -26,6 +27,7 @@ so that when you define these labels or annotations in a cluster's metadata,
they're inherited by all resources created by it (including pods).
!!! Note
+
Label and annotation inheritance is the technique adopted by EDB Postgres for Kubernetes
instead of alternative approaches such as pod templates.
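+
+For example, assuming the operator configuration lists `categories` in
+`INHERITED_ANNOTATIONS` and `environment` in `INHERITED_LABELS`, a cluster
+defined as follows propagates both to the resources it creates (names and
+values are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+  annotations:
+    categories: database
+  labels:
+    environment: production
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+```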
@@ -224,6 +226,7 @@ The following continues from that example and limits it to the following:
- Labels: `app`, `environment`, and `workload`
!!! Note
+
Feel free to select the names that most suit your context for both
annotations and labels. You can also use wildcards
in naming and adopt strategies like using `mycompany/*` for all labels
diff --git a/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx b/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
index ef6c3994c98..69acffd8b20 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
@@ -3,11 +3,11 @@ title: 'License and License keys'
originalFilePath: 'src/license_keys.md'
---
-A license key is always required for the operator to work.
+License keys are a legacy management mechanism for EDB Postgres for Kubernetes. You do not need a license key if you have installed using an EDB subscription token, and in this case, the licensing commands in this section can be ignored.
-The only exception is when you run the operator with Community PostgreSQL:
-in this case, if the license key is unset, a cluster will be started with the default
-trial license - which automatically expires after 30 days.
+If you are not using an EDB subscription token and are installing from the public repositories, you will need a license key. The only exception is when you run the operator with Community PostgreSQL: in this case, if the license key is unset, a cluster is started with the default trial license, which automatically expires after 30 days. This is not the recommended way of trialing EDB Postgres for Kubernetes; see the [installation guide](installation_upgrade.md) for the recommended options.
+
+The following documentation is only for users who have installed the operator using a license key.
## Company level license keys
@@ -19,6 +19,7 @@ the operator is deployed (`ConfigMap` is also available, but not recommended
for a license key).
!!! Seealso "Operator configuration"
+
For more information, refer to [Operator configuration](operator_conf.md).
Once the company level license is installed, the validity of the
diff --git a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
index ace768a7e17..824027eece6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
@@ -10,6 +10,7 @@ management tools, including command line ones like
[stern](https://github.com/stern/stern).
!!! Important
+
Long-term storage and management of logs are outside the scope of the
operator and should be handled at the Kubernetes infrastructure level.
For more information, see the
@@ -28,6 +29,7 @@ Each log entry includes the following fields:
- `logging_pod` – The name of the pod where the log was generated.
!!! Info
+
If your log ingestion system requires custom field names, you can rename
the `level` and `ts` fields using the `log-field-level` and
`log-field-timestamp` flags in the operator controller. This can be configured
@@ -39,10 +41,11 @@ You can configure the log level for the instance pods in the cluster
specification using the `logLevel` option. Available log levels are: `error`,
`warning`, `info` (default), `debug`, and `trace`.
-!!!Important
- Currently, the log level can only be set at the time the instance starts.
- Changes to the log level in the cluster specification after the cluster has
- started will only apply to new pods, not existing ones.
+!!! Important
+
+ Currently, the log level can only be set at the time the instance starts.
+ Changes to the log level in the cluster specification after the cluster has
+ started will only apply to new pods, not existing ones.
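+
+For example, the following cluster starts its instances with `debug` logging
+(a minimal sketch; the name and storage size are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  logLevel: debug
+  storage:
+    size: 1Gi
+```
+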
## Operator Logs
@@ -96,6 +99,7 @@ Each PostgreSQL log entry is a JSON object with the `logger` key set to
```
!!! Info
+
Internally, the operator uses PostgreSQL's CSV log format. For more details,
refer to the [PostgreSQL documentation on CSV log format](https://www.postgresql.org/docs/current/runtime-config-logging.html).
@@ -108,6 +112,7 @@ To enable PGAudit, add the necessary `pgaudit` parameters in the `postgresql`
section of the cluster configuration.
!!! Important
+
The PGAudit library must be added to `shared_preload_libraries`.
EDB Postgres for Kubernetes automatically manages this based on the presence of `pgaudit.*`
parameters in the PostgreSQL configuration. The operator handles both the
@@ -117,6 +122,7 @@ Additionally, the operator manages the creation and removal of the PGAudit
extension across all databases within the cluster.
!!! Important
+
EDB Postgres for Kubernetes executes the `CREATE EXTENSION` and `DROP EXTENSION` commands
in all databases within the cluster that accept connections.
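+
+As an illustration, a cluster that enables PGAudit by adding `pgaudit`
+parameters might look like this (parameter values are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  postgresql:
+    parameters:
+      "pgaudit.log": "all, -misc"
+      "pgaudit.log_catalog": "off"
+      "pgaudit.log_parameter": "on"
+  storage:
+    size: 1Gi
+```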
@@ -196,7 +202,7 @@ for more details about each field in a record.
## EDB Audit logs
Clusters that are running on EDB Postgres Advanced Server (EPAS)
-can enable [EDB Audit](/epas/latest/epas_security_guide/05_edb_audit_logging/) as follows:
+can enable [EDB Audit](/epas/latest/epas_guide/03_database_administration/05_edb_audit_logging/) as follows:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -279,7 +285,7 @@ See the example below:
}
```
-See EDB [Audit file](/epas/latest/epas_security_guide/05_edb_audit_logging/)
+See EDB [Audit file](/epas/latest/epas_guide/03_database_administration/05_edb_audit_logging/)
for more details about the records' fields.
## Other Logs
@@ -303,6 +309,6 @@ the `logger` field indicating the process that produced them. The possible
- `wal-restore`: logs from the `wal-restore` subcommand of the instance manager
- `instance-manager`: from the [PostgreSQL instance manager](./instance_manager.md)
-With the exception of `postgres` and `edb_audit`, which follows a specific structure, all other
-`logger` values contain the `msg` field with the escaped message that is
+With the exception of `postgres` and `edb_audit`, which follow a specific structure,
+all other `logger` values contain the `msg` field with the escaped message that is
logged.
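+
+For example, one way to follow only the PostgreSQL records of an instance pod
+is the following (the pod name is illustrative):
+
+```sh
+kubectl logs cluster-example-1 | jq 'select(.logger == "postgres")'
+```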
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
index 73a3ff65112..85461effb39 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/monitoring.md'
---
!!! Important
+
Installing Prometheus and Grafana is beyond the scope of this project.
We assume they are correctly installed in your system. However, for
experimentation we provide instructions in
@@ -19,10 +20,12 @@ more `ConfigMap` or `Secret` resources (see the
["User defined metrics" section](#user-defined-metrics) below for details).
!!! Important
+
EDB Postgres for Kubernetes, by default, installs a set of [predefined metrics](#default-set-of-metrics)
in a `ConfigMap` named `default-monitoring`.
!!! Info
+
You can inspect the exported metrics by following the instructions in
the ["How to inspect the exported metrics"](#how-to-inspect-the-exported-metrics)
section below.
@@ -53,6 +56,7 @@ The default database can always be overridden for a given user-defined metric,
by specifying a list of one or more databases in the `target_databases` option.
!!! Seealso "Prometheus/Grafana"
+
If you are interested in evaluating the integration of EDB Postgres for Kubernetes
with Prometheus and Grafana, you can find a quick setup guide
in [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana)
@@ -67,6 +71,7 @@ A `PodMonitor` that correctly points to the Cluster can be automatically created
`.spec.monitoring.enablePodMonitor` to `true` in the Cluster resource itself (default: `false`).
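+
+Enabling the automatically managed `PodMonitor` is a one-line addition to the
+cluster spec; a minimal sketch (the name and storage size are illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  monitoring:
+    enablePodMonitor: true
+  storage:
+    size: 1Gi
+```
+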
!!! Important
+
Any change to the `PodMonitor` created automatically will be overridden by the Operator at the next reconciliation
cycle, in case you need to customize it, you can do so as described below.
@@ -86,10 +91,12 @@ spec:
```
!!! Important
+
Ensure you modify the example above with a unique name, as well as the
correct cluster's namespace and labels (e.g., `cluster-example`).
!!! Important
+
The `postgresql` label, used in previous versions of this document, is deprecated
and will be removed in the future. Please use the `k8s.enterprisedb.io/cluster` label
instead to select the instances.
@@ -101,6 +108,7 @@ setting to `true`. This setup ensures that the metrics exporter uses the same
server certificate used by PostgreSQL to secure communication on port 5432.
!!! Important
+
Changing the `.spec.monitoring.tls.enabled` setting will trigger a rolling restart of the Cluster.
If the `PodMonitor` is managed by the operator (`.spec.monitoring.enablePodMonitor` set to `true`),
@@ -130,10 +138,12 @@ spec:
```
!!! Important
+
Ensure you modify the example above with a unique name, as well as the
correct Cluster's namespace and labels (e.g., `cluster-example`).
!!! Important
+
The `serverName` field in the metrics endpoint must match one of the names
defined in the server certificate. If the default certificate is in use,
the `serverName` value should be in the format `-rw`.
@@ -395,12 +405,14 @@ go_threads 18
```
!!! Note
+
`cnp_collector_postgres_version` is a GaugeVec metric containing the
`Major.Minor` version of Postgres (either PostgreSQL or EPAS). The full
semantic version `Major.Minor.Patch` can be found inside one of its label
field named `full`.
!!! Note
+
`cnp_collector_first_recoverability_point` and `cnp_collector_last_available_backup_timestamp`
will be zero until your first backup to the object store. This is separate from the WAL archival.
@@ -436,11 +448,13 @@ The `customQueriesConfigMap`/`customQueriesSecret` sections contain a list of
Take care that the referred resources have to be created **in the same namespace as the Cluster** resource.
!!! Note
+
If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload
the instances using the `kubectl cnp reload` subcommand.
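+
+For example, labeling an existing ConfigMap could look like this (the
+ConfigMap name is illustrative):
+
+```sh
+kubectl label configmap custom-monitoring-queries \
+  k8s.enterprisedb.io/reload=""
+```
+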
!!! Important
+
When a user defined metric overwrites an already existing metric the instance manager prints a json warning log,
containing the message:`Query with the same name already found. Overwriting the existing one.`
and a key `queryName` containing the overwritten query name.
@@ -525,6 +539,7 @@ datname FROM pg_database WHERE datallowconn AND NOT datistemplate` and matching
the pattern according to [path.Match()](https://pkg.go.dev/path#Match) rules.
!!! Note
+
The `*` character has a [special meaning](https://yaml.org/spec/1.2/spec.html#id2786448) in yaml,
so you need to quote (`"*"`) the `target_databases` value when it includes such a pattern.
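+
+For example, a user-defined metric that targets every database could be
+declared as follows (the table, query, and metric names are illustrative):
+
+```yaml
+some_table_rows:
+  query: "SELECT count(*) AS rows FROM some_table"
+  target_databases:
+    - "*"
+  metrics:
+    - rows:
+        usage: "GAUGE"
+        description: "Number of rows in some_table"
+```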
@@ -652,6 +667,7 @@ cnp__{= ... } =4.11"
+
With Kubernetes 1.21 the `PodSecurityPolicy` has been replaced by the Pod
Security Admission Controller to become the new default way to manage the
security inside Kubernetes. On Openshift 4.11, which is running Kubernetes
@@ -900,6 +956,7 @@ the OpenShift deployment, which in turn is set by the Pod Security Admission
Controller.
!!! Note
+
Even if `nonroot-v2` and `hostnetwork-v2` are qualified as less restricted
SCCs, we don't run tests on them, and therefore we cannot guarantee that these
SCCs will work. That being said, `nonroot-v2` and `hostnetwork-v2` are a subset
@@ -912,6 +969,7 @@ By default, the `Pooler` resource creates pods having the `pgbouncer` container
that runs with the `quay.io/enterprisedb/pgbouncer` image.
!!! Note "There's more"
+
For more details about pod customization for the pooler, please refer to
the ["Pod templates"](connection_pooling.md#pod-templates) section in the
connection pooling documentation.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
index 1cde5f5ef73..99e1979410e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
@@ -11,6 +11,7 @@ framework.
![Operator Capability Levels](./images/operator-capability-level.png)
!!! Important
+
Based on the [Operator Capability Levels model](operator_capability_levels.md),
you can expect a "Level V - Auto Pilot" set of capabilities from the
EDB Postgres for Kubernetes operator.
@@ -24,6 +25,7 @@ Each capability level is associated with a certain set of management features th
5. Auto pilot
!!! Note
+
We consider this framework as a guide for future work and implementations in the operator.
## Level 1: Basic install
@@ -34,6 +36,7 @@ enhancements, such as improvements in how you interact with the
operator and a PostgreSQL cluster configuration.
!!! Important
+
We consider information security part of this level.
### Operator deployment via declarative configuration
@@ -78,7 +81,7 @@ of the EDB Postgres for Kubernetes deployment in your Kubernetes infrastructure.
### Self-contained instance manager
Instead of relying on an external tool to
-coordinate PostgreSQL instances in the Kubernetes cluster pods,
+coordinate PostgreSQL instances in the Kubernetes cluster pods,
such as Patroni or Stolon, the operator
injects the operator executable inside each pod, in a file named
`/controller/manager`. The application is used to control the underlying
@@ -246,7 +249,7 @@ includes integration with cert-manager.
### Certificate authentication for streaming replication
-To authorize streaming replication connections from the standby servers,
+To authorize streaming replication connections from the standby servers,
the operator relies on TLS client certificate authentication. This method is used
instead of relying on a password (and therefore a secret).
@@ -257,6 +260,7 @@ section of the PostgreSQL configuration. Depending on the configuration option,
it also makes sure that all instances are properly reloaded or restarted.
!!! Note Current limitation
+
Changes with `ALTER SYSTEM` aren't detected, meaning
that the cluster state isn't enforced.
@@ -309,16 +313,20 @@ workload, in this case PostgreSQL servers. This includes PostgreSQL minor
release updates (security and bug fixes normally) as well as major online
upgrades.
-### Upgrade of the operator
+### Operator Upgrade
+
+Upgrading the operator is seamless and can be done as a new deployment. After
+upgrading the controller, a rolling update of all deployed PostgreSQL clusters
+is initiated. You can choose to update all clusters simultaneously or
+distribute their upgrades over time.
-You can upgrade the operator seamlessly as a new deployment. Because of the instance
-manager's injection, a change in the
-operator doesn't require a change in the operand.
-The operator can manage older versions of the operand.
+Thanks to the instance manager's injection, upgrading the operator does not
+require changes to the operand, allowing the operator to manage older versions
+of it.
-EDB Postgres for Kubernetes also supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager)
-following an upgrade of the operator. In-place updates don't require a rolling
-update (and subsequent switchover) of the cluster.
+Additionally, EDB Postgres for Kubernetes supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager)
+following an operator upgrade. In-place updates do not require a rolling update
+or a subsequent switchover of the cluster.
### Upgrade of the managed workload
@@ -379,8 +387,8 @@ user action. The operator transparently sets
the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL
files to the defined endpoint. You can decide the compression algorithm,
as well as the number of parallel jobs to concurrently upload WAL files
-in the archive. In addition, `Instance Manager` checks
-the correctness of the archive destination by performing the `barman-cloud-check-wal-archive`
+in the archive. In addition, `Instance Manager` checks
+the correctness of the archive destination by performing the `barman-cloud-check-wal-archive`
command before beginning to ship the first set of WAL files.
### PostgreSQL backups
@@ -397,7 +405,7 @@ Base backups can be saved on:
Base backups are defined at the cluster level, declaratively,
through the `backup` parameter in the cluster definition.
-You can define base backups in two ways:
+You can define base backups in two ways:
- On-demand, through the `Backup` custom resource definition
- Scheduled, through the `ScheduledBackup`custom resource definition, using a cron-like syntax
diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
index 6c9e50a95ea..232da323330 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
@@ -18,6 +18,7 @@ By default, the operator is installed in the `postgresql-operator-system`
namespace as a Kubernetes `Deployment` called `postgresql-operator-controller-manager`.
!!! Note
+
In the examples below we assume the default name and namespace for the operator deployment.
The behavior of the operator can be customized through a `ConfigMap`/`Secret` that
@@ -25,12 +26,14 @@ is located in the same namespace of the operator deployment and with
`postgresql-operator-controller-manager-config` as the name.
!!! Important
+
Any change to the config's `ConfigMap`/`Secret` will not be automatically
detected by the operator, - and as such, it needs to be reloaded (see below).
Moreover, changes only apply to the resources created after the configuration
is reloaded.
!!! Important
+
The operator first processes the ConfigMap values and then the Secret’s, in this order.
As a result, if a parameter is defined in both places, the one in the Secret will be used.
@@ -38,21 +41,24 @@ is located in the same namespace of the operator deployment and with
The operator looks for the following environment variables to be defined in the `ConfigMap`/`Secret`:
-| Name | Description |
-| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `EDB_LICENSE_KEY` | default license key (to be used only if the cluster does not define one, and preferably in the `Secret`) |
-| `ENABLE_REDWOOD_BY_DEFAULT` | Enable the Redwood compatibility by default when using EPAS. |
-| `INHERITED_ANNOTATIONS` | list of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods |
-| `INHERITED_LABELS` | list of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods |
-| `PULL_SECRET_NAME` | name of an additional pull secret to be defined in the operator's namespace and to be used to download images |
-| `ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. This feature is mainly for the Azure environment (default `false`) |
-| `ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | when set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) |
-| `MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters |
-| `MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters |
-| `CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90. |
-| `EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7. |
-| `CREATE_ANY_SERVICE` | when set to `true`, will create `-any` service for the cluster. Default is `false` |
-| `EXTERNAL_BACKUP_ADDON_CONFIGURATION` | Configuration for the `external-backup-adapter` add-on. (See ["Customizing the adapter" in Add-ons](addons.md#customizing-the-adapter)) |
+| Name | Description |
+| ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90. |
+| `CLUSTERS_ROLLOUT_DELAY` | The duration (in seconds) to wait between the roll-outs of different clusters during an operator upgrade. This setting controls the timing of upgrades across clusters, spreading them out to reduce system impact. The default value is `0` which means no delay between PostgreSQL cluster upgrades. |
+| `CREATE_ANY_SERVICE` | When set to `true`, will create `-any` service for the cluster. Default is `false` |
+| `EDB_LICENSE_KEY` | Default license key (to be used only if the cluster does not define one, and preferably in the `Secret`) |
+| `ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. This feature is mainly for the Azure environment (default `false`) |
+| `ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | When set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) |
+| `ENABLE_REDWOOD_BY_DEFAULT` | Enable the Redwood compatibility by default when using EPAS. |
+| `EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7. |
+| `EXTERNAL_BACKUP_ADDON_CONFIGURATION` | Configuration for the `external-backup-adapter` add-on. (See ["Customizing the adapter" in Add-ons](addons.md#customizing-the-adapter)) |
+| `INCLUDE_PLUGINS` | A comma-separated list of plugins to be always included in the Cluster's reconciliation. |
+| `INHERITED_ANNOTATIONS` | List of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods |
+| `INHERITED_LABELS` | List of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods |
+| `INSTANCES_ROLLOUT_DELAY` | The duration (in seconds) to wait between roll-outs of individual PostgreSQL instances within the same cluster during an operator upgrade. The default value is `0`, meaning no delay between upgrades of instances in the same PostgreSQL cluster. |
+| `MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters |
+| `MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters |
+| `PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images |
Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match
both the value `example.com/one` and `example.com/two`.
@@ -67,11 +73,13 @@ will ignore the configuration parameter.
## Defining an operator config map
-The example below customizes the behavior of the operator, by defining a
-default license key (namely a company key), the label/annotation names to be
-inherited by the resources created by any `Cluster` object that is deployed
-at a later time, and by enabling
-[in-place updates for the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager).
+The example below customizes the behavior of the operator, by defining
+a default license key (namely a company key),
+the label/annotation names to be inherited by the resources created by
+any `Cluster` object that is deployed at a later time, by enabling
+[in-place updates for the instance
+manager](installation_upgrade.md#in-place-updates-of-the-instance-manager),
+and by spreading upgrades.
```yaml
apiVersion: v1
@@ -80,15 +88,23 @@ metadata:
name: postgresql-operator-controller-manager-config
namespace: postgresql-operator-system
data:
+ CLUSTERS_ROLLOUT_DELAY: '60'
+ ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
+ EDB_LICENSE_KEY:
INHERITED_ANNOTATIONS: categories
INHERITED_LABELS: environment, workload, app
- ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
+ INSTANCES_ROLLOUT_DELAY: '10'
```
## Defining an operator secret
-The example below customizes the behavior of the operator, by defining a
-default license key.
+The example below customizes the behavior of the operator, by defining
+a default license key (namely a company key),
+the label/annotation names to be inherited by the resources created by
+any `Cluster` object that is deployed at a later time, by enabling
+[in-place updates for the instance
+manager](installation_upgrade.md#in-place-updates-of-the-instance-manager),
+and by spreading upgrades.
```yaml
apiVersion: v1
@@ -97,8 +113,13 @@ metadata:
name: postgresql-operator-controller-manager-config
namespace: postgresql-operator-system
type: Opaque
-data:
+stringData:
+ CLUSTERS_ROLLOUT_DELAY: '60'
+ ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true'
EDB_LICENSE_KEY:
+ INHERITED_ANNOTATIONS: categories
+ INHERITED_LABELS: environment, workload, app
+ INSTANCES_ROLLOUT_DELAY: '10'
```
## Restarting the operator to reload configs
@@ -123,6 +144,7 @@ kubectl delete pods -n [NAMESPACE_NAME_HERE] \
```
!!! Warning
+
Customizations will be applied only to `Cluster` resources created
after the reload of the operator deployment.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/index.mdx
new file mode 100644
index 00000000000..ede0ccd92c4
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/index.mdx
@@ -0,0 +1,5241 @@
+---
+title: API Reference - v1.24.2
+originalFilePath: src/pg4k.v1.md
+navTitle: API Reference
+navigation:
+ - v1.24.2
+
+---
+
+Package v1 contains API Schema definitions for the postgresql v1 API group
+
+## Resource Types
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog)
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
+- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog)
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+
+
+## Backup
+
+Backup is the Schema for the backups API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | Backup |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+BackupSpec
+ |
+
+ Specification of the desired behavior of the backup.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+BackupStatus
+ |
+
+ Most recently observed status of the backup. This data may not be up to
+date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## Cluster
+
+Cluster is the Schema for the PostgreSQL API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | Cluster |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+ClusterSpec
+ |
+
+ Specification of the desired behavior of the cluster.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+ClusterStatus
+ |
+
+ Most recently observed status of the cluster. This data may not be up
+to date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## ClusterImageCatalog
+
+ClusterImageCatalog is the Schema for the clusterimagecatalogs API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | ClusterImageCatalog |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+ImageCatalogSpec
+ |
+
+ Specification of the desired behavior of the ClusterImageCatalog.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## Database
+
+Database is the Schema for the databases API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | Database |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+DatabaseSpec
+ |
+
+ Specification of the desired Database.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+DatabaseStatus
+ |
+
+ Most recently observed status of the Database. This data may not be up to
+date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## ImageCatalog
+
+ImageCatalog is the Schema for the imagecatalogs API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | ImageCatalog |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+ImageCatalogSpec
+ |
+
+ Specification of the desired behavior of the ImageCatalog.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## Pooler
+
+Pooler is the Schema for the poolers API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | Pooler |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+PoolerSpec
+ |
+
+ Specification of the desired behavior of the Pooler.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+PoolerStatus
+ |
+
+ Most recently observed status of the Pooler. This data may not be up to
+date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## ScheduledBackup
+
+ScheduledBackup is the Schema for the scheduledbackups API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | ScheduledBackup |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+ScheduledBackupSpec
+ |
+
+ Specification of the desired behavior of the ScheduledBackup.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+ScheduledBackupStatus
+ |
+
+ Most recently observed status of the ScheduledBackup. This data may not be up
+to date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## AffinityConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+AffinityConfiguration contains the info we need to create the
+affinity rules for Pods
+
+
+Field | Description |
+
+enablePodAntiAffinity
+bool
+ |
+
+ Activates anti-affinity for the pods. The operator will define pods
+anti-affinity unless this field is explicitly set to false
+ |
+
+topologyKey
+string
+ |
+
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+for more info on that
+ |
+
+nodeSelector
+map[string]string
+ |
+
+ NodeSelector is map of key-value pairs used to define the nodes on which
+the pods can run.
+More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ |
+
+nodeAffinity
+core/v1.NodeAffinity
+ |
+
+ NodeAffinity describes node affinity scheduling rules for the pod.
+More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ |
+
+tolerations
+[]core/v1.Toleration
+ |
+
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+on tainted nodes.
+More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ |
+
+podAntiAffinityType
+string
+ |
+
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+"required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are
+added if all the existing nodes don't match the required pod anti-affinity rule.
+More info:
+https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ |
+
+additionalPodAntiAffinity
+core/v1.PodAntiAffinity
+ |
+
+ AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated
+by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+ |
+
+additionalPodAffinity
+core/v1.PodAffinity
+ |
+
+ AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods.
+ |
+
+
+
+
+
+
+## AvailableArchitecture
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+AvailableArchitecture represents the state of a cluster's architecture
+
+
+| Field | Description |
+| ----- | ----------- |
+| `goArch` [Required] *string* | GoArch is the name of the executable architecture |
+| `hash` [Required] *string* | Hash is the hash of the executable |
+
+
+
+
+
+
+## BackupConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+BackupConfiguration defines how the backup of the cluster are taken.
+The supported backup methods are BarmanObjectStore and VolumeSnapshot.
+For details and examples refer to the Backup and Recovery section of the
+documentation
+
+
+Field | Description |
+
+volumeSnapshot
+VolumeSnapshotConfiguration
+ |
+
+ VolumeSnapshot provides the configuration for the execution of volume snapshot backups.
+ |
+
+barmanObjectStore
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
+ |
+
+ The configuration for the barman-cloud tool suite
+ |
+
+retentionPolicy
+string
+ |
+
+ RetentionPolicy is the retention policy to be used for backups
+and WALs (i.e. '60d'). The retention policy is expressed in the form
+of XXu where XX is a positive integer and u is in [dwm] -
+days, weeks, months.
+It's currently only applicable when using the BarmanObjectStore method.
+ |
+
+target
+BackupTarget
+ |
+
+ The policy to decide which instance should perform backups. Available
+options are empty string, which will default to prefer-standby policy,
+primary to have backups run always on primary instances, prefer-standby
+to have backups run preferably on the most updated standby, if available.
+ |
+
+
+
+
+
+
+## BackupMethod
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+BackupMethod defines the way of executing the physical base backups of
+the selected PostgreSQL instance
+
+
+
+## BackupPhase
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+BackupPhase is the phase of the backup
+
+
+
+## BackupPluginConfiguration
+
+**Appears in:**
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+BackupPluginConfiguration contains the backup configuration used by
+the backup plugin
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required] *string* | Name is the name of the plugin managing this backup |
+| `parameters` *map[string]string* | Parameters are the configuration parameters passed to the backup plugin for this backup |
+
+
+
+
+
+
+## BackupSnapshotElementStatus
+
+**Appears in:**
+
+- [BackupSnapshotStatus](#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus)
+
+BackupSnapshotElementStatus is a volume snapshot that is part of a volume snapshot method backup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required] *string* | Name is the snapshot resource name |
+| `type` [Required] *string* | Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE |
+| `tablespaceName` *string* | TablespaceName is the name of the snapshotted tablespace. Only set when type is PG_TABLESPACE |
+
+
+
+
+
+
+## BackupSnapshotStatus
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+BackupSnapshotStatus the fields exclusive to the volumeSnapshot method backup
+
+
+
+
+
+## BackupSource
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+BackupSource contains the backup we need to restore from, plus some
+information that could be needed to correctly restore it.
+
+
+
+
+
+## BackupSpec
+
+**Appears in:**
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+
+BackupSpec defines the desired state of Backup
+
+
+Field | Description |
+
+cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ The cluster to backup
+ |
+
+target
+BackupTarget
+ |
+
+ The policy to decide which instance should perform this backup. If empty,
+it defaults to cluster.spec.backup.target .
+Available options are empty string, primary and prefer-standby .
+primary to have backups run always on primary instances,
+prefer-standby to have backups run preferably on the most updated
+standby, if available.
+ |
+
+method
+BackupMethod
+ |
+
+ The backup method to be used, possible options are barmanObjectStore ,
+volumeSnapshot or plugin . Defaults to: barmanObjectStore .
+ |
+
+pluginConfiguration
+BackupPluginConfiguration
+ |
+
+ Configuration parameters passed to the plugin managing this backup
+ |
+
+online
+bool
+ |
+
+ Whether the default type of backup with volume snapshots is
+online/hot (true , default) or offline/cold (false )
+Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ |
+
+onlineConfiguration
+OnlineConfiguration
+ |
+
+ Configuration parameters to control the online/hot backup with volume snapshots
+Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ |
+
+
+
+
+
+
+## BackupStatus
+
+**Appears in:**
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+
+BackupStatus defines the observed state of Backup
+
+
+Field | Description |
+
+BarmanCredentials
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials
+ |
+(Members of BarmanCredentials are embedded into this type.)
+ The potential credentials for each cloud provider
+ |
+
+endpointCA
+github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
+ |
+
+ EndpointCA store the CA bundle of the barman endpoint.
+Useful when using self-signed certificates to avoid
+errors with certificate issuer and barman-cloud-wal-archive.
+ |
+
+endpointURL
+string
+ |
+
+ Endpoint to be used to upload data to the cloud,
+overriding the automatic endpoint discovery
+ |
+
+destinationPath
+string
+ |
+
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+this path, with different destination folders, will be used for WALs
+and for data. This may not be populated in case of errors.
+ |
+
+serverName
+string
+ |
+
+ The server name on S3, the cluster name is used if this
+parameter is omitted
+ |
+
+encryption
+string
+ |
+
+ Encryption method required to S3 API
+ |
+
+backupId
+string
+ |
+
+ The ID of the Barman backup
+ |
+
+backupName
+string
+ |
+
+ The Name of the Barman backup
+ |
+
+phase
+BackupPhase
+ |
+
+ The last backup status
+ |
+
+startedAt
+meta/v1.Time
+ |
+
+ When the backup was started
+ |
+
+stoppedAt
+meta/v1.Time
+ |
+
+ When the backup was terminated
+ |
+
+beginWal
+string
+ |
+
+ The starting WAL
+ |
+
+endWal
+string
+ |
+
+ The ending WAL
+ |
+
+beginLSN
+string
+ |
+
+ The starting xlog
+ |
+
+endLSN
+string
+ |
+
+ The ending xlog
+ |
+
+error
+string
+ |
+
+ The detected error
+ |
+
+commandOutput
+string
+ |
+
+ Unused. Retained for compatibility with old versions.
+ |
+
+commandError
+string
+ |
+
+ The backup command output in case of error
+ |
+
+backupLabelFile
+[]byte
+ |
+
+ Backup label file content as returned by Postgres in case of online (hot) backups
+ |
+
+tablespaceMapFile
+[]byte
+ |
+
+ Tablespace map file content as returned by Postgres in case of online (hot) backups
+ |
+
+instanceID
+InstanceID
+ |
+
+ Information to identify the instance where the backup has been taken from
+ |
+
+snapshotBackupStatus
+BackupSnapshotStatus
+ |
+
+ Status of the volumeSnapshot backup
+ |
+
+method
+BackupMethod
+ |
+
+ The backup method being used
+ |
+
+online
+bool
+ |
+
+ Whether the backup was online/hot (true ) or offline/cold (false )
+ |
+
+pluginMetadata
+map[string]string
+ |
+
+ A map containing the plugin metadata
+ |
+
+
+
+
+
+
+## BackupTarget
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration)
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+BackupTarget describes the preferred targets for a backup
+
+
+
+## BootstrapConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+BootstrapConfiguration contains information about how to create the PostgreSQL
+cluster. Only a single bootstrap method can be defined among the supported
+ones. `initdb` will be used as the bootstrap method if left
+unspecified. Refer to the Bootstrap page of the documentation for more
+information.
+
+
+Field | Description |
+
+initdb
+BootstrapInitDB
+ |
+
+ Bootstrap the cluster via initdb
+ |
+
+recovery
+BootstrapRecovery
+ |
+
+ Bootstrap the cluster from a backup
+ |
+
+pg_basebackup
+BootstrapPgBaseBackup
+ |
+
+ Bootstrap the cluster taking a physical backup of another compatible
+PostgreSQL instance
+ |
+
+
+
+
+
+
+## BootstrapInitDB
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapInitDB is the configuration of the bootstrap process when
+initdb is used
+Refer to the Bootstrap page of the documentation for more information.
+
+
+Field | Description |
+
+database
+string
+ |
+
+ Name of the database used by the application. Default: app .
+ |
+
+owner
+string
+ |
+
+ Name of the owner of the database in the instance to be used
+by applications. Defaults to the value of the database key.
+ |
+
+secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ Name of the secret containing the initial credentials for the
+owner of the user database. If empty a new secret will be
+created from scratch
+ |
+
+redwood
+bool
+ |
+
+ If we need to enable/disable Redwood compatibility. Requires
+EPAS and for EPAS defaults to true
+ |
+
+options
+[]string
+ |
+
+ The list of options that must be passed to initdb when creating the cluster.
+Deprecated: This could lead to inconsistent configurations,
+please use the explicit provided parameters instead.
+If defined, explicit values will be ignored.
+ |
+
+dataChecksums
+bool
+ |
+
+ Whether the -k option should be passed to initdb,
+enabling checksums on data pages (default: false )
+ |
+
+encoding
+string
+ |
+
+ The value to be passed as option --encoding for initdb (default:UTF8 )
+ |
+
+localeCollate
+string
+ |
+
+ The value to be passed as option --lc-collate for initdb (default:C )
+ |
+
+localeCType
+string
+ |
+
+ The value to be passed as option --lc-ctype for initdb (default:C )
+ |
+
+walSegmentSize
+int
+ |
+
+ The value in megabytes (1 to 1024) to be passed to the --wal-segsize
+option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+ |
+
+postInitSQL
+[]string
+ |
+
+ List of SQL queries to be executed as a superuser in the postgres
+database right after the cluster has been created - to be used with extreme care
+(by default empty)
+ |
+
+postInitApplicationSQL
+[]string
+ |
+
+ List of SQL queries to be executed as a superuser in the application
+database right after the cluster has been created - to be used with extreme care
+(by default empty)
+ |
+
+postInitTemplateSQL
+[]string
+ |
+
+ List of SQL queries to be executed as a superuser in the template1
+database right after the cluster has been created - to be used with extreme care
+(by default empty)
+ |
+
+import
+Import
+ |
+
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+instance using logical backup (pg_dump and pg_restore )
+ |
+
+postInitApplicationSQLRefs
+SQLRefs
+ |
+
+ List of references to ConfigMaps or Secrets containing SQL files
+to be executed as a superuser in the application database right after
+the cluster has been created. The references are processed in a specific order:
+first, all Secrets are processed, followed by all ConfigMaps.
+Within each group, the processing order follows the sequence specified
+in their respective arrays.
+(by default empty)
+ |
+
+postInitTemplateSQLRefs
+SQLRefs
+ |
+
+ List of references to ConfigMaps or Secrets containing SQL files
+to be executed as a superuser in the template1 database right after
+the cluster has been created. The references are processed in a specific order:
+first, all Secrets are processed, followed by all ConfigMaps.
+Within each group, the processing order follows the sequence specified
+in their respective arrays.
+(by default empty)
+ |
+
+postInitSQLRefs
+SQLRefs
+ |
+
+ List of references to ConfigMaps or Secrets containing SQL files
+to be executed as a superuser in the postgres database right after
+the cluster has been created. The references are processed in a specific order:
+first, all Secrets are processed, followed by all ConfigMaps.
+Within each group, the processing order follows the sequence specified
+in their respective arrays.
+(by default empty)
+ |
+
+
+
+
+
+
+## BootstrapPgBaseBackup
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapPgBaseBackup contains the configuration required to take
+a physical backup of an existing PostgreSQL cluster
+
+
+Field | Description |
+
+source [Required]
+string
+ |
+
+ The name of the server of which we need to take a physical backup
+ |
+
+database
+string
+ |
+
+ Name of the database used by the application. Default: app .
+ |
+
+owner
+string
+ |
+
+ Name of the owner of the database in the instance to be used
+by applications. Defaults to the value of the database key.
+ |
+
+secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ Name of the secret containing the initial credentials for the
+owner of the user database. If empty a new secret will be
+created from scratch
+ |
+
+
+
+
+
+
+## BootstrapRecovery
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapRecovery contains the configuration required to restore
+from an existing cluster using 3 methodologies: external cluster,
+volume snapshots or backup objects. Full recovery and Point-In-Time
+Recovery are supported.
+The method can also be used to create clusters in continuous recovery
+(replica clusters), also supporting cascading replication when instances > 1.
+Once the cluster exits recovery, the password for the superuser
+will be changed through the provided secret.
+Refer to the Bootstrap page of the documentation for more information.
+
+
+
+Field | Description |
+
+backup
+BackupSource
+ |
+
+ The backup object containing the physical base backup from which to
+initiate the recovery procedure.
+Mutually exclusive with source and volumeSnapshots .
+ |
+
+source
+string
+ |
+
+ The external cluster whose backup we will restore. This is also
+used as the name of the folder under which the backup is stored,
+so it must be set to the name of the source cluster
+Mutually exclusive with backup .
+ |
+
+volumeSnapshots
+DataSource
+ |
+
+ The static PVC data source(s) from which to initiate the
+recovery procedure. Currently supporting VolumeSnapshot
+and PersistentVolumeClaim resources that map an existing
+PVC group, compatible with EDB Postgres for Kubernetes, and taken with
+a cold backup copy on a fenced Postgres instance (limitation
+which will be removed in the future when online backup
+will be implemented).
+Mutually exclusive with backup .
+ |
+
+recoveryTarget
+RecoveryTarget
+ |
+
+ By default, the recovery process applies all the available
+WAL files in the archive (full recovery). However, you can also
+end the recovery as soon as a consistent state is reached or
+recover to a point-in-time (PITR) by specifying a RecoveryTarget object,
+as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ |
+
+database
+string
+ |
+
+ Name of the database used by the application. Default: app .
+ |
+
+owner
+string
+ |
+
+ Name of the owner of the database in the instance to be used
+by applications. Defaults to the value of the database key.
+ |
+
+secret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ Name of the secret containing the initial credentials for the
+owner of the user database. If empty a new secret will be
+created from scratch
+ |
+
+
+
+
+
+
+## CatalogImage
+
+**Appears in:**
+
+- [ImageCatalogSpec](#postgresql-k8s-enterprisedb-io-v1-ImageCatalogSpec)
+
+CatalogImage defines the image and major version
+
+
+Field | Description |
+
+image [Required]
+string
+ |
+
+ The image reference
+ |
+
+major [Required]
+int
+ |
+
+ The PostgreSQL major version of the image. Must be unique within the catalog.
+ |
+
+
+
+
+
+
+## CertificatesConfiguration
+
+**Appears in:**
+
+- [CertificatesStatus](#postgresql-k8s-enterprisedb-io-v1-CertificatesStatus)
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+CertificatesConfiguration contains the needed configurations to handle server certificates.
+
+
+Field | Description |
+
+serverCASecret
+string
+ |
+
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+Contains:
+
+
+
+ca.crt : CA that should be used to validate the server certificate,
+used as sslrootcert in client connection strings.
+ca.key : key used to generate Server SSL certs, if ServerTLSSecret is provided,
+this can be omitted.
+
+ |
+
+serverTLSSecret
+string
+ |
+
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ssl_cert_file and ssl_key_file so that clients can connect to postgres securely.
+If not defined, ServerCASecret must provide also ca.key and a new secret will be
+created using the provided CA.
+ |
+
+replicationTLSSecret
+string
+ |
+
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+the streaming_replica user.
+If not defined, ClientCASecret must provide also ca.key , and a new secret will be
+created using the provided CA.
+ |
+
+clientCASecret
+string
+ |
+
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+with a self-signed CA and will be used to generate all the client certificates.
+
+Contains:
+
+
+
+ca.crt : CA that should be used to validate the client certificates,
+used as ssl_ca_file of all the instances.
+ca.key : key used to generate client certificates, if ReplicationTLSSecret is provided,
+this can be omitted.
+
+ |
+
+serverAltDNSNames
+[]string
+ |
+
+ The list of the server alternative DNS names to be added to the generated server TLS certificates, when required.
+ |
+
+
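+For illustration, a fragment of a `Cluster` spec using user-provided server certificates;
+the secret names are hypothetical:
+
+```yaml
+spec:
+  certificates:
+    serverCASecret: my-server-ca       # hypothetical secret containing ca.crt (and ca.key if needed)
+    serverTLSSecret: my-server-tls     # hypothetical kubernetes.io/tls secret
+    serverAltDNSNames:
+      - cluster-example.example.com
+```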
+
+
+
+
+## CertificatesStatus
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+CertificatesStatus contains configuration certificates and related expiration dates.
+
+
+Field | Description |
+
+CertificatesConfiguration
+CertificatesConfiguration
+ |
+(Members of CertificatesConfiguration are embedded into this type.)
+ Needed configurations to handle server certificates, initialized with default values, if needed.
+ |
+
+expirations
+map[string]string
+ |
+
+ Expiration dates for all certificates.
+ |
+
+
+
+
+
+
+## ClusterMonitoringTLSConfiguration
+
+**Appears in:**
+
+- [MonitoringConfiguration](#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration)
+
+ClusterMonitoringTLSConfiguration is the type containing the TLS configuration
+for the cluster's monitoring
+
+
+Field | Description |
+
+enabled
+bool
+ |
+
+ Enable TLS for the monitoring endpoint.
+Changing this option will force a rollout of all instances.
+ |
+
+
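+For illustration, a fragment of a `Cluster` spec, assuming this type is exposed under
+`.spec.monitoring.tls`:
+
+```yaml
+spec:
+  monitoring:
+    tls:
+      enabled: true   # note: changing this forces a rollout of all instances
+```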
+
+
+
+
+## ClusterSpec
+
+**Appears in:**
+
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+
+ClusterSpec defines the desired state of Cluster
+
+
+Field | Description |
+
+description
+string
+ |
+
+ Description of this PostgreSQL cluster
+ |
+
+inheritedMetadata
+EmbeddedObjectMetadata
+ |
+
+ Metadata that will be inherited by all objects related to the Cluster
+ |
+
+imageName
+string
+ |
+
+ Name of the container image, supporting both tags (<image>:<tag> )
+and digests for deterministic and repeatable deployments
+(<image>:<tag>@sha256:<digestValue> )
+ |
+
+imageCatalogRef
+ImageCatalogRef
+ |
+
+ Defines the major PostgreSQL version we want to use within an ImageCatalog
+ |
+
+imagePullPolicy
+core/v1.PullPolicy
+ |
+
+ Image pull policy.
+One of Always , Never or IfNotPresent .
+If not defined, it defaults to IfNotPresent .
+Cannot be updated.
+More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ |
+
+schedulerName
+string
+ |
+
+ If specified, the pod will be dispatched by specified Kubernetes
+scheduler. If not specified, the pod will be dispatched by the default
+scheduler. More info:
+https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
+ |
+
+postgresUID
+int64
+ |
+
+ The UID of the postgres user inside the image, defaults to 26
+ |
+
+postgresGID
+int64
+ |
+
+ The GID of the postgres user inside the image, defaults to 26
+ |
+
+instances [Required]
+int
+ |
+
+ Number of instances required in the cluster
+ |
+
+minSyncReplicas
+int
+ |
+
+ Minimum number of instances required in synchronous replication with the
+primary. Undefined or 0 allow writes to complete when no standby is
+available.
+ |
+
+maxSyncReplicas
+int
+ |
+
+ The target value for the synchronous replication quorum, that can be
+decreased if the number of ready standbys is lower than this.
+Undefined or 0 disables synchronous replication.
+ |
+
+postgresql
+PostgresConfiguration
+ |
+
+ Configuration of the PostgreSQL server
+ |
+
+replicationSlots
+ReplicationSlotsConfiguration
+ |
+
+ Replication slots management configuration
+ |
+
+bootstrap
+BootstrapConfiguration
+ |
+
+ Instructions to bootstrap this cluster
+ |
+
+replica
+ReplicaClusterConfiguration
+ |
+
+ Replica cluster configuration
+ |
+
+superuserSecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ The secret containing the superuser password. If not defined a new
+secret will be created with a randomly generated password
+ |
+
+enableSuperuserAccess
+bool
+ |
+
+ When this option is enabled, the operator will use the SuperuserSecret
+to update the postgres user password (if the secret is
+not present, the operator will automatically create one). When this
+option is disabled, the operator will ignore the SuperuserSecret content, delete
+it when automatically created, and then blank the password of the postgres
+user by setting it to NULL . Disabled by default.
+ |
+
+certificates
+CertificatesConfiguration
+ |
+
+ The configuration for the CA and related certificates
+ |
+
+imagePullSecrets
+[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ The list of pull secrets to be used to pull the images. If the license key
+contains a pull secret, that secret will be automatically included.
+ |
+
+storage
+StorageConfiguration
+ |
+
+ Configuration of the storage of the instances
+ |
+
+serviceAccountTemplate
+ServiceAccountTemplate
+ |
+
+ Configure the generation of the service account
+ |
+
+walStorage
+StorageConfiguration
+ |
+
+ Configuration of the storage for PostgreSQL WAL (Write-Ahead Log)
+ |
+
+ephemeralVolumeSource
+core/v1.EphemeralVolumeSource
+ |
+
+ EphemeralVolumeSource allows the user to configure the source of ephemeral volumes.
+ |
+
+startDelay
+int32
+ |
+
+ The time in seconds that is allowed for a PostgreSQL instance to
+successfully start up (default 3600).
+The startup probe failure threshold is derived from this value using the formula:
+ceiling(startDelay / 10).
+ |
+
+stopDelay
+int32
+ |
+
+ The time in seconds that is allowed for a PostgreSQL instance to
+gracefully shutdown (default 1800)
+ |
+
+smartStopDelay
+int32
+ |
+
+ Deprecated: please use SmartShutdownTimeout instead
+ |
+
+smartShutdownTimeout
+int32
+ |
+
+ The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete.
+Make sure you reserve enough time for the operator to request a fast shutdown of Postgres
+(that is: stopDelay - smartShutdownTimeout ).
+ |
+
+switchoverDelay
+int32
+ |
+
+ The time in seconds that is allowed for a primary PostgreSQL instance
+to gracefully shutdown during a switchover.
+Default value is 3600 seconds (1 hour).
+ |
+
+failoverDelay
+int32
+ |
+
+ The amount of time (in seconds) to wait before triggering a failover
+after the primary PostgreSQL instance in the cluster was detected
+to be unhealthy
+ |
+
+livenessProbeTimeout
+int32
+ |
+
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+to successfully respond to the liveness probe (default 30).
+The Liveness probe failure threshold is derived from this value using the formula:
+ceiling(livenessProbe / 10).
+ |
+
+affinity
+AffinityConfiguration
+ |
+
+ Affinity/Anti-affinity rules for Pods
+ |
+
+topologySpreadConstraints
+[]core/v1.TopologySpreadConstraint
+ |
+
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+More info:
+https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ |
+
+resources
+core/v1.ResourceRequirements
+ |
+
+ Resources requirements of every generated Pod. Please refer to
+https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+for more information.
+ |
+
+ephemeralVolumesSizeLimit
+EphemeralVolumesSizeLimitConfiguration
+ |
+
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+volumes
+ |
+
+priorityClassName
+string
+ |
+
+ Name of the priority class which will be used in every generated Pod. If the PriorityClass
+specified does not exist, the pod will not be able to schedule. Please refer to
+https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+for more information
+ |
+
+primaryUpdateStrategy
+PrimaryUpdateStrategy
+ |
+
+ Deployment strategy to follow to upgrade the primary server during a rolling
+update procedure, after all replicas have been successfully updated:
+it can be automated (unsupervised - default) or manual (supervised )
+ |
+
+primaryUpdateMethod
+PrimaryUpdateMethod
+ |
+
+ Method to follow to upgrade the primary server during a rolling
+update procedure, after all replicas have been successfully updated:
+it can be with a switchover (switchover ) or in-place (restart - default)
+ |
+
+backup
+BackupConfiguration
+ |
+
+ The configuration to be used for backups
+ |
+
+nodeMaintenanceWindow
+NodeMaintenanceWindow
+ |
+
+ Define a maintenance window for the Kubernetes nodes
+ |
+
+licenseKey
+string
+ |
+
+ The license key of the cluster. When empty, the cluster operates in
+trial mode and after the expiry date (default 30 days) the operator
+will cease any reconciliation attempt. For details, please refer to
+the license agreement that comes with the operator.
+ |
+
+licenseKeySecret
+core/v1.SecretKeySelector
+ |
+
+ The reference to the license key. When this is set, it takes precedence over LicenseKey.
+ |
+
+monitoring
+MonitoringConfiguration
+ |
+
+ The configuration of the monitoring infrastructure of this cluster
+ |
+
+externalClusters
+ExternalClusterList
+ |
+
+ The list of external clusters which are used in the configuration
+ |
+
+logLevel
+string
+ |
+
+ The instances' log level, one of the following values: error, warning, info (default), debug, trace
+ |
+
+projectedVolumeTemplate
+core/v1.ProjectedVolumeSource
+ |
+
+ Template to be used to define projected volumes, projected volumes will be mounted
+under /projected base folder
+ |
+
+env
+[]core/v1.EnvVar
+ |
+
+ Env follows the Env format to pass environment variables
+to the pods created in the cluster
+ |
+
+envFrom
+[]core/v1.EnvFromSource
+ |
+
+ EnvFrom follows the EnvFrom format to pass environment variables
+sources to the pods to be used by Env
+ |
+
+managed
+ManagedConfiguration
+ |
+
+ The configuration that is used by the portions of PostgreSQL that are managed by the instance manager
+ |
+
+seccompProfile
+core/v1.SeccompProfile
+ |
+
+ The SeccompProfile applied to every Pod and Container.
+Defaults to: RuntimeDefault
+ |
+
+tablespaces
+[]TablespaceConfiguration
+ |
+
+ The tablespaces configuration
+ |
+
+enablePDB
+bool
+ |
+
+ Manage the PodDisruptionBudget resources within the cluster. When
+configured as true (default setting), the pod disruption budgets
+will safeguard the primary node from being terminated. Conversely,
+setting it to false will result in the absence of any
+PodDisruptionBudget resource, permitting the shutdown of all nodes
+hosting the PostgreSQL cluster. This latter configuration is
+advisable for any PostgreSQL cluster employed for
+development/staging purposes.
+ |
+
+plugins
+PluginConfigurationList
+ |
+
+ The plugins configuration, containing
+any plugin to be loaded with the corresponding configuration
+ |
+
+
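+For illustration, a minimal `Cluster` manifest exercising only a few of the fields above;
+the name and sizing are hypothetical:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  primaryUpdateStrategy: unsupervised
+  storage:
+    size: 1Gi
+```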
+
+
+
+
+## ClusterStatus
+
+**Appears in:**
+
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+
+ClusterStatus defines the observed state of Cluster
+
+
+Field | Description |
+
+instances
+int
+ |
+
+ The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods.
+ |
+
+readyInstances
+int
+ |
+
+ The total number of ready instances in the cluster. It is equal to the number of ready instance pods.
+ |
+
+instancesStatus
+map[PodStatus][]string
+ |
+
+ InstancesStatus indicates in which status the instances are
+ |
+
+instancesReportedState
+map[PodName]InstanceReportedState
+ |
+
+ The reported state of the instances during the last reconciliation loop
+ |
+
+managedRolesStatus
+ManagedRoles
+ |
+
+ ManagedRolesStatus reports the state of the managed roles in the cluster
+ |
+
+tablespacesStatus
+[]TablespaceState
+ |
+
+ TablespacesStatus reports the state of the declarative tablespaces in the cluster
+ |
+
+timelineID
+int
+ |
+
+ The timeline of the Postgres cluster
+ |
+
+topology
+Topology
+ |
+
+ Instances topology.
+ |
+
+latestGeneratedNode
+int
+ |
+
+ ID of the latest generated node (used to avoid node name clashing)
+ |
+
+currentPrimary
+string
+ |
+
+ Current primary instance
+ |
+
+targetPrimary
+string
+ |
+
+ Target primary instance, this is different from the previous one
+during a switchover or a failover
+ |
+
+lastPromotionToken
+string
+ |
+
+ LastPromotionToken is the last verified promotion token that
+was used to promote a replica cluster
+ |
+
+pvcCount
+int32
+ |
+
+ How many PVCs have been created by this cluster
+ |
+
+jobCount
+int32
+ |
+
+ How many Jobs have been created by this cluster
+ |
+
+danglingPVC
+[]string
+ |
+
+ List of all the PVCs created by this cluster and still available
+which are not attached to a Pod
+ |
+
+resizingPVC
+[]string
+ |
+
+ List of all the PVCs that have ResizingPVC condition.
+ |
+
+initializingPVC
+[]string
+ |
+
+ List of all the PVCs that are being initialized by this cluster
+ |
+
+healthyPVC
+[]string
+ |
+
+ List of all the PVCs not dangling nor initializing
+ |
+
+unusablePVC
+[]string
+ |
+
+ List of all the PVCs that are unusable because another PVC is missing
+ |
+
+licenseStatus
+github.com/EnterpriseDB/cloud-native-postgres/pkg/licensekey.Status
+ |
+
+ Status of the license
+ |
+
+writeService
+string
+ |
+
+ Current write pod
+ |
+
+readService
+string
+ |
+
+ Current list of read pods
+ |
+
+phase
+string
+ |
+
+ Current phase of the cluster
+ |
+
+phaseReason
+string
+ |
+
+ Reason for the current phase
+ |
+
+secretsResourceVersion
+SecretsResourceVersion
+ |
+
+ The list of resource versions of the secrets
+managed by the operator. Every change here is done in the
+interest of the instance manager, which will refresh the
+secret data
+ |
+
+configMapResourceVersion
+ConfigMapResourceVersion
+ |
+
+ The list of resource versions of the configmaps,
+managed by the operator. Every change here is done in the
+interest of the instance manager, which will refresh the
+configmap data
+ |
+
+certificates
+CertificatesStatus
+ |
+
+ The configuration for the CA and related certificates, initialized with defaults.
+ |
+
+firstRecoverabilityPoint
+string
+ |
+
+ The first recoverability point, stored as a date in RFC3339 format.
+This field is calculated from the content of FirstRecoverabilityPointByMethod
+ |
+
+firstRecoverabilityPointByMethod
+map[BackupMethod]meta/v1.Time
+ |
+
+ The first recoverability point, stored as a date in RFC3339 format, per backup method type
+ |
+
+lastSuccessfulBackup
+string
+ |
+
+ Last successful backup, stored as a date in RFC3339 format
+This field is calculated from the content of LastSuccessfulBackupByMethod
+ |
+
+lastSuccessfulBackupByMethod
+map[BackupMethod]meta/v1.Time
+ |
+
+ Last successful backup, stored as a date in RFC3339 format, per backup method type
+ |
+
+lastFailedBackup
+string
+ |
+
+ Stored as a date in RFC3339 format
+ |
+
+cloudNativePostgresqlCommitHash
+string
+ |
+
+ The commit hash of the operator that is running
+ |
+
+currentPrimaryTimestamp
+string
+ |
+
+ The timestamp when the last actual promotion to primary has occurred
+ |
+
+currentPrimaryFailingSinceTimestamp
+string
+ |
+
+ The timestamp when the primary was detected to be unhealthy
+This field is reported when .spec.failoverDelay is populated or during online upgrades
+ |
+
+targetPrimaryTimestamp
+string
+ |
+
+ The timestamp when the last request for a new primary has occurred
+ |
+
+poolerIntegrations
+PoolerIntegrations
+ |
+
+ The integration needed by poolers referencing the cluster
+ |
+
+cloudNativePostgresqlOperatorHash
+string
+ |
+
+ The hash of the binary of the operator
+ |
+
+availableArchitectures
+[]AvailableArchitecture
+ |
+
+ AvailableArchitectures reports the available architectures of a cluster
+ |
+
+conditions
+[]meta/v1.Condition
+ |
+
+ Conditions for cluster object
+ |
+
+instanceNames
+[]string
+ |
+
+ List of instance names in the cluster
+ |
+
+onlineUpdateEnabled
+bool
+ |
+
+ OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster
+ |
+
+azurePVCUpdateEnabled
+bool
+ |
+
+ AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster
+ |
+
+image
+string
+ |
+
+ Image contains the image name used by the pods
+ |
+
+pluginStatus
+[]PluginStatus
+ |
+
+ PluginStatus is the status of the loaded plugins
+ |
+
+switchReplicaClusterStatus
+SwitchReplicaClusterStatus
+ |
+
+ SwitchReplicaClusterStatus is the status of the switch to replica cluster
+ |
+
+demotionToken
+string
+ |
+
+ DemotionToken is a JSON token containing the information
+from pg_controldata such as Database system identifier, Latest checkpoint's
+TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+WAL file, and Time of latest checkpoint
+ |
+
+
+
+
+
+
+## ConfigMapResourceVersion
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+ConfigMapResourceVersion is the resource versions of the config maps
+managed by the operator
+
+
+Field | Description |
+
+metrics
+map[string]string
+ |
+
+ A map with the versions of all the config maps used to pass metrics.
+Map keys are the config map names, map values are the versions
+ |
+
+
+
+
+
+
+## DataDurabilityLevel
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [SynchronousReplicaConfiguration](#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration)
+
+DataDurabilityLevel specifies how strictly to enforce synchronous replication
+when cluster instances are unavailable. Options are required or preferred.
+
+
+
+## DataSource
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+DataSource contains the configuration required to bootstrap a
+PostgreSQL cluster from an existing storage
+
+
+
+
+
+## DatabaseReclaimPolicy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec)
+
+DatabaseReclaimPolicy describes a policy for end-of-life maintenance of databases.
+
+
+
+## DatabaseRoleRef
+
+**Appears in:**
+
+- [TablespaceConfiguration](#postgresql-k8s-enterprisedb-io-v1-TablespaceConfiguration)
+
+DatabaseRoleRef is a reference to a role available inside PostgreSQL
+
+
+Field | Description |
+
+name
+string
+ |
+
+ No description provided. |
+
+
+
+
+
+
+## DatabaseSpec
+
+**Appears in:**
+
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
+
+DatabaseSpec is the specification of a PostgreSQL Database
+
+
+Field | Description |
+
+cluster [Required]
+core/v1.LocalObjectReference
+ |
+
+ The corresponding cluster
+ |
+
+ensure
+EnsureOption
+ |
+
+ Ensure the PostgreSQL database is present or absent - defaults to "present"
+ |
+
+name [Required]
+string
+ |
+
+ The name inside PostgreSQL
+ |
+
+owner [Required]
+string
+ |
+
+ The owner
+ |
+
+template
+string
+ |
+
+ The name of the template from which to create the new database
+ |
+
+encoding
+string
+ |
+
+ The encoding (cannot be changed)
+ |
+
+locale
+string
+ |
+
+ The locale (cannot be changed)
+ |
+
+locale_provider
+string
+ |
+
+ The locale provider (cannot be changed)
+ |
+
+lc_collate
+string
+ |
+
+ The LC_COLLATE (cannot be changed)
+ |
+
+lc_ctype
+string
+ |
+
+ The LC_CTYPE (cannot be changed)
+ |
+
+icu_locale
+string
+ |
+
+ The ICU_LOCALE (cannot be changed)
+ |
+
+icu_rules
+string
+ |
+
+ The ICU_RULES (cannot be changed)
+ |
+
+builtin_locale
+string
+ |
+
+ The BUILTIN_LOCALE (cannot be changed)
+ |
+
+collation_version
+string
+ |
+
+ The COLLATION_VERSION (cannot be changed)
+ |
+
+isTemplate
+bool
+ |
+
+ True when the database is a template
+ |
+
+allowConnections
+bool
+ |
+
+ True when connections to this database are allowed
+ |
+
+connectionLimit
+int
+ |
+
+ Connection limit, -1 means no limit and -2 means the
+database is not valid
+ |
+
+tablespace
+string
+ |
+
+ The default tablespace of this database
+ |
+
+databaseReclaimPolicy
+DatabaseReclaimPolicy
+ |
+
+ The policy for end-of-life maintenance of this database
+ |
+
+
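+For illustration, a minimal `Database` manifest; the resource and cluster names are
+hypothetical:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+  name: db-one                 # hypothetical name
+spec:
+  name: one                    # name of the database inside PostgreSQL
+  owner: app
+  cluster:
+    name: cluster-example      # hypothetical Cluster in the same namespace
+```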
+
+
+
+
+## DatabaseStatus
+
+**Appears in:**
+
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
+
+DatabaseStatus defines the observed state of Database
+
+
+Field | Description |
+
+observedGeneration
+int64
+ |
+
+ A sequence number representing the latest
+desired state that was synchronized
+ |
+
+applied
+bool
+ |
+
+ Applied is true if the database was reconciled correctly
+ |
+
+message
+string
+ |
+
+ Message is the reconciliation output message
+ |
+
+
+
+
+
+
+## EPASConfiguration
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+EPASConfiguration contains EDB Postgres Advanced Server specific configurations
+
+
+Field | Description |
+
+audit
+bool
+ |
+
+ If true enables edb_audit logging
+ |
+
+tde
+TDEConfiguration
+ |
+
+ TDE configuration
+ |
+
+
+
+
+
+
+## EmbeddedObjectMetadata
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+EmbeddedObjectMetadata contains metadata to be inherited by all resources related to a Cluster
+
+
+Field | Description |
+
+labels
+map[string]string
+ |
+
+ No description provided. |
+
+annotations
+map[string]string
+ |
+
+ No description provided. |
+
+
+
+
+
+
+## EnsureOption
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec)
+
+- [RoleConfiguration](#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration)
+
+EnsureOption represents whether we should enforce the presence or absence of
+a Role in a PostgreSQL instance
+
+
+
+## EphemeralVolumesSizeLimitConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+EphemeralVolumesSizeLimitConfiguration contains the configuration of the ephemeral
+storage
+
+
+
+
+
+## ImageCatalogRef
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ImageCatalogRef defines the reference to a major version in an ImageCatalog
+
+
+Field | Description |
+
+TypedLocalObjectReference
+core/v1.TypedLocalObjectReference
+ |
+(Members of TypedLocalObjectReference are embedded into this type.)
+ No description provided. |
+
+major [Required]
+int
+ |
+
+ The major version of PostgreSQL we want to use from the ImageCatalog
+ |
+
+
+
+
+
+
+## ImageCatalogSpec
+
+**Appears in:**
+
+- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog)
+
+- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog)
+
+ImageCatalogSpec defines the desired ImageCatalog
+
+
+Field | Description |
+
+images [Required]
+[]CatalogImage
+ |
+
+ List of CatalogImages available in the catalog
+ |
+
+
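+For illustration, a minimal `ImageCatalog` manifest; the catalog name and image
+references are hypothetical:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ImageCatalog
+metadata:
+  name: postgresql-catalog                       # hypothetical name
+spec:
+  images:
+    - major: 16
+      image: registry.example.com/postgresql:16  # hypothetical image reference
+    - major: 17
+      image: registry.example.com/postgresql:17  # hypothetical image reference
+```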
+
+
+
+
+## Import
+
+**Appears in:**
+
+- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB)
+
+Import contains the configuration to init a database from a logical snapshot of an externalCluster
+
+
+Field | Description |
+
+source [Required]
+ImportSource
+ |
+
+ The source of the import
+ |
+
+type [Required]
+SnapshotType
+ |
+
+ The import type. Can be microservice or monolith .
+ |
+
+databases [Required]
+[]string
+ |
+
+ The databases to import
+ |
+
+roles
+[]string
+ |
+
+ The roles to import
+ |
+
+postImportApplicationSQL
+[]string
+ |
+
+ List of SQL queries to be executed as a superuser in the application
+database right after it is imported - to be used with extreme care
+(by default empty). Only available in microservice type.
+ |
+
+schemaOnly
+bool
+ |
+
+ When set to true, only the pre-data and post-data sections of
+pg_restore are invoked, avoiding data import. Default: false .
+ |
+
+
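+For illustration, a fragment of a `Cluster` spec importing a single database; the external
+cluster name is hypothetical and must be defined under `externalClusters`:
+
+```yaml
+spec:
+  bootstrap:
+    initdb:
+      import:
+        type: microservice
+        databases:
+          - app
+        source:
+          externalCluster: cluster-source   # hypothetical externalClusters entry
+```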
+
+
+
+
+## ImportSource
+
+**Appears in:**
+
+- [Import](#postgresql-k8s-enterprisedb-io-v1-Import)
+
+ImportSource describes the source for the logical snapshot
+
+
+Field | Description |
+
+externalCluster [Required]
+string
+ |
+
+ The name of the externalCluster used for import
+ |
+
+
+
+
+
+
+## InstanceID
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+InstanceID contains the information to identify an instance
+
+
+Field | Description |
+
+podName
+string
+ |
+
+ The pod name
+ |
+
+ContainerID
+string
+ |
+
+ The container ID
+ |
+
+
+
+
+
+
+## InstanceReportedState
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+InstanceReportedState describes the last reported state of an instance during a reconciliation loop
+
+
+Field | Description |
+
+isPrimary [Required]
+bool
+ |
+
+ indicates if an instance is the primary one
+ |
+
+timeLineID
+int
+ |
+
+ indicates on which TimelineId the instance is
+ |
+
+
+
+
+
+
+## LDAPBindAsAuth
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPBindAsAuth provides the required fields to use the
+bind authentication for LDAP
+
+
+Field | Description |
+
+prefix
+string
+ |
+
+ Prefix for the bind authentication option
+ |
+
+suffix
+string
+ |
+
+ Suffix for the bind authentication option
+ |
+
+
+
+
+
+
+## LDAPBindSearchAuth
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPBindSearchAuth provides the required fields to use
+the bind+search LDAP authentication process
+
+
+Field | Description |
+
+baseDN
+string
+ |
+
+ Root DN to begin the user search
+ |
+
+bindDN
+string
+ |
+
+ DN of the user to bind to the directory
+ |
+
+bindPassword
+core/v1.SecretKeySelector
+ |
+
+ Secret with the password for the user to bind to the directory
+ |
+
+searchAttribute
+string
+ |
+
+ Attribute to match against the username
+ |
+
+searchFilter
+string
+ |
+
+ Search filter to use when doing the search+bind authentication
+ |
+
+
+
+
+
+
+## LDAPConfig
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+LDAPConfig contains the parameters needed for LDAP authentication
+
+
+Field | Description |
+
+server
+string
+ |
+
+ LDAP hostname or IP address
+ |
+
+port
+int
+ |
+
+ LDAP server port
+ |
+
+scheme
+LDAPScheme
+ |
+
+ LDAP scheme to be used, possible options are ldap and ldaps
+ |
+
+bindAsAuth
+LDAPBindAsAuth
+ |
+
+ Bind as authentication configuration
+ |
+
+bindSearchAuth
+LDAPBindSearchAuth
+ |
+
+ Bind+Search authentication configuration
+ |
+
+tls
+bool
+ |
+
+ Set to 'true' to enable LDAP over TLS. 'false' is default
+ |
+
+
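+For illustration, a fragment of a `Cluster` spec enabling LDAP bind+search authentication;
+the server, DNs, and secret name are hypothetical:
+
+```yaml
+spec:
+  postgresql:
+    ldap:
+      server: ldap.example.com              # hypothetical LDAP server
+      port: 389
+      scheme: ldap
+      bindSearchAuth:
+        baseDN: ou=people,dc=example,dc=com
+        bindDN: cn=admin,dc=example,dc=com
+        bindPassword:
+          name: ldap-bind-password          # hypothetical secret
+          key: password
+        searchAttribute: uid
+```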
+
+
+
+
+## LDAPScheme
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPScheme defines the possible schemes for LDAP
+
+
+
+## ManagedConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ManagedConfiguration represents the portions of PostgreSQL that are managed
+by the instance manager
+
+
+Field | Description |
+
+roles
+[]RoleConfiguration
+ |
+
+ Database roles managed by the Cluster
+ |
+
+services
+ManagedServices
+ |
+
+ Services managed by the Cluster
+ |
+
+
+
+
+
+
+## ManagedRoles
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+ManagedRoles tracks the status of a cluster's managed roles
+
+
+Field | Description |
+
+byStatus
+map[RoleStatus][]string
+ |
+
+ ByStatus gives the list of roles in each state
+ |
+
+cannotReconcile
+map[string][]string
+ |
+
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+with an explanation of the cause
+ |
+
+passwordStatus
+map[string]PasswordState
+ |
+
+ PasswordStatus gives the last transaction id and password secret version for each managed role
+ |
+
+
+
+
+
+
+## ManagedService
+
+**Appears in:**
+
+- [ManagedServices](#postgresql-k8s-enterprisedb-io-v1-ManagedServices)
+
+ManagedService represents a specific service managed by the cluster.
+It includes the type of service and its associated template specification.
+
+
+Field | Description |
+
+selectorType [Required]
+ServiceSelectorType
+ |
+
+ SelectorType specifies the type of selectors that the service will have.
+Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+ |
+
+updateStrategy
+ServiceUpdateStrategy
+ |
+
+ UpdateStrategy describes how the service differences should be reconciled
+ |
+
+serviceTemplate [Required]
+ServiceTemplateSpec
+ |
+
+ ServiceTemplate is the template specification for the service.
+ |
+
+
+
+
+
+
+## ManagedServices
+
+**Appears in:**
+
+- [ManagedConfiguration](#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration)
+
+ManagedServices represents the services managed by the cluster.
+
+
+Field | Description |
+
+disabledDefaultServices
+[]ServiceSelectorType
+ |
+
+ DisabledDefaultServices is a list of service types that are disabled by default.
+Valid values are "r", and "ro", representing read, and read-only services.
+ |
+
+additional
+[]ManagedService
+ |
+
+ Additional is a list of additional managed services specified by the user.
+ |
+
+
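+For illustration, a fragment of a `Cluster` spec disabling the default read services and
+adding an extra read-write service; the service name is hypothetical:
+
+```yaml
+spec:
+  managed:
+    services:
+      disabledDefaultServices: ["ro", "r"]
+      additional:
+        - selectorType: rw
+          serviceTemplate:
+            metadata:
+              name: cluster-example-rw-lb   # hypothetical service name
+            spec:
+              type: LoadBalancer
+```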
+
+
+
+
+## Metadata
+
+**Appears in:**
+
+- [PodTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-PodTemplateSpec)
+
+- [ServiceAccountTemplate](#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate)
+
+- [ServiceTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-ServiceTemplateSpec)
+
+Metadata is a structure similar to the metav1.ObjectMeta, but still
+parseable by controller-gen to create a suitable CRD for the user.
+The comment of PodTemplateSpec has an explanation of why we are
+not using the core data types.
+
+
+Field | Description |
+
+name
+string
+ |
+
+ The name of the resource. Only supported for certain types
+ |
+
+labels
+map[string]string
+ |
+
+ Map of string keys and values that can be used to organize and categorize
+(scope and select) objects. May match selectors of replication controllers
+and services.
+More info: http://kubernetes.io/docs/user-guide/labels
+ |
+
+annotations
+map[string]string
+ |
+
+ Annotations is an unstructured key value map stored with a resource that may be
+set by external tools to store and retrieve arbitrary metadata. They are not
+queryable and should be preserved when modifying objects.
+More info: http://kubernetes.io/docs/user-guide/annotations
+ |
+
+
+
+
+
+
+## MonitoringConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+MonitoringConfiguration is the type containing all the monitoring
+configuration for a certain cluster
+
+
+
+
+
+## NodeMaintenanceWindow
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+NodeMaintenanceWindow contains information that the operator
+will use while upgrading the underlying node.
+This option is only useful when the chosen storage prevents the Pods
+from being freely moved across nodes.
+
+
+Field | Description |
+
+reusePVC
+bool
+ |
+
+ Reuse the existing PVC (wait for the node to come
+up again) or not (recreate it elsewhere - when instances >1)
+ |
+
+inProgress
+bool
+ |
+
+ Is there a node maintenance activity in progress?
+ |
+
+
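+For illustration, a fragment of a `Cluster` spec declaring an active maintenance window
+where PVCs are recreated on other nodes:
+
+```yaml
+spec:
+  nodeMaintenanceWindow:
+    inProgress: true
+    reusePVC: false
+```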
+
+
+
+
+## OnlineConfiguration
+
+**Appears in:**
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+- [VolumeSnapshotConfiguration](#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration)
+
+OnlineConfiguration contains the configuration parameters for the online volume snapshot
+
+
+Field | Description |
+
+waitForArchive
+bool
+ |
+
+ If false, the function will return immediately after the backup is completed,
+without waiting for WAL to be archived.
+This behavior is only useful with backup software that independently monitors WAL archiving.
+Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+enabled.
+On a standby, this means that it will wait only when archive_mode = always.
+If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+an immediate segment switch.
+ |
+
+immediateCheckpoint
+bool
+ |
+
+ Control whether the I/O workload for the backup initial checkpoint will
+be limited, according to the checkpoint_completion_target setting on
+the PostgreSQL server. If set to true, an immediate checkpoint will be
+used, meaning PostgreSQL will complete the checkpoint as soon as
+possible. false by default.
+ |
+
+
+
+
+
+
+## PasswordState
+
+**Appears in:**
+
+- [ManagedRoles](#postgresql-k8s-enterprisedb-io-v1-ManagedRoles)
+
+PasswordState represents the state of the password of a managed RoleConfiguration
+
+
+Field | Description |
+
+transactionID
+int64
+ |
+
+ the last transaction ID to affect the role definition in PostgreSQL
+ |
+
+resourceVersion
+string
+ |
+
+ the resource version of the password secret
+ |
+
+
+
+
+
+
+## PgBouncerIntegrationStatus
+
+**Appears in:**
+
+- [PoolerIntegrations](#postgresql-k8s-enterprisedb-io-v1-PoolerIntegrations)
+
+PgBouncerIntegrationStatus encapsulates the needed integration for the pgbouncer poolers referencing the cluster
+
+
+Field | Description |
+
+secrets
+[]string
+ |
+
+ No description provided. |
+
+
+
+
+
+
+## PgBouncerPoolMode
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [PgBouncerSpec](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec)
+
+PgBouncerPoolMode is the mode of PgBouncer
+
+
+
+## PgBouncerSecrets
+
+**Appears in:**
+
+- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets)
+
+PgBouncerSecrets contains the versions of the secrets used
+by pgbouncer
+
+
+Field | Description |
+
+authQuery
+SecretVersion
+ |
+
+ The auth query secret version
+ |
+
+
+
+
+
+
+## PgBouncerSpec
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PgBouncerSpec defines how to configure PgBouncer
+
+
+Field | Description |
+
+poolMode
+PgBouncerPoolMode
+ |
+
+ The pool mode. Default: session .
+ |
+
+authQuerySecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ The credentials of the user that need to be used for the authentication
+query. In case it is specified, also an AuthQuery
+(e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+has to be specified and no automatic CNP Cluster integration will be triggered.
+ |
+
+authQuery
+string
+ |
+
+ The query that will be used to download the hash of the password
+of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+In case it is specified, also an AuthQuerySecret has to be specified and
+no automatic CNP Cluster integration will be triggered.
+ |
+
+parameters
+map[string]string
+ |
+
+ Additional parameters to be passed to PgBouncer - please check
+the CNP documentation for a list of options you can configure
+ |
+
+pg_hba
+[]string
+ |
+
+ PostgreSQL Host Based Authentication rules (lines to be appended
+to the pg_hba.conf file)
+ |
+
+paused
+bool
+ |
+
+ When set to true , PgBouncer will disconnect from the PostgreSQL
+server, first waiting for all queries to complete, and pause all new
+client connections until this value is set to false (default). Internally,
+the operator calls PgBouncer's PAUSE and RESUME commands.
+ |
+
+
+
+
+
+
+## PluginConfiguration
+
+**Appears in:**
+
+PluginConfiguration specifies a plugin that needs to be loaded for this
+cluster to be reconciled
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the plugin name
+ |
+
+enabled
+bool
+ |
+
+ Enabled is true if this plugin will be used
+ |
+
+parameters
+map[string]string
+ |
+
+ Parameters is the configuration of the plugin
+ |
+
+
+
+
+
+
+## PluginConfigurationList
+
+(Alias of `[]github.com/EnterpriseDB/cloud-native-postgres/api/v1.PluginConfiguration`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PluginConfigurationList represents a set of plugins with their
+configuration parameters
+
+
+
+## PluginStatus
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+PluginStatus is the status of a loaded plugin
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the name of the plugin
+ |
+
+version [Required]
+string
+ |
+
+ Version is the version of the plugin loaded by the
+latest reconciliation loop
+ |
+
+capabilities
+[]string
+ |
+
+ Capabilities are the list of capabilities of the
+plugin
+ |
+
+operatorCapabilities
+[]string
+ |
+
+ OperatorCapabilities are the list of capabilities of the
+plugin regarding the reconciler
+ |
+
+walCapabilities
+[]string
+ |
+
+ WALCapabilities are the list of capabilities of the
+plugin regarding the WAL management
+ |
+
+backupCapabilities
+[]string
+ |
+
+ BackupCapabilities are the list of capabilities of the
+plugin regarding the Backup management
+ |
+
+status
+string
+ |
+
+ Status contains the status reported by the plugin through the SetStatusInCluster interface
+ |
+
+
+
+
+
+
+## PodTemplateSpec
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PodTemplateSpec is a structure allowing the user to set
+a template for Pod generation.
+Unfortunately we can't use the corev1.PodTemplateSpec
+type because the generated CRD won't have the field for the
+metadata section.
+References:
+https://github.com/kubernetes-sigs/controller-tools/issues/385
+https://github.com/kubernetes-sigs/controller-tools/issues/448
+https://github.com/prometheus-operator/prometheus-operator/issues/3041
+
+
+Field | Description |
+
+metadata
+Metadata
+ |
+
+ Standard object's metadata.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ |
+
+spec
+core/v1.PodSpec
+ |
+
+ Specification of the desired behavior of the pod.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
+## PodTopologyLabels
+
+(Alias of `map[string]string`)
+
+**Appears in:**
+
+- [Topology](#postgresql-k8s-enterprisedb-io-v1-Topology)
+
+PodTopologyLabels represent the topology of a Pod. map[labelName]labelValue
+
+
+
+## PoolerIntegrations
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+PoolerIntegrations encapsulates the needed integration for the poolers referencing the cluster
+
+
+
+
+
+## PoolerMonitoringConfiguration
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PoolerMonitoringConfiguration is the type containing all the monitoring
+configuration for a certain Pooler.
+Mirrors the Cluster's MonitoringConfiguration but without the custom queries
+part for now.
+
+
+
+
+
+## PoolerSecrets
+
+**Appears in:**
+
+- [PoolerStatus](#postgresql-k8s-enterprisedb-io-v1-PoolerStatus)
+
+PoolerSecrets contains the versions of all the secrets used
+
+
+Field | Description |
+
+serverTLS
+SecretVersion
+ |
+
+ The server TLS secret version
+ |
+
+serverCA
+SecretVersion
+ |
+
+ The server CA secret version
+ |
+
+clientCA
+SecretVersion
+ |
+
+ The client CA secret version
+ |
+
+pgBouncerSecrets
+PgBouncerSecrets
+ |
+
+ The version of the secrets used by PgBouncer
+ |
+
+
+
+
+
+
+## PoolerSpec
+
+**Appears in:**
+
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+
+PoolerSpec defines the desired state of Pooler
+
+
+Field | Description |
+
+cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ This is the cluster reference on which the Pooler will work.
+Pooler name should never match with any cluster name within the same namespace.
+ |
+
+type
+PoolerType
+ |
+
+ Type of service to forward traffic to. Default: rw .
+ |
+
+instances
+int32
+ |
+
+ The number of replicas we want. Default: 1.
+ |
+
+template
+PodTemplateSpec
+ |
+
+ The template of the Pod to be created
+ |
+
+pgbouncer [Required]
+PgBouncerSpec
+ |
+
+ The PgBouncer configuration
+ |
+
+deploymentStrategy
+apps/v1.DeploymentStrategy
+ |
+
+ The deployment strategy to use for pgbouncer to replace existing pods with new ones
+ |
+
+monitoring
+PoolerMonitoringConfiguration
+ |
+
+ The configuration of the monitoring infrastructure of this pooler.
+ |
+
+serviceTemplate
+ServiceTemplateSpec
+ |
+
+ Template for the Service to be created
+ |
+
+
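+For illustration, a minimal `Pooler` manifest targeting the read-write service of a
+hypothetical cluster named `cluster-example`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Pooler
+metadata:
+  name: pooler-example-rw         # hypothetical name (must not match a Cluster name)
+spec:
+  cluster:
+    name: cluster-example
+  instances: 3
+  type: rw
+  pgbouncer:
+    poolMode: session
+    parameters:
+      max_client_conn: "1000"
+      default_pool_size: "10"
+```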
+
+
+
+
+## PoolerStatus
+
+**Appears in:**
+
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+
+PoolerStatus defines the observed state of Pooler
+
+
+Field | Description |
+
+secrets
+PoolerSecrets
+ |
+
+ The resource version of the config object
+ |
+
+instances
+int32
+ |
+
+ The number of pods trying to be scheduled
+ |
+
+
+
+
+
+
+## PoolerType
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PoolerType is the type of the connection pool, meaning the service
+we are targeting. Allowed values are rw and ro.
+
+
+
+## PostgresConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PostgresConfiguration defines the PostgreSQL configuration
+
+
+Field | Description |
+
+parameters
+map[string]string
+ |
+
+ PostgreSQL configuration options (postgresql.conf)
+ |
+
+synchronous
+SynchronousReplicaConfiguration
+ |
+
+ Configuration of the PostgreSQL synchronous replication feature
+ |
+
+pg_hba
+[]string
+ |
+
+ PostgreSQL Host Based Authentication rules (lines to be appended
+to the pg_hba.conf file)
+ |
+
+pg_ident
+[]string
+ |
+
+ PostgreSQL User Name Maps rules (lines to be appended
+to the pg_ident.conf file)
+ |
+
+epas
+EPASConfiguration
+ |
+
+ EDB Postgres Advanced Server specific configurations
+ |
+
+syncReplicaElectionConstraint
+SyncReplicaElectionConstraints
+ |
+
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+set up.
+ |
+
+shared_preload_libraries
+[]string
+ |
+
+ Lists of shared preload libraries to add to the default ones
+ |
+
+ldap
+LDAPConfig
+ |
+
+ Options to specify LDAP configuration
+ |
+
+promotionTimeout
+int32
+ |
+
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+Default value is 40000000, greater than one year in seconds,
+big enough to simulate an infinite timeout
+ |
+
+enableAlterSystem
+bool
+ |
+
+ If this parameter is true, the user will be able to invoke ALTER SYSTEM
+on this EDB Postgres for Kubernetes Cluster.
+This should only be used for debugging and troubleshooting.
+Defaults to false.
+ |
+
+
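+For illustration, a fragment of a `Cluster` spec setting a few PostgreSQL parameters and
+an additional pg_hba rule; the values are hypothetical:
+
+```yaml
+spec:
+  postgresql:
+    parameters:
+      max_connections: "200"
+      shared_buffers: "256MB"
+    pg_hba:
+      - host all all 10.0.0.0/8 scram-sha-256
+```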
+
+
+
+
+## PrimaryUpdateMethod
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PrimaryUpdateMethod contains the method to use when upgrading
+the primary server of the cluster as part of rolling updates
+
+
+
+## PrimaryUpdateStrategy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PrimaryUpdateStrategy contains the strategy to follow when upgrading
+the primary server of the cluster as part of rolling updates
+
+
+
+## RecoveryTarget
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+RecoveryTarget allows you to configure the moment where the recovery process
+will stop. All the target options except TargetTLI are mutually exclusive.
+
+
+Field | Description |
+
+backupID
+string
+ |
+
+ The ID of the backup from which to start the recovery process.
+If empty (default) the operator will automatically detect the backup
+based on targetTime or targetLSN if specified. Otherwise, it uses the
+latest available backup in chronological order.
+ |
+
+targetTLI
+string
+ |
+
+ The target timeline ("latest" or a positive integer)
+ |
+
+targetXID
+string
+ |
+
+ The target transaction ID
+ |
+
+targetName
+string
+ |
+
+ The target name (to be previously created
+with pg_create_restore_point )
+ |
+
+targetLSN
+string
+ |
+
+ The target LSN (Log Sequence Number)
+ |
+
+targetTime
+string
+ |
+
+ The target time as a timestamp in the RFC3339 standard
+ |
+
+targetImmediate
+bool
+ |
+
+ End recovery as soon as a consistent state is reached
+ |
+
+exclusive
+bool
+ |
+
+ Set the target to be exclusive. If omitted, defaults to false, so that
+in Postgres, recovery_target_inclusive will be true
+ |
+
+
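+For illustration, a fragment of a `Cluster` spec recovering up to a named restore point;
+the external cluster and restore point names are hypothetical:
+
+```yaml
+spec:
+  bootstrap:
+    recovery:
+      source: cluster-source             # hypothetical external cluster
+      recoveryTarget:
+        targetName: before-migration     # restore point created with pg_create_restore_point
+        exclusive: false
+```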
+
+
+
+
+## ReplicaClusterConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ReplicaClusterConfiguration encapsulates the configuration of a replica
+cluster
+
+
+Field | Description |
+
+self
+string
+ |
+
+ Self defines the name of this cluster. It is used to determine if this is a primary
+or a replica cluster, comparing it with primary
+ |
+
+primary
+string
+ |
+
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+topology specified in externalClusters
+ |
+
+source [Required]
+string
+ |
+
+ The name of the external cluster which is the replication origin
+ |
+
+enabled
+bool
+ |
+
+ If replica mode is enabled, this cluster will be a replica of an
+existing cluster. Replica cluster can be created from a recovery
+object store or via streaming through pg_basebackup.
+Refer to the Replica clusters page of the documentation for more information.
+ |
+
+promotionToken
+string
+ |
+
+ A demotion token generated by an external cluster used to
+check if the promotion requirements are met.
+ |
+
+minApplyDelay
+meta/v1.Duration
+ |
+
+ When replica mode is enabled, this parameter allows you to replay
+transactions only when the system time is at least the configured
+time past the commit time. This provides an opportunity to correct
+data loss errors. Note that when this parameter is set, a promotion
+token cannot be used.
+ |
+
+
+
+
+
+
+## ReplicationSlotsConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ReplicationSlotsConfiguration encapsulates the configuration
+of replication slots
+
+
+Field | Description |
+
+highAvailability
+ReplicationSlotsHAConfiguration
+ |
+
+ Replication slots for high availability configuration
+ |
+
+updateInterval
+int
+ |
+
+ Standby will update the status of the local replication slots
+every updateInterval seconds (default 30).
+ |
+
+synchronizeReplicas
+SynchronizeReplicasConfiguration
+ |
+
+ Configures the synchronization of the user defined physical replication slots
+ |
+
+
+
+
+
+
+## ReplicationSlotsHAConfiguration
+
+**Appears in:**
+
+- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration)
+
+ReplicationSlotsHAConfiguration encapsulates the configuration
+of the replication slots that are automatically managed by
+the operator to control the streaming replication connections
+with the standby instances for high availability (HA) purposes.
+Replication slots are a PostgreSQL feature that makes sure
+that PostgreSQL automatically keeps WAL files in the primary
+when a streaming client (in this specific case a replica that
+is part of the HA cluster) gets disconnected.
+
+
+Field | Description |
+
+enabled
+bool
+ |
+
+ If enabled (default), the operator will automatically manage replication slots
+on the primary instance and use them in streaming replication
+connections with all the standby instances that are part of the HA
+cluster. If disabled, the operator will not take advantage
+of replication slots in streaming connections with the replicas.
+This feature also controls replication slots in replica cluster,
+from the designated primary to its cascading replicas.
+ |
+
+slotPrefix
+string
+ |
+
+ Prefix for replication slots managed by the operator for HA.
+It may only contain lower case letters, numbers, and the underscore character.
+This can only be set at creation time. By default set to _cnp_ .
+ |
+
+
+
+
+
+
+## RoleConfiguration
+
+**Appears in:**
+
+- [ManagedConfiguration](#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration)
+
+RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+with the additional field Ensure specifying whether to ensure the presence or
+absence of the role in the database.
+The defaults of the CREATE ROLE command are applied.
+Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name of the role
+ |
+
+comment
+string
+ |
+
+ Description of the role
+ |
+
+ensure
+EnsureOption
+ |
+
+ Ensure the role is present or absent - defaults to "present"
+ |
+
+passwordSecret
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ Secret containing the password of the role (if present)
+If null, the password will be ignored unless DisablePassword is set
+ |
+
+connectionLimit
+int64
+ |
+
+ If the role can log in, this specifies how many concurrent
+connections the role can make. -1 (the default) means no limit.
+ |
+
+validUntil
+meta/v1.Time
+ |
+
+ Date and time after which the role's password is no longer valid.
+When omitted, the password will never expire (default).
+ |
+
+inRoles
+[]string
+ |
+
+ List of one or more existing roles to which this role will be
+immediately added as a new member. Default empty.
+ |
+
+inherit
+bool
+ |
+
+ Whether a role "inherits" the privileges of roles it is a member of.
+Default is true .
+ |
+
+disablePassword
+bool
+ |
+
+ DisablePassword indicates that a role's password should be set to NULL in Postgres
+ |
+
+superuser
+bool
+ |
+
+ Whether the role is a superuser who can override all access
+restrictions within the database - superuser status is dangerous and
+should be used only when really needed. You must yourself be a
+superuser to create a new superuser. Default is false .
+ |
+
+createdb
+bool
+ |
+
+ When set to true , the role being defined will be allowed to create
+new databases. Specifying false (default) will deny a role the
+ability to create databases.
+ |
+
+createrole
+bool
+ |
+
+ Whether the role will be permitted to create, alter, drop, comment
+on, change the security label for, and grant or revoke membership in
+other roles. Default is false .
+ |
+
+login
+bool
+ |
+
+ Whether the role is allowed to log in. A role having the login
+attribute can be thought of as a user. Roles without this attribute
+are useful for managing database privileges, but are not users in
+the usual sense of the word. Default is false .
+ |
+
+replication
+bool
+ |
+
+ Whether a role is a replication role. A role must have this
+attribute (or be a superuser) in order to be able to connect to the
+server in replication mode (physical or logical replication) and in
+order to be able to create or drop replication slots. A role having
+the replication attribute is a very highly privileged role, and
+should only be used on roles actually used for replication. Default
+is false .
+ |
+
+bypassrls
+bool
+ |
+
+ Whether a role bypasses every row-level security (RLS) policy.
+Default is false .
+ |
+
+
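+For illustration, a fragment of a `Cluster` spec declaring a managed role; the role and
+secret names are hypothetical:
+
+```yaml
+spec:
+  managed:
+    roles:
+      - name: dante                      # hypothetical role
+        ensure: present
+        comment: declaratively managed application role
+        login: true
+        inRoles:
+          - pg_monitor
+        passwordSecret:
+          name: cluster-example-dante    # hypothetical secret with username/password keys
+```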
+
+
+
+
+## SQLRefs
+
+**Appears in:**
+
+- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB)
+
+SQLRefs holds references to ConfigMaps or Secrets
+containing SQL files. The references are processed in a specific order:
+first, all Secrets are processed, followed by all ConfigMaps.
+Within each group, the processing order follows the sequence specified
+in their respective arrays.
+
+
+
+
+
+## ScheduledBackupSpec
+
+**Appears in:**
+
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+ScheduledBackupSpec defines the desired state of ScheduledBackup
+
+
+Field | Description |
+
+suspend
+bool
+ |
+
+ If this backup is suspended or not
+ |
+
+immediate
+bool
+ |
+
+ If the first backup has to start immediately after creation or not
+ |
+
+schedule [Required]
+string
+ |
+
+ The schedule does not follow the same format used in Kubernetes CronJobs
+as it includes an additional seconds specifier,
+see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ |
+
+cluster [Required]
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+ |
+
+ The cluster to backup
+ |
+
+backupOwnerReference
+string
+ |
+
+ Indicates which ownerReference should be put inside the created backup resources.
+
+- none: no owner reference for created backup objects (same behavior as before the field was introduced)
+- self: sets the Scheduled backup object as owner of the backup
+- cluster: set the cluster as owner of the backup
+
+ |
+
+target
+BackupTarget
+ |
+
+ The policy to decide which instance should perform this backup. If empty,
+it defaults to cluster.spec.backup.target .
+Available options are empty string, primary and prefer-standby .
+primary to have backups run always on primary instances,
+prefer-standby to have backups run preferably on the most updated
+standby, if available.
+ |
+
+method
+BackupMethod
+ |
+
+ The backup method to be used, possible options are barmanObjectStore ,
+volumeSnapshot or plugin . Defaults to: barmanObjectStore .
+ |
+
+pluginConfiguration
+BackupPluginConfiguration
+ |
+
+ Configuration parameters passed to the plugin managing this backup
+ |
+
+online
+bool
+ |
+
+ Whether the default type of backup with volume snapshots is
+online/hot (true , default) or offline/cold (false )
+Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ |
+
+onlineConfiguration
+OnlineConfiguration
+ |
+
+ Configuration parameters to control the online/hot backup with volume snapshots
+Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ |
+
+
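+For illustration, a minimal `ScheduledBackup` manifest taking a daily backup of a
+hypothetical cluster named `cluster-example`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example
+spec:
+  schedule: "0 0 0 * * *"        # six fields: the leading value is seconds
+  backupOwnerReference: self
+  cluster:
+    name: cluster-example
+  method: barmanObjectStore
+```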
+
+
+
+
+## ScheduledBackupStatus
+
+**Appears in:**
+
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+ScheduledBackupStatus defines the observed state of ScheduledBackup
+
+
+Field | Description |
+
+lastCheckTime
+meta/v1.Time
+ |
+
+ The latest time the schedule was checked
+ |
+
+lastScheduleTime
+meta/v1.Time
+ |
+
+ Information about the last time that a backup was successfully scheduled.
+ |
+
+nextScheduleTime
+meta/v1.Time
+ |
+
+ Next time we will run a backup
+ |
+
+
+
+
+
+
+## SecretVersion
+
+**Appears in:**
+
+- [PgBouncerSecrets](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSecrets)
+
+- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets)
+
+SecretVersion contains a secret name and its ResourceVersion
+
+
+Field | Description |
+
+name
+string
+ |
+
+ The name of the secret
+ |
+
+version
+string
+ |
+
+ The ResourceVersion of the secret
+ |
+
+
+
+
+
+
+## SecretsResourceVersion
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+SecretsResourceVersion is the resource versions of the secrets
+managed by the operator
+
+
+Field | Description |
+
+superuserSecretVersion
+string
+ |
+
+ The resource version of the "postgres" user secret
+ |
+
+replicationSecretVersion
+string
+ |
+
+ The resource version of the "streaming_replica" user secret
+ |
+
+applicationSecretVersion
+string
+ |
+
+ The resource version of the "app" user secret
+ |
+
+managedRoleSecretVersion
+map[string]string
+ |
+
+ The resource versions of the managed roles secrets
+ |
+
+caSecretVersion
+string
+ |
+
+ Unused. Retained for compatibility with old versions.
+ |
+
+clientCaSecretVersion
+string
+ |
+
+ The resource version of the PostgreSQL client-side CA secret
+ |
+
+serverCaSecretVersion
+string
+ |
+
+ The resource version of the PostgreSQL server-side CA secret
+ |
+
+serverSecretVersion
+string
+ |
+
+ The resource version of the PostgreSQL server-side secret
+ |
+
+barmanEndpointCA
+string
+ |
+
+ The resource version of the Barman Endpoint CA if provided
+ |
+
+externalClusterSecretVersion
+map[string]string
+ |
+
+ The resource versions of the external cluster secrets
+ |
+
+metrics
+map[string]string
+ |
+
+ A map with the versions of all the secrets used to pass metrics.
+Map keys are the secret names, map values are the versions
+ |
+
+
+
+
+
+
+## ServiceAccountTemplate
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ServiceAccountTemplate contains the template needed to generate the service accounts
+
+
+Field | Description |
+
+metadata [Required]
+Metadata
+ |
+
+ Metadata are the metadata to be used for the generated
+service account
+ |
+
+
+
+
+
+
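+As a sketch of typical usage, a Cluster can attach extra metadata to the
+generated service account through the serviceAccountTemplate field of its spec;
+the annotation key and values below are illustrative only:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  serviceAccountTemplate:
+    metadata:
+      annotations:
+        # e.g. bind a cloud IAM role to the generated service account
+        eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/pg-backup"
+```
+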
+## ServiceSelectorType
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService)
+
+- [ManagedServices](#postgresql-k8s-enterprisedb-io-v1-ManagedServices)
+
+ServiceSelectorType describes a valid value for generating the service selectors.
+It indicates which type of service the selector applies to, such as read-write, read, or read-only
+
+
+
+## ServiceTemplateSpec
+
+**Appears in:**
+
+- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService)
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+ServiceTemplateSpec is a structure allowing the user to set
+a template for Service generation.
+
+
+Field | Description |
+
+metadata
+Metadata
+ |
+
+ Standard object's metadata.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ |
+
+spec
+core/v1.ServiceSpec
+ |
+
+ Specification of the desired behavior of the service.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
+
+
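+A hedged sketch of an additional managed service using this template, assuming
+the managed.services.additional stanza described earlier in this reference
+(service name and labels are placeholders):
+
+```yaml
+spec:
+  managed:
+    services:
+      additional:
+        - selectorType: rw
+          serviceTemplate:
+            metadata:
+              name: cluster-example-rw-lb
+              labels:
+                team: data
+            spec:
+              type: LoadBalancer
+```
+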
+## ServiceUpdateStrategy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ManagedService](#postgresql-k8s-enterprisedb-io-v1-ManagedService)
+
+ServiceUpdateStrategy describes how the changes to the managed service should be handled
+
+
+
+## SnapshotOwnerReference
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [VolumeSnapshotConfiguration](#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration)
+
+SnapshotOwnerReference defines the reference type for the owner of the snapshot.
+This specifies which owner the processed resources should relate to.
+
+
+
+## SnapshotType
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [Import](#postgresql-k8s-enterprisedb-io-v1-Import)
+
+SnapshotType is a type of allowed import
+
+
+
+## StorageConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+- [TablespaceConfiguration](#postgresql-k8s-enterprisedb-io-v1-TablespaceConfiguration)
+
+StorageConfiguration is the configuration used to create and reconcile PVCs,
+usable for WAL volumes, PGDATA volumes, or tablespaces
+
+
+Field | Description |
+
+storageClass
+string
+ |
+
+ StorageClass to use for PVCs. Applied after
+evaluating the PVC template, if available.
+If not specified, the generated PVCs will use the
+default storage class
+ |
+
+size
+string
+ |
+
+ Size of the storage. Required if not already specified in the PVC template.
+Changes to this field are automatically reapplied to the created PVCs.
+Size cannot be decreased.
+ |
+
+resizeInUseVolumes
+bool
+ |
+
+ Resize existing PVCs; defaults to true
+ |
+
+pvcTemplate
+core/v1.PersistentVolumeClaimSpec
+ |
+
+ Template to be used to generate the Persistent Volume Claim
+ |
+
+
+
+
+
+
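+For example, the storage stanza of a Cluster might look like the following
+minimal sketch (the storage class name is a placeholder):
+
+```yaml
+spec:
+  instances: 3
+  storage:
+    # Omit storageClass to fall back to the default storage class
+    storageClass: standard
+    size: 10Gi
+    # Grow existing PVCs when size is increased (defaults to true)
+    resizeInUseVolumes: true
+```
+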
+## SwitchReplicaClusterStatus
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster
+
+
+Field | Description |
+
+inProgress
+bool
+ |
+
+ InProgress indicates if there is an ongoing procedure of switching a cluster to a replica cluster.
+ |
+
+
+
+
+
+
+## SyncReplicaElectionConstraints
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+SyncReplicaElectionConstraints contains the constraints for sync replica election.
+For anti-affinity parameters, two instances are considered in the same location
+if all the label values match.
+In the future, synchronous replica election restriction by name will be supported.
+
+
+Field | Description |
+
+nodeLabelsAntiAffinity
+[]string
+ |
+
+ A list of node label values to extract and compare to evaluate if the pods reside in the same topology or not
+ |
+
+enabled [Required]
+bool
+ |
+
+ This flag enables the constraints for sync replicas
+ |
+
+
+
+
+
+
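+A minimal sketch, assuming the corresponding field in the Cluster spec is
+.spec.postgresql.syncReplicaElectionConstraint (the label key is illustrative):
+
+```yaml
+spec:
+  postgresql:
+    syncReplicaElectionConstraint:
+      enabled: true
+      # Pods on nodes with different values for this label are considered
+      # to be in different locations
+      nodeLabelsAntiAffinity:
+        - topology.kubernetes.io/zone
+```
+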
+## SynchronizeReplicasConfiguration
+
+**Appears in:**
+
+- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration)
+
+SynchronizeReplicasConfiguration contains the configuration for the synchronization of user defined
+physical replication slots
+
+
+Field | Description |
+
+enabled [Required]
+bool
+ |
+
+ When set to true, every replication slot that is on the primary is synchronized on each standby
+ |
+
+excludePatterns
+[]string
+ |
+
+ List of regular expression patterns to match the names of replication slots to be excluded (by default empty)
+ |
+
+
+
+
+
+
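+As an illustration, assuming this maps to .spec.replicationSlots.synchronizeReplicas
+in the Cluster spec (the exclusion pattern is a placeholder):
+
+```yaml
+spec:
+  replicationSlots:
+    synchronizeReplicas:
+      enabled: true
+      # User-defined slots whose names match these patterns are not synchronized
+      excludePatterns:
+        - "^temp_"
+```
+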
+## SynchronousReplicaConfiguration
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+SynchronousReplicaConfiguration contains the configuration of the
+PostgreSQL synchronous replication feature.
+Important: at this moment, .spec.minSyncReplicas and .spec.maxSyncReplicas
+also need to be considered.
+
+
+Field | Description |
+
+method [Required]
+SynchronousReplicaConfigurationMethod
+ |
+
+ Method to select synchronous replication standbys from the listed
+servers, accepting 'any' (quorum-based synchronous replication) or
+'first' (priority-based synchronous replication) as values.
+ |
+
+number [Required]
+int
+ |
+
+ Specifies the number of synchronous standby servers that
+transactions must wait for responses from.
+ |
+
+maxStandbyNamesFromCluster
+int
+ |
+
+ Specifies the maximum number of local cluster pods that can be
+automatically included in the synchronous_standby_names option in
+PostgreSQL.
+ |
+
+standbyNamesPre
+[]string
+ |
+
+ A user-defined list of application names to be added to
+synchronous_standby_names before local cluster pods (the order is
+only useful for priority-based synchronous replication).
+ |
+
+standbyNamesPost
+[]string
+ |
+
+ A user-defined list of application names to be added to
+synchronous_standby_names after local cluster pods (the order is
+only useful for priority-based synchronous replication).
+ |
+
+dataDurability
+DataDurabilityLevel
+ |
+
+ If set to "required", data durability is strictly enforced. Write operations
+with synchronous commit settings (on, remote_write, or remote_apply) will
+block if there are insufficient healthy replicas, ensuring data persistence.
+If set to "preferred", data durability is maintained when healthy replicas
+are available, but the required number of instances will adjust dynamically
+if replicas become unavailable. This setting relaxes strict durability enforcement
+to allow for operational continuity. This setting is only applicable if both
+standbyNamesPre and standbyNamesPost are unset (empty).
+ |
+
+
+
+
+
+
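+For example, a quorum-based setup with relaxed durability could be sketched as
+follows under .spec.postgresql.synchronous (the values are illustrative):
+
+```yaml
+spec:
+  postgresql:
+    synchronous:
+      method: any        # quorum-based synchronous replication
+      number: 1
+      # Only valid when standbyNamesPre and standbyNamesPost are unset
+      dataDurability: preferred
+```
+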
+## SynchronousReplicaConfigurationMethod
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [SynchronousReplicaConfiguration](#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration)
+
+SynchronousReplicaConfigurationMethod configures whether to use
+quorum based replication or a priority list
+
+
+
+## TDEConfiguration
+
+**Appears in:**
+
+- [EPASConfiguration](#postgresql-k8s-enterprisedb-io-v1-EPASConfiguration)
+
+TDEConfiguration contains the Transparent Data Encryption configuration
+
+
+Field | Description |
+
+enabled
+bool
+ |
+
+ True if we want to have TDE enabled
+ |
+
+secretKeyRef
+core/v1.SecretKeySelector
+ |
+
+ Reference to the secret that contains the encryption key
+ |
+
+wrapCommand
+core/v1.SecretKeySelector
+ |
+
+ WrapCommand is the encrypt command provided by the user
+ |
+
+unwrapCommand
+core/v1.SecretKeySelector
+ |
+
+ UnwrapCommand is the decryption command provided by the user
+ |
+
+passphraseCommand
+core/v1.SecretKeySelector
+ |
+
+ PassphraseCommand is the command executed to get the passphrase that will be
+passed to the OpenSSL command to encrypt and decrypt
+ |
+
+
+
+
+
+
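+A minimal sketch of enabling TDE with a key stored in a Kubernetes secret,
+assuming the stanza lives at .spec.postgresql.epas.tde (secret name and key are
+placeholders):
+
+```yaml
+spec:
+  postgresql:
+    epas:
+      tde:
+        enabled: true
+        # Secret holding the encryption key
+        secretKeyRef:
+          name: tde-key-secret
+          key: key
+```
+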
+## TablespaceConfiguration
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+TablespaceConfiguration is the configuration of a tablespace, and includes
+the storage specification for the tablespace
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ The name of the tablespace
+ |
+
+storage [Required]
+StorageConfiguration
+ |
+
+ The storage configuration for the tablespace
+ |
+
+owner
+DatabaseRoleRef
+ |
+
+ Owner is the PostgreSQL user owning the tablespace
+ |
+
+temporary
+bool
+ |
+
+ When set to true, the tablespace will be added as a temp_tablespaces
+entry in PostgreSQL, and will be available to automatically house temp
+database objects, or other temporary files. Please refer to PostgreSQL
+documentation for more information on the temp_tablespaces GUC.
+ |
+
+
+
+
+
+
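+For illustration, a .spec.tablespaces stanza declaring a regular and a temporary
+tablespace might look like this (names, owner, and sizes are placeholders):
+
+```yaml
+spec:
+  tablespaces:
+    - name: analytics
+      owner:
+        name: app
+      storage:
+        size: 5Gi
+    - name: scratch
+      # Added to temp_tablespaces in PostgreSQL
+      temporary: true
+      storage:
+        size: 2Gi
+```
+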
+## TablespaceState
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+TablespaceState represents the state of a tablespace in a cluster
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the name of the tablespace
+ |
+
+owner
+string
+ |
+
+ Owner is the PostgreSQL user owning the tablespace
+ |
+
+state [Required]
+TablespaceStatus
+ |
+
+ State is the latest reconciliation state
+ |
+
+error
+string
+ |
+
+ Error is the reconciliation error, if any
+ |
+
+
+
+
+
+
+## TablespaceStatus
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [TablespaceState](#postgresql-k8s-enterprisedb-io-v1-TablespaceState)
+
+TablespaceStatus represents the status of a tablespace in the cluster
+
+
+
+## Topology
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+Topology contains the cluster topology
+
+
+Field | Description |
+
+instances
+map[PodName]PodTopologyLabels
+ |
+
+ Instances contains the pod topology of the instances
+ |
+
+nodesUsed
+int32
+ |
+
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+A value of '1' suggests that all instances are hosted on a single node,
+implying the absence of High Availability (HA). Ideally, this value should
+be the same as the number of instances in the Postgres HA cluster, implying
+shared nothing architecture on the compute side.
+ |
+
+successfullyExtracted
+bool
+ |
+
+ SuccessfullyExtracted indicates if the topology data was successfully extracted. It is useful to enact fallback behaviors
+in synchronous replica election in case of failures
+ |
+
+
+
+
+
+
+## VolumeSnapshotConfiguration
+
+**Appears in:**
+
+- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration)
+
+VolumeSnapshotConfiguration represents the configuration for the execution of snapshot backups.
+
+
+Field | Description |
+
+labels
+map[string]string
+ |
+
+ Labels are key-value pairs that will be added to the .metadata.labels of the snapshot resources.
+ |
+
+annotations
+map[string]string
+ |
+
+ Annotations are key-value pairs that will be added to the .metadata.annotations of the snapshot resources.
+ |
+
+className
+string
+ |
+
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+It is the default class for the other types if no specific class is present
+ |
+
+walClassName
+string
+ |
+
+ WalClassName specifies the Snapshot Class to be used for the PG_WAL PersistentVolumeClaim.
+ |
+
+tablespaceClassName
+map[string]string
+ |
+
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+Defaults to the PGDATA Snapshot Class, if set.
+ |
+
+snapshotOwnerReference
+SnapshotOwnerReference
+ |
+
+ SnapshotOwnerReference indicates the type of owner reference the snapshot should have
+ |
+
+online
+bool
+ |
+
+ Whether the default type of backup with volume snapshots is
+online/hot (true, default) or offline/cold (false)
+ |
+
+onlineConfiguration
+OnlineConfiguration
+ |
+
+ Configuration parameters to control the online/hot backup with volume snapshots
+ |
+
+
+
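+As a sketch, a .spec.backup.volumeSnapshot stanza using these fields might look
+like this (the VolumeSnapshotClass name is a placeholder; the online
+configuration fields are those of OnlineConfiguration described elsewhere in
+this reference):
+
+```yaml
+spec:
+  backup:
+    volumeSnapshot:
+      className: csi-snapclass
+      snapshotOwnerReference: cluster
+      online: true
+      onlineConfiguration:
+        immediateCheckpoint: true
+        waitForArchive: true
+```
+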
diff --git a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.24.2.mdx
similarity index 94%
rename from product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx
rename to product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.24.2.mdx
index 311de8d01b8..c1993cff8e1 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/pg4k.v1/v1.24.2.mdx
@@ -1,6 +1,8 @@
---
-title: 'API Reference'
-originalFilePath: 'src/pg4k.v1.md'
+title: API Reference - v1.24.2
+navTitle: v1.24.2
+pdfExclude: 'true'
+
---
Package v1 contains API Schema definitions for the postgresql v1 API group
@@ -10,6 +12,7 @@ originalFilePath: 'src/pg4k.v1.md'
- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
- [ClusterImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ClusterImageCatalog)
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
- [ImageCatalog](#postgresql-k8s-enterprisedb-io-v1-ImageCatalog)
- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
@@ -116,6 +119,43 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
+
+
+## Database
+
+Database is the Schema for the databases API
+
+
+Field | Description |
+
+apiVersion [Required] string | postgresql.k8s.enterprisedb.io/v1 |
+kind [Required] string | Database |
+metadata [Required]
+meta/v1.ObjectMeta
+ |
+
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field. |
+
+spec [Required]
+DatabaseSpec
+ |
+
+ Specification of the desired Database.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+status
+DatabaseStatus
+ |
+
+ Most recently observed status of the Database. This data may not be up to
+date. Populated by the system. Read-only.
+More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ |
+
+
+
+
## ImageCatalog
@@ -478,7 +518,7 @@ plugin for this backup
 Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE
-tablespaceName [Required]
+ |
tablespaceName
string
|
@@ -791,13 +831,20 @@ parameter is omitted
The backup method being used
|
-online [Required]
+ |
online
bool
|
Whether the backup was online/hot (true ) or offline/cold (false )
|
+pluginMetadata
+map[string]string
+ |
+
+ A map containing the plugin metadata
+ |
+
@@ -1226,6 +1273,7 @@ created from scratch
with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
Contains:
+
ca.crt
: CA that should be used to validate the server certificate,
@@ -1263,6 +1311,7 @@ created using the provided CA.
with a self-signed CA and will be used to generate all the client certificates.
Contains:
+
ca.crt
: CA that should be used to validate the client certificates,
@@ -1618,7 +1667,7 @@ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
for more information.
-ephemeralVolumesSizeLimit [Required]
+ |
ephemeralVolumesSizeLimit
EphemeralVolumesSizeLimitConfiguration
|
@@ -1693,7 +1742,7 @@ the license agreement that comes with the operator.
|
externalClusters
-[]ExternalCluster
+ExternalClusterList
|
The list of external clusters which are used in the configuration
@@ -1766,7 +1815,7 @@ advisable for any PostgreSQL cluster employed for
development/staging purposes.
|
-plugins [Required]
+ |
plugins
PluginConfigurationList
|
@@ -1868,7 +1917,7 @@ any plugin to be loaded with the corresponding configuration
during a switchover or a failover
|
-lastPromotionToken [Required]
+ |
lastPromotionToken
string
|
@@ -2110,7 +2159,7 @@ This field is reported when .spec.failoverDelay is populated or dur
Image contains the image name used by the pods
|
-pluginStatus [Required]
+ |
pluginStatus
[]PluginStatus
|
@@ -2162,6 +2211,19 @@ Map keys are the config map names, map values are the versions
+
+
+## DataDurabilityLevel
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [SynchronousReplicaConfiguration](#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration)
+
+ DataDurabilityLevel specifies how strictly to enforce synchronous replication
+when cluster instances are unavailable. Options are required or preferred.
+
## DataSource
@@ -2200,6 +2262,18 @@ PostgreSQL cluster from an existing storage
+
+
+## DatabaseReclaimPolicy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec)
+
+DatabaseReclaimPolicy describes a policy for end-of-life maintenance of databases.
+
## DatabaseRoleRef
@@ -2222,6 +2296,194 @@ PostgreSQL cluster from an existing storage
+
+
+## DatabaseSpec
+
+**Appears in:**
+
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
+
+DatabaseSpec is the specification of a PostgreSQL database
+
+
+Field | Description |
+
+cluster [Required]
+core/v1.LocalObjectReference
+ |
+
+ The corresponding cluster
+ |
+
+ensure
+EnsureOption
+ |
+
+ Ensure the PostgreSQL database is present or absent - defaults to "present"
+ |
+
+name [Required]
+string
+ |
+
+ The name inside PostgreSQL
+ |
+
+owner [Required]
+string
+ |
+
+ The owner
+ |
+
+template
+string
+ |
+
+ The name of the template from which to create the new database
+ |
+
+encoding
+string
+ |
+
+ The encoding (cannot be changed)
+ |
+
+locale
+string
+ |
+
+ The locale (cannot be changed)
+ |
+
+locale_provider
+string
+ |
+
+ The locale provider (cannot be changed)
+ |
+
+lc_collate
+string
+ |
+
+ The LC_COLLATE (cannot be changed)
+ |
+
+lc_ctype
+string
+ |
+
+ The LC_CTYPE (cannot be changed)
+ |
+
+icu_locale
+string
+ |
+
+ The ICU_LOCALE (cannot be changed)
+ |
+
+icu_rules
+string
+ |
+
+ The ICU_RULES (cannot be changed)
+ |
+
+builtin_locale
+string
+ |
+
+ The BUILTIN_LOCALE (cannot be changed)
+ |
+
+collation_version
+string
+ |
+
+ The COLLATION_VERSION (cannot be changed)
+ |
+
+isTemplate
+bool
+ |
+
+ True when the database is a template
+ |
+
+allowConnections
+bool
+ |
+
+ True when connections to this database are allowed
+ |
+
+connectionLimit
+int
+ |
+
+ Connection limit, -1 means no limit and -2 means the
+database is not valid
+ |
+
+tablespace
+string
+ |
+
+ The default tablespace of this database
+ |
+
+databaseReclaimPolicy
+DatabaseReclaimPolicy
+ |
+
+ The policy for end-of-life maintenance of this database
+ |
+
+
+
+
+
+
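+For illustration, a minimal Database resource using this spec might look as
+follows (resource, database, owner, and cluster names are placeholders):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+  name: db-one
+spec:
+  cluster:
+    name: cluster-example
+  # Name of the database inside PostgreSQL
+  name: one
+  owner: app
+  # "present" is the default
+  ensure: present
+```
+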
+## DatabaseStatus
+
+**Appears in:**
+
+- [Database](#postgresql-k8s-enterprisedb-io-v1-Database)
+
+DatabaseStatus defines the observed state of Database
+
+
+Field | Description |
+
+observedGeneration
+int64
+ |
+
+ A sequence number representing the latest
+desired state that was synchronized
+ |
+
+applied
+bool
+ |
+
+ Applied is true if the database was reconciled correctly
+ |
+
+message
+string
+ |
+
+ Message is the reconciliation output message
+ |
+
+
+
+
## EPASConfiguration
@@ -2288,6 +2550,8 @@ PostgreSQL cluster from an existing storage
**Appears in:**
+- [DatabaseSpec](#postgresql-k8s-enterprisedb-io-v1-DatabaseSpec)
+
- [RoleConfiguration](#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration)
EnsureOption represents whether we should enforce the presence or absence of
@@ -2307,14 +2571,14 @@ storage
-
-
-## ExternalCluster
-
-**Appears in:**
-
-- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
-
-ExternalCluster represents the connection parameters to an
-external cluster which is used in the other sections of the configuration
-
-
-Field | Description |
-
-name [Required]
-string
- |
-
- The server name, required
- |
-
-connectionParameters
-map[string]string
- |
-
- The list of connection parameters, such as dbname, host, username, etc
- |
-
-sslCert
-core/v1.SecretKeySelector
- |
-
- The reference to an SSL certificate to be used to connect to this
-instance
- |
-
-sslKey
-core/v1.SecretKeySelector
- |
-
- The reference to an SSL private key to be used to connect to this
-instance
- |
-
-sslRootCert
-core/v1.SecretKeySelector
- |
-
- The reference to an SSL CA public key to be used to connect to this
-instance
- |
-
-password
-core/v1.SecretKeySelector
- |
-
- The reference to the password to be used to connect to the server.
-If a password is provided, EDB Postgres for Kubernetes creates a PostgreSQL
-passfile at /controller/external/NAME/pass (where "NAME" is the
-cluster's name). This passfile is automatically referenced in the
-connection string when establishing a connection to the remote
-PostgreSQL server from the current PostgreSQL Cluster . This ensures
-secure and efficient password management for external clusters.
- |
-
-barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
- |
-
- The configuration for the barman-cloud tool suite
- |
-
-
-
-
## ImageCatalogRef
@@ -2841,7 +3030,7 @@ It includes the type of service and its associated template specification.
Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
|
-updateStrategy [Required]
+ |
updateStrategy
ServiceUpdateStrategy
|
@@ -2879,7 +3068,7 @@ Valid values are "rw", "r", and "ro", representing
Valid values are "r", and "ro", representing read, and read-only services.
|
-additional [Required]
+ |
additional
[]ManagedService
|
@@ -2909,7 +3098,7 @@ not using the core data types.
Field | Description |
-name [Required]
+ | name
string
|
@@ -3245,6 +3434,55 @@ the operator calls PgBouncer's PAUSE and RESUME comman
|
+
+
+## PluginConfiguration
+
+**Appears in:**
+
+PluginConfiguration specifies a plugin that needs to be loaded for this
+cluster to be reconciled
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the plugin name
+ |
+
+enabled
+bool
+ |
+
+ Enabled is true if this plugin will be used
+ |
+
+parameters
+map[string]string
+ |
+
+ Parameters is the configuration of the plugin
+ |
+
+
+
+
+
+
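+A hedged sketch of the plugins stanza in a Cluster spec (the plugin name and
+parameter keys are placeholders; the accepted parameters depend on the plugin):
+
+```yaml
+spec:
+  plugins:
+    - name: my-backup-plugin
+      enabled: true
+      # Free-form key/value configuration passed to the plugin
+      parameters:
+        key: value
+```
+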
+## PluginConfigurationList
+
+(Alias of `[]github.com/EnterpriseDB/cloud-native-postgres/api/v1.PluginConfiguration`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PluginConfigurationList represents a set of plugins with their
+configuration parameters
+
## PluginStatus
@@ -3273,7 +3511,7 @@ the operator calls PgBouncer's PAUSE and RESUME comman
latest reconciliation loop
|
-capabilities [Required]
+ |
capabilities
[]string
|
@@ -3281,7 +3519,7 @@ latest reconciliation loop
plugin
|
-operatorCapabilities [Required]
+ |
operatorCapabilities
[]string
|
@@ -3289,7 +3527,7 @@ plugin
plugin regarding the reconciler
|
-walCapabilities [Required]
+ |
walCapabilities
[]string
|
@@ -3297,7 +3535,7 @@ plugin regarding the reconciler
plugin regarding the WAL management
|
-backupCapabilities [Required]
+ |
backupCapabilities
[]string
|
@@ -3305,7 +3543,7 @@ plugin regarding the WAL management
plugin regarding the Backup management
|
-status [Required]
+ |
status
string
|
@@ -3801,7 +4039,7 @@ cluster
Field | Description |
-self [Required]
+ | self
string
|
@@ -3809,7 +4047,7 @@ cluster
or a replica cluster, comparing it with primary
|
-primary [Required]
+ | primary
string
|
@@ -3824,7 +4062,7 @@ topology specified in externalClusters
The name of the external cluster which is the replication origin
|
-enabled [Required]
+ | enabled
bool
|
@@ -3834,7 +4072,7 @@ object store or via streaming through pg_basebackup.
Refer to the Replica clusters page of the documentation for more information.
|
-promotionToken [Required]
+ | promotionToken
string
|
@@ -3842,7 +4080,7 @@ Refer to the Replica clusters page of the documentation for more information.
|
-minApplyDelay [Required]
+ | minApplyDelay
meta/v1.Duration
|
@@ -4634,12 +4872,6 @@ physical replication slots
List of regular expression patterns to match the names of replication slots to be excluded (by default empty)
|
-- [Required]
-synchronizeReplicasCache
- |
-
- No description provided. |
-
@@ -4703,6 +4935,20 @@ only useful for priority-based synchronous replication).
only useful for priority-based synchronous replication).
|
+dataDurability
+DataDurabilityLevel
+ |
+
+ If set to "required", data durability is strictly enforced. Write operations
+with synchronous commit settings (on, remote_write, or remote_apply) will
+block if there are insufficient healthy replicas, ensuring data persistence.
+If set to "preferred", data durability is maintained when healthy replicas
+are available, but the required number of instances will adjust dynamically
+if replicas become unavailable. This setting relaxes strict durability enforcement
+to allow for operational continuity. This setting is only applicable if both
+standbyNamesPre and standbyNamesPost are unset (empty).
+ |
+
diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
index 4a5be64b430..8bad4faf94c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
@@ -8,6 +8,7 @@ for PostgreSQL that introduces support for storing GIS (Geographic Information
Systems) objects in the database and be queried via SQL.
!!! Important
+
This section assumes you are familiar with PostGIS and provides some basic
information about how to create a new PostgreSQL cluster with a PostGIS database
in Kubernetes via EDB Postgres for Kubernetes.
@@ -48,6 +49,7 @@ do this in two ways:
architecture where the instance is shared by multiple databases
!!! Info
+
For more information on the microservice vs monolith architecture in the database
please refer to the ["How many databases should be hosted in a single PostgreSQL instance?" FAQ](faq.md)
or the ["Database import" section](database_import.md).
@@ -64,6 +66,7 @@ The [`postgis-example.yaml` manifest](../samples/postgis-example.yaml) below
provides some guidance on how the creation of a PostGIS cluster can be done.
!!! Warning
+
Please consider that, although convention over configuration applies in
EDB Postgres for Kubernetes, you should spend time configuring and tuning your system for
production. Also the `imageName` in the example below deliberately points
@@ -97,6 +100,7 @@ manifest and the cluster is up, you will have the above extensions installed in
both the template database and the application database, ready for use.
!!! Info
+
Take some time and look at the available options in `.spec.bootstrap.initdb`
from the [API reference](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB), such as
`postInitApplicationSQL`.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
index 73761f40a61..94bc76fb3d9 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
@@ -19,6 +19,7 @@ via the `parameters`, the `pg_hba`, and the `pg_ident` keys.
These settings are the same across all instances.
!!! Warning
+
Please don't use the `ALTER SYSTEM` query to change the configuration of
the PostgreSQL instances in an imperative way. Changing some of the options
that are normally controlled by the operator might indeed lead to an
@@ -30,6 +31,7 @@ A reference for custom settings usage is included in the samples, see
[`cluster-example-custom.yaml`](../samples/cluster-example-custom.yaml).
!!! Warning
+
**OpenShift users:** due to a current limitation of the OpenShift user interface,
it is possible to change PostgreSQL settings from the YAML pane only.
@@ -55,6 +57,7 @@ The `custom.conf` file will contain the user-defined settings in the
```
!!! Seealso "PostgreSQL GUCs: Grand Unified Configuration"
+
Refer to the PostgreSQL documentation for
[more information on the available parameters](https://www.postgresql.org/docs/current/runtime-config.html),
also known as GUC (Grand Unified Configuration).
@@ -94,6 +97,7 @@ wal_receiver_timeout = '5s'
```
!!! Warning
+
It is your duty to plan for WAL segments retention in your PostgreSQL
cluster and properly configure either `wal_keep_size` or `wal_keep_segments`,
depending on the server version, based on the expected and observed workloads.
@@ -167,6 +171,7 @@ default. Although you can override the content of `shared_preload_libraries`,
we recommend that only expert Postgres users take advantage of this option.
!!! Important
+
In case a specified library is not found, the server fails to start,
preventing EDB Postgres for Kubernetes from any self-healing attempt and requiring
manual intervention. Please make sure you always test both the extensions and
@@ -184,6 +189,7 @@ library. The operator will also remove the library as soon as no actual paramete
requires it.
!!! Important
+
Please always keep in mind that removing libraries from
`shared_preload_libraries` requires a restart of all instances in the cluster
in order to be effective.
@@ -217,6 +223,7 @@ SELECT datname FROM pg_database WHERE datallowconn
```
!!! Note
+
The above query also includes template databases like `template1`.
#### Enabling `auto_explain`
@@ -240,6 +247,7 @@ to complete):
```
!!! Note
+
Enabling auto_explain can lead to performance issues. Please refer to [`the auto explain documentation`](https://www.postgresql.org/docs/current/auto-explain.html)
#### Enabling `pg_stat_statements`
@@ -319,6 +327,7 @@ you need to add this entry in the `pg_hba` section:
used to create the `pg_hba.conf` used by the pods.
!!! Important
+
See the PostgreSQL documentation for
[more information on `pg_hba.conf`](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html).
@@ -415,6 +424,7 @@ generate and maintain the ident map file (known as `pg_ident.conf`) inside the
data directory.
!!! Important
+
See the PostgreSQL documentation for
[more information on `pg_ident.conf`](https://www.postgresql.org/docs/current/auth-username-maps.html).
@@ -482,6 +492,7 @@ potential risks associated with this command. To enable the use of `ALTER SYSTEM
you can explicitly set `.spec.postgresql.enableAlterSystem` to `true`.
!!! Warning
+
Proceed with caution when utilizing `ALTER SYSTEM`. This command operates
directly on the connected instance and does not undergo replication.
EDB Postgres for Kubernetes assumes responsibility for certain fixed parameters and complete
diff --git a/product_docs/docs/postgres_for_kubernetes/1/preview_version.mdx b/product_docs/docs/postgres_for_kubernetes/1/preview_version.mdx
index 6ebbec0d22b..99d7ca7a42d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/preview_version.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/preview_version.mdx
@@ -9,6 +9,7 @@ These versions are feature-frozen, meaning no new features are added, and are
intended for public testing prior to the final release.
!!! Important
+
EDB Postgres for Kubernetes release candidates are not intended for use in production
systems.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/private_edb_registry.mdx b/product_docs/docs/postgres_for_kubernetes/1/private_edb_registries.mdx
similarity index 96%
rename from product_docs/docs/postgres_for_kubernetes/1/private_edb_registry.mdx
rename to product_docs/docs/postgres_for_kubernetes/1/private_edb_registries.mdx
index 950c836ac0f..834fb6438c4 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/private_edb_registry.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/private_edb_registries.mdx
@@ -1,6 +1,6 @@
---
title: 'EDB private container registries'
-originalFilePath: 'src/private_edb_registry.md'
+originalFilePath: 'src/private_edb_registries.md'
---
The images for the *EDB Postgres for Kubernetes* operator, as well as various
@@ -8,16 +8,19 @@ operands, are kept in private container image registries under
`docker.enterprisedb.com`.
!!! Important
+
Access to the private registries requires an account with EDB and is
reserved to EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
Credentials will be funneled through your EDB account.
!!! Important
+
There is a bandwidth quota of 10GB/month per registry.
!!! Note
+
When installing the operator and operands from the private registry, the
- [license keys](./license_keys.md) are not needed.
+ [license keys](license_keys.md) are not needed.
## Which repository to choose?
@@ -37,6 +40,7 @@ login to the EDB container registry, for example through `docker login` or a
[`kubernetes.io/dockerconfigjson` pull secret](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types).
!!! Important
+
Each repository contains all the images you can access with your plan.
You don't need to connect to different repositories to access different
images, such as operator or operand images.
@@ -79,12 +83,13 @@ images available from the same private registries:
- EDB Postgres Extended (PGE)
!!! Note
+
PostgreSQL images are not available in the private registries, but are
readily available on `quay.io/enterprisedb/postgresql` or
`ghcr.io/enterprisedb/postgresql`.
These images follow the requirements and the conventions described in the
-["Container image requirements"](/postgres_for_kubernetes/latest/container_images/)
+["Container image requirements"](container_images.md)
page of the EDB Postgres for Kubernetes documentation.
In the table below you can find the image name prefix for each Postgres distribution:
@@ -146,7 +151,7 @@ and the token is the *password*.
The same credentials can be used for kubernetes to access the registry by
setting up a [`kubernetes.io/dockerconfigjson` pull secret](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types).
-As mentioned in the [installation document](./installation_upgrade.md), there
+As mentioned in the [installation document](installation_upgrade.md), there
are several different ways to install the operator.
If you are going to install using images from the private registry, you will
diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
index aedcc448496..2696b45c442 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
@@ -12,6 +12,7 @@ Red Hat OpenShift Container Platform users can test the certified operator for
EDB Postgres for Kubernetes on the [Red Hat OpenShift Local](https://developers.redhat.com/products/openshift-local/overview) (formerly Red Hat CodeReady Containers).
!!! Warning
+
The instructions contained in this section are for demonstration,
testing, and practice purposes only and must not be used in production.
@@ -22,19 +23,20 @@ By following these instructions you should be able to start a PostgreSQL
cluster on your local Kubernetes/Openshift installation and experiment with it.
!!! Important
+
Make sure that you have `kubectl` installed on your machine in order
to connect to the Kubernetes cluster, or `oc` if using OpenShift Local.
Please follow the Kubernetes documentation on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
or the Openshift documentation on [how to install `oc`](https://docs.openshift.com/container-platform/4.6/cli_reference/openshift_cli/getting-started-cli.html).
!!! Note
+
If you are running Openshift, use `oc` every time `kubectl` is mentioned
in this documentation. `kubectl` commands are compatible with `oc` ones.
## Part 1 - Setup the local Kubernetes/Openshift Local playground
-The first part is about installing Minikube, Kind, or OpenShift Local. Please spend some time
-reading about the systems and decide which one to proceed with.
+The first part is about installing Minikube, Kind, or OpenShift Local. Please spend some time reading about the systems and decide which one to proceed with.
After setting up one of them, please proceed with part 2.
We also provide instructions for setting up monitoring with Prometheus and
@@ -112,6 +114,8 @@ with the deployment of a PostgreSQL cluster.
## Part 3: Deploy a PostgreSQL cluster
+Unless specified in a cluster configuration file, EDB Postgres for Kubernetes will currently deploy Community PostgreSQL operands by default. See the section [Deploying EDB Postgres servers](#deploying-edb-postgres-servers) for more information.
+
As with any other deployment in Kubernetes, to deploy a PostgreSQL cluster
you need to apply a configuration file that defines your desired `Cluster`.
@@ -131,18 +135,12 @@ spec:
size: 1Gi
```
-!!! Note "Installing other operands"
- EDB Postgres for Kubernetes supports not just PostgreSQL, but EDB Postgres
- Extended (PGE) and EDB Postgres Advanced (EPAS).
- The images for those operands are kept in private registries. Please refer
- to the [private registry](private_edb_registry.md) document for instructions
- on deploying clusters using PGE or EPAS as operands.
-
!!! Note "There's more"
+
For more detailed information about the available options, please refer
to the ["API Reference" section](pg4k.v1.md).
-In order to create the 3-node PostgreSQL cluster, you need to run the following command:
+In order to create the 3-node Community PostgreSQL cluster, you need to run the following command:
```sh
kubectl apply -f cluster-example.yaml
@@ -165,14 +163,30 @@ kubectl get pods -l k8s.enterprisedb.io/cluster=
```
!!! Important
+
Note that we are using `k8s.enterprisedb.io/cluster` as the label. In the past you may
have seen or used `postgresql`. This label is being deprecated, and
will be dropped in the future. Please use `k8s.enterprisedb.io/cluster`.
+### Deploying EDB Postgres servers
+
By default, the operator will install the latest available minor version
-of the latest major version of PostgreSQL when the operator was released.
+of the latest major version of Community PostgreSQL when the operator was released.
You can override this by setting the `imageName` key in the `spec` section of
-the `Cluster` definition. For example, to install PostgreSQL 13.6:
+the `Cluster` definition. For example, to install EDB Postgres Advanced 16.4 you can use:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ # [...]
+spec:
+ # [...]
+ imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-advanced:16
+ #[...]
+```
+
+And to install EDB Postgres Extended 16 you can use:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -181,11 +195,12 @@ metadata:
# [...]
spec:
# [...]
- imageName: quay.io/enterprisedb/postgresql:13.6
+ imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-extended:16
#[...]
```
!!! Important
+
The immutable infrastructure paradigm requires that you always
point to a specific version of the container image.
Never use tags like `latest` or `13` in a production environment
@@ -195,12 +210,14 @@ spec:
to the image name, through the `:@sha256:` format.
!!! Note "There's more"
+
There are some examples cluster configurations bundled with the operator.
Please refer to the ["Examples" section](samples.md).
## Part 4: Monitor clusters with Prometheus and Grafana
!!! Important
+
Installing Prometheus and Grafana is beyond the scope of this project.
The instructions in this section are provided for experimentation and
illustration only.
@@ -249,6 +266,7 @@ After completion, you will have Prometheus, Grafana and Alert Manager installed
- The Grafana installation will be watching for a Grafana dashboard `ConfigMap`.
!!! Seealso
+
For further information about the above command, refer to the [helm install](https://helm.sh/docs/helm/helm_install/)
documentation.
@@ -353,6 +371,7 @@ EDB Postgres for Kubernetes provides a default dashboard for Grafana as part of
file and manually importing it via the GUI.
!!! Warning
+
Some graphs in the previous dashboard make use of metrics that are in alpha stage by the time
this was created, like `kubelet_volume_stats_available_bytes` and `kubelet_volume_stats_capacity_bytes`
producing some graphs to show `No data`.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
index e5df505549f..33c20399269 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
@@ -15,6 +15,7 @@ cluster. Recovery is instead a way to bootstrap a new Postgres cluster
starting from an available physical backup.
!!! Note
+
For details on the `bootstrap` stanza, see
[Bootstrap](bootstrap.md).
@@ -39,6 +40,7 @@ When performing a full recovery, you can also start the cluster
in replica mode (see [replica clusters](replica_cluster.md) for reference).
!!! Important
+
If using replica mode, make sure that the PostgreSQL configuration
(`.spec.postgresql.parameters`) of the recovered cluster is compatible with
the original one from a physical replication standpoint.
@@ -96,6 +98,7 @@ different names, you must specify these names before exiting the recovery phase,
as documented in ["Configure the application database"](#configure-the-application-database).
!!! Important
+
By default, the `recovery` method strictly uses the `name` of the
cluster in the `externalClusters` section as the name of the main folder
of the backup data within the object store. This name is normally reserved
@@ -103,6 +106,7 @@ as documented in ["Configure the application database"](#configure-the-applicati
using the `barmanObjectStore.serverName` property.
!!! Note
+
This example takes advantage of the parallel WAL restore feature,
dedicating up to 8 jobs to concurrently fetch the required WAL files from the
archive. This feature can appreciably reduce the recovery time. Make sure that
@@ -112,6 +116,7 @@ as documented in ["Configure the application database"](#configure-the-applicati
## Recovery from `VolumeSnapshot` objects
!!! Warning
+
When creating replicas after recovering the primary instance from
the volume snapshot, the operator might end up using `pg_basebackup`
to synchronize them. This behavior results in a slower process, depending
@@ -171,6 +176,7 @@ different names, you must specify these names before exiting the recovery phase,
as documented in ["Configure the application database"](#configure-the-application-database).
!!! Warning
+
If bootstrapping a replica-mode cluster from snapshots, to leverage
snapshots for the standby instances and not just the primary,
we recommend that you:
@@ -231,6 +237,7 @@ target timeline (`latest`). You can optionally specify a `recoveryTarget` to
perform a point-in-time recovery (see [Point in Time Recovery (PITR)](#point-in-time-recovery-pitr)).
!!! Important
+
Consider using the `barmanObjectStore.wal.maxParallel` option to speed
up WAL fetching from the archive by concurrently downloading the transaction
logs from the recovery object store.
@@ -243,6 +250,7 @@ time. PostgreSQL uses this technique to achieve PITR. The presence of a WAL
archive is mandatory.
!!! Important
+
PITR requires you to specify a recovery target by using the options
described in [Recovery targets](#recovery-targets).
@@ -301,6 +309,7 @@ If you assign a value to it (in the form of a Barman backup ID), the operator
uses that backup as the base for the recovery.
!!! Important
+
You need to make sure that such a backup exists and is accessible.
If you don't specify the backup ID, the operator detects the base backup for
@@ -355,11 +364,13 @@ spec:
```
!!! Note
+
If the backed-up cluster had `walStorage` enabled, you also must specify
the volume snapshot containing the `PGWAL` directory, as mentioned in
[Recovery from VolumeSnapshot objects](#recovery-from-volumesnapshot-objects).
!!! Warning
+
It's your responsibility to ensure that the end time of the base backup in
the volume snapshot is before the recovery target timestamp.
@@ -394,6 +405,7 @@ targetImmediate
taking the backup ended.
!!! Important
+
The operator can retrieve the closest backup when you specify either
`targetTime` or `targetLSN`. However, this isn't possible for the remaining
targets: `targetName`, `targetXID`, and `targetImmediate`. In such cases, it's
@@ -474,6 +486,7 @@ See [Bootstrap an empty cluster](bootstrap.md#bootstrap-an-empty-cluster-initdb)
for more information about secrets.
!!! Important
+
While the `Cluster` is in recovery mode, no changes to the database,
including the catalog, are permitted. This restriction includes any role
overrides, which are deferred until the `Cluster` transitions to primary.
@@ -524,6 +537,7 @@ For details and instructions on the `recovery` bootstrap method, see
[Bootstrap from a backup](bootstrap.md#bootstrap-from-a-backup-recovery).
!!! Important
+
If you're not familiar with how
[PostgreSQL PITR](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-PITR-RECOVERY)
works, we suggest that you configure the recovery cluster as the original
@@ -535,6 +549,7 @@ instance of the new cluster, and the init container starts recovering the
backup from the object storage.
!!! Important
+
The duration of the base backup copy in the new PVC depends on
the size of the backup, as well as the speed of both the network and the
storage.
@@ -599,6 +614,7 @@ There might be cases where the existing information in the storage buckets
could be overwritten by the new cluster.
!!! Warning
+
The operator includes a safety check to ensure a cluster doesn't overwrite
a storage bucket that contained information. A cluster that would overwrite
existing storage remains in the state `Setting up primary` with pods in an
@@ -606,6 +622,7 @@ could be overwritten by the new cluster.
recoveredCluster: Expected empty archive`.
!!! Important
+
If you set the `k8s.enterprisedb.io/skipEmptyWalArchiveCheck` annotation to `enabled`
in the recovered cluster, you can skip the safety check. We don't recommend
skipping the check because, for the general use case, the check works fine.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
index 51dd6a5e3b7..169034068e0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
@@ -166,6 +166,7 @@ continuous recovery are thoroughly explained below.
## Distributed Topology
!!! Important
+
The Distributed Topology strategy was introduced in EDB Postgres for Kubernetes 1.24.
### Planning for a Distributed PostgreSQL Database
@@ -249,6 +250,7 @@ involving:
These processes are described in the next sections.
!!! Important
+
Before you proceed, ensure you review the ["About PostgreSQL Roles" section](#about-postgresql-roles)
above and use identical role definitions, including secrets, in all
`Cluster` objects participating in the distributed topology.
@@ -305,6 +307,7 @@ You can obtain the `demotionToken` using the `cnp` plugin by checking the
cluster's status. The token is listed under the `Demotion token` section.
!!! Note
+
The `demotionToken` obtained from `cluster-eu-south` will serve as the
`promotionToken` for `cluster-eu-central`.
@@ -337,6 +340,7 @@ replica:
```
!!! Warning
+
It is crucial to apply the changes to the `primary` and `promotionToken`
fields simultaneously. If the promotion token is omitted, a failover will be
triggered, necessitating a rebuild of the former primary.
@@ -367,6 +371,7 @@ clusters.
## Standalone Replica Clusters
!!! Important
+
Standalone Replica Clusters were previously known as Replica Clusters
before the introduction of the Distributed Topology strategy in EDB Postgres for Kubernetes
1.24.
@@ -383,11 +388,13 @@ continuous recovery mode and becomes a primary cluster, completely detached
from the original source.
!!! Warning
+
Disabling replication is an **irreversible** operation. Once replication is
disabled and the designated primary is promoted to primary, the replica cluster
and the source cluster become two independent clusters definitively.
!!! Important
+
Standalone replica clusters are suitable for several use cases, primarily
involving read-only workloads. If you are planning to setup a disaster
recovery solution, look into "Distributed Topology" above.
@@ -515,6 +522,7 @@ a backup of the source cluster has been created already.
```
!!! Note
+
To use streaming replication between the source cluster and the replica
cluster, we need to make sure there is network connectivity between the two
clusters, and that all the necessary secrets which hold passwords or
@@ -595,6 +603,7 @@ The main use cases of delayed replicas can be summarized into:
undesirable changes.
!!! Warning
+
The `minApplyDelay` option of delayed replicas cannot be used in
conjunction with `promotionToken`.
@@ -604,6 +613,7 @@ Adjust the delay duration based on your specific needs and the criticality of
your data.
!!! Important
+
Always measure your goals. Depending on your environment, it might be more
efficient to rely on volume snapshot-based recovery for faster outcomes.
Evaluate and choose the approach that best aligns with your unique requirements
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
index e09d7946a96..2d997db6eb5 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
@@ -4,46 +4,47 @@ originalFilePath: 'src/replication.md'
---
Physical replication is one of the strengths of PostgreSQL and one of the
-reasons why some of the largest organizations in the world have chosen
-it for the management of their data in business continuity contexts.
-Primarily used to achieve high availability, physical replication also allows
-scale-out of read-only workloads and offloading of some work from the primary.
+reasons why some of the largest organizations in the world have chosen it for
+the management of their data in business continuity contexts. Primarily used to
+achieve high availability, physical replication also allows scale-out of
+read-only workloads and offloading of some work from the primary.
!!! Important
+
This section is about replication within the same `Cluster` resource
managed in the same Kubernetes cluster. For information about how to
replicate with another Postgres `Cluster` resource, even across different
- Kubernetes clusters, please refer to the ["Replica clusters"](replica_cluster.md)
- section.
+ Kubernetes clusters, please refer to the
+ ["Replica clusters"](replica_cluster.md) section.
## Application-level replication
-Having contributed throughout the years to the replication feature in PostgreSQL,
-we have decided to build high availability in EDB Postgres for Kubernetes on top of
-the native physical replication technology, and integrate it
-directly in the Kubernetes API.
+Having contributed throughout the years to the replication feature in
+PostgreSQL, we have decided to build high availability in EDB Postgres for Kubernetes on top
+of the native physical replication technology, and integrate it directly in the
+Kubernetes API.
-In Kubernetes terms, this is referred to as **application-level replication**, in
-contrast with *storage-level replication*.
+In Kubernetes terms, this is referred to as **application-level replication**,
+in contrast with *storage-level replication*.
## A very mature technology
PostgreSQL has a very robust and mature native framework for replicating data
-from the primary instance to one or more replicas, built around the
-concept of transactional changes continuously stored in the WAL (Write Ahead Log).
+from the primary instance to one or more replicas, built around the concept of
+transactional changes continuously stored in the WAL (Write Ahead Log).
Started as the evolution of crash recovery and point in time recovery
technologies, physical replication was first introduced in PostgreSQL 8.2
-(2006) through WAL shipping from the primary to a warm standby in
-continuous recovery.
+(2006) through WAL shipping from the primary to a warm standby in continuous
+recovery.
PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through
*hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the
-transaction level, supporting RPO=0 clusters. Cascading replication was added
-in PostgreSQL 9.2 (2012). The foundations for logical replication were
-established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native
-support for the publisher/subscriber pattern to replicate data from an origin
-to a destination. The table below summarizes these milestones.
+transaction level, supporting RPO=0 clusters. Cascading replication was added in
+PostgreSQL 9.2 (2012). The foundations for logical replication were established
+in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native support for
+the publisher/subscriber pattern to replicate data from an origin to a
+destination. The table below summarizes these milestones.
| Version | Year | Feature |
| :-----: | :--: | --------------------------------------------------------------------- |
@@ -59,9 +60,9 @@ versions.
## Streaming replication support
-At the moment, EDB Postgres for Kubernetes natively and transparently manages
-physical streaming replicas within a cluster in a declarative way, based on
-the number of provided `instances` in the `spec`:
+At the moment, EDB Postgres for Kubernetes natively and transparently manages physical
+streaming replicas within a cluster in a declarative way, based on the number of
+provided `instances` in the `spec`:
```
replicas = instances - 1 (where instances > 0)
@@ -72,13 +73,13 @@ called `streaming_replica` as follows:
```sql
CREATE USER streaming_replica WITH REPLICATION;
- -- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS
+-- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS
```
Out of the box, the operator automatically sets up streaming replication within
the cluster over an encrypted channel and enforces TLS client certificate
-authentication for the `streaming_replica` user - as highlighted by the following
-excerpt taken from `pg_hba.conf`:
+authentication for the `streaming_replica` user - as highlighted by the
+following excerpt taken from `pg_hba.conf`:
```
# Require client certificate authentication for the streaming_replica user
@@ -87,6 +88,7 @@ hostssl replication streaming_replica all cert
```
!!! Seealso "Certificates"
+
For details on how EDB Postgres for Kubernetes manages certificates, please refer
to the ["Certificates" section](certificates.md#client-streaming_replica-certificate)
in the documentation.
@@ -96,6 +98,7 @@ HA cluster, ensuring that WAL files required by each standby are retained on
the primary's storage, even after a failover or switchover.
!!! Seealso "Replication slots for High Availability"
+
For details on how EDB Postgres for Kubernetes automatically manages replication slots for the
High Availability replicas, please refer to the
["Replication slots for High Availability" section](#replication-slots-for-high-availability)
@@ -104,9 +107,9 @@ the primary's storage, even after a failover or switchover.
### Continuous backup integration
In case continuous backup is configured in the cluster, EDB Postgres for Kubernetes
-transparently configures replicas to take advantage of `restore_command` when
-in continuous recovery. As a result, PostgreSQL can use the WAL archive
-as a fallback option whenever pulling WALs via streaming replication fails.
+transparently configures replicas to take advantage of `restore_command` when in
+continuous recovery. As a result, PostgreSQL can use the WAL archive as a
+fallback option whenever pulling WALs via streaming replication fails.
## Synchronous Replication
@@ -114,16 +117,20 @@ EDB Postgres for Kubernetes supports both
[quorum-based and priority-based synchronous replication for PostgreSQL](https://www.postgresql.org/docs/current/warm-standby.html#SYNCHRONOUS-REPLICATION).
!!! Warning
- Please be aware that synchronous replication will halt your write
- operations if the required number of standby nodes to replicate WAL data for
- transaction commits is unavailable. In such cases, write operations for your
- applications will hang. This behavior differs from the previous implementation
- in EDB Postgres for Kubernetes but aligns with the expectations of a PostgreSQL DBA for this
- capability.
-
-While direct configuration of the `synchronous_standby_names` option is
-prohibited, EDB Postgres for Kubernetes allows you to customize its content and extend
-synchronous replication beyond the `Cluster` resource through the
+
+ By default, synchronous replication pauses write operations if the required
+ number of standby nodes for WAL replication during transaction commits is
+ unavailable. This behavior prioritizes data durability and aligns with
+ PostgreSQL DBA best practices. However, if self-healing is a higher priority
+ than strict data durability in your setup, this setting can be adjusted. For
+ details on managing this behavior, refer to the [Data Durability and Synchronous Replication](#data-durability-and-synchronous-replication)
+ section.
+
+Direct configuration of the `synchronous_standby_names` option is not
+permitted. However, EDB Postgres for Kubernetes automatically populates this option with the
+names of local pods, while also allowing customization to extend synchronous
+replication beyond the `Cluster` resource.
+This can be achieved through the
[`.spec.postgresql.synchronous` stanza](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-SynchronousReplicaConfiguration).
Synchronous replication is disabled by default (the `synchronous` stanza is not
@@ -135,17 +142,39 @@ defined). When defined, two options are mandatory:
### Quorum-based Synchronous Replication
-PostgreSQL's quorum-based synchronous replication makes transaction commits
-wait until their WAL records are replicated to at least a certain number of
-standbys. To use this method, set `method` to `any`.
+In PostgreSQL, quorum-based synchronous replication ensures that transaction
+commits wait until their WAL records are replicated to a specified number of
+standbys. To enable this, set the `method` to `any`.
+
+This replication method is the most common setup for an EDB Postgres for Kubernetes cluster.
+
+#### Example
+
+The example below, based on a typical `cluster-example` configuration with
+three instances, sets up quorum-based synchronous replication with at least one
+instance:
+
+```yaml
+postgresql:
+ synchronous:
+ method: any
+ number: 1
+```
+
+With this configuration, EDB Postgres for Kubernetes automatically sets the content of
+`synchronous_standby_names` as follows:
+
+```console
+ANY 1 (cluster-example-2, cluster-example-3, cluster-example-1)
+```
-#### Migrating from the Deprecated Synchronous Replication Implementation
+#### Migrating from the Deprecated Synchronous Replication Implementation
-This section provides instructions on migrating your existing quorum-based
-synchronous replication, defined using the deprecated form, to the new and more
-robust capability in EDB Postgres for Kubernetes.
+This section outlines how to migrate from the deprecated quorum-based
+synchronous replication format to the newer, more robust implementation in
+EDB Postgres for Kubernetes.
-Suppose you have the following manifest:
+Given the following manifest:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -154,7 +183,6 @@ metadata:
name: angus
spec:
instances: 3
-
minSyncReplicas: 1
maxSyncReplicas: 1
@@ -162,7 +190,7 @@ spec:
size: 1G
```
-You can convert it to the new quorum-based format as follows:
+You can update it to the new format as follows:
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -179,14 +207,11 @@ spec:
synchronous:
method: any
number: 1
+ dataDurability: required
```
-!!! Important
- The primary difference with the new capability is that PostgreSQL will
- always prioritize data durability over high availability. Consequently, if no
- replica is available, write operations on the primary will be blocked. However,
- this behavior is consistent with the expectations of a PostgreSQL DBA for this
- capability.
+To prioritize self-healing over strict data durability, set `dataDurability`
+to `preferred` instead.
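+
+As an illustration only, here is a minimal sketch of the corresponding
+`postgresql` stanza with self-healing favored over strict durability (all field
+names are those documented above):
+
+```yaml
+postgresql:
+  synchronous:
+    method: any
+    number: 1
+    dataDurability: preferred
+```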
### Priority-based Synchronous Replication
@@ -199,6 +224,7 @@ immediately replaced by the next-highest-priority standby. To use this method,
set `method` to `first`.
!!! Important
+
Currently, this method is most useful when extending
synchronous replication beyond the current cluster using the
`maxStandbyNamesFromCluster`, `standbyNamesPre`, and `standbyNamesPost`
@@ -224,32 +250,18 @@ the PostgreSQL cluster. You can customize the content of
operator.
!!! Warning
+
You are responsible for ensuring the correct names in `standbyNamesPre` and
- `standbyNamesPost`. EDB Postgres for Kubernetes expects that you manage any standby with an
- `application_name` listed here, ensuring their high availability. Incorrect
- entries can jeopardize your PostgreSQL database uptime.
+ `standbyNamesPost`. EDB Postgres for Kubernetes expects that you manage any standby with
+ an `application_name` listed here, ensuring their high availability.
+ Incorrect entries can jeopardize your PostgreSQL database uptime.
-### Examples
+#### Examples
Here are some examples, all based on a `cluster-example` with three instances:
If you set:
-```yaml
-postgresql:
- synchronous:
- method: any
- number: 1
-```
-
-The content of `synchronous_standby_names` will be:
-
-```console
-ANY 1 (cluster-example-2, cluster-example-3)
-```
-
-If you set:
-
```yaml
postgresql:
synchronous:
@@ -305,19 +317,160 @@ The `synchronous_standby_names` option will look like:
FIRST 2 (angus, cluster-example-2, malcolm)
```
+### Data Durability and Synchronous Replication
+
+The `dataDurability` option in the `.spec.postgresql.synchronous` stanza
+controls the trade-off between data safety and availability for synchronous
+replication. It can be set to `required` or `preferred`, with the default being
+`required` if not specified.
+
+!!! Important
+
+ `preferred` can only be used when `standbyNamesPre` and `standbyNamesPost`
+ are unset.
+
+#### Required Data Durability
+
+When `dataDurability` is set to `required`, PostgreSQL only considers
+transactions committed once WAL (Write-Ahead Log) records have been replicated
+to the specified number of synchronous standbys. This setting prioritizes data
+safety over availability, meaning write operations will pause if the required
+number of synchronous standbys is unavailable. This ensures zero data loss
+(RPO=0) but may reduce database availability during network disruptions or
+standby failures.
+
+Synchronous standbys are selected in this priority order:
+
+1. Healthy instances
+2. Unhealthy instances
+3. Primary
+
+The list is then truncated based on `maxStandbyNamesFromCluster` if this value
+is set, prioritizing healthy instances and ensuring `synchronous_standby_names`
+is populated.
+
+##### Example
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: foo
+spec:
+ instances: 3
+ postgresql:
+ synchronous:
+ method: any
+ number: 1
+ dataDurability: required
+```
+
+1. Initial state. The content of `synchronous_standby_names` is:
+
+ ```
+ ANY 1 ("foo-2","foo-3","foo-1")
+ ```
+
+2. `foo-2` becomes unavailable. It gets pushed back in priority:
+
+ ```
+ ANY 1 ("foo-3","foo-2","foo-1")
+ ```
+
+3. `foo-3` also becomes unavailable. The list contains no healthy standbys:
+
+ ```
+ ANY 1 ("foo-2","foo-3","foo-1")
+ ```
+
+ At this point no write operations will be allowed until at least one of the
+ standbys is available again.
+
+4. When the standbys become available again, `synchronous_standby_names`
+   returns to its initial state.
+
+#### Preferred Data Durability
+
+When `dataDurability` is set to `preferred`, the required number of synchronous
+instances adjusts based on the number of available standbys. PostgreSQL will
+attempt to replicate WAL records to the designated number of synchronous
+standbys, but write operations will continue even if fewer than the requested
+number of standbys are available.
+
+This setting balances data safety with availability, enabling applications to
+continue writing during temporary standby unavailability—hence, it’s also known
+as *self-healing mode*.
+
+!!! Warning
+
+ This mode may result in data loss if all standbys become unavailable.
+
+With `preferred` data durability, **only healthy replicas** are included in
+`synchronous_standby_names`.
+
+##### Example
+
+Consider the following example. For demonstration, we’ll use a cluster named
+`bar` with 5 instances and 2 synchronous standbys:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: bar
+spec:
+ instances: 5
+ postgresql:
+ synchronous:
+ method: any
+ number: 2
+      dataDurability: preferred
+```
+
+1. Initial state. The content of `synchronous_standby_names` is:
+
+ ```
+ ANY 2 ("bar-2","bar-3", "bar-4", "bar-5")
+ ```
+
+2. `bar-2` and `bar-3` become unavailable. They are removed from the list:
+
+ ```
+ ANY 2 ("bar-4", "bar-5")
+ ```
+
+3. `bar-4` also becomes unavailable. It gets removed from the list. Since the
+   number of available standbys is now lower than the requested number of
+   synchronous standbys, the requirement is reduced accordingly:
+
+ ```
+ ANY 1 ("bar-5")
+ ```
+
+4. `bar-5` also becomes unavailable. `synchronous_standby_names` becomes empty,
+   disabling synchronous replication completely. Write operations will continue,
+   but with the risk of data loss in case of a primary failure.
+
+5. When the replicas become available again, `synchronous_standby_names`
+   returns to its initial state.
+
## Synchronous Replication (Deprecated)
!!! Warning
+
Prior to EDB Postgres for Kubernetes 1.24, only the quorum-based synchronous replication
- implementation was supported. Although this method is now deprecated, it will
- not be removed anytime soon.
- The new method prioritizes data durability over self-healing and offers
- more robust features, including priority-based synchronous replication and full
+ implementation was supported. Although this method is now deprecated, it
+ will not be removed anytime soon.
+ The new method prioritizes data durability over self-healing and offers more
+ robust features, including priority-based synchronous replication and full
control over the `synchronous_standby_names` option.
It is recommended to gradually migrate to the new configuration method for
synchronous replication, as explained in the previous paragraph.
!!! Important
+
The deprecated method and the new method are mutually exclusive.
EDB Postgres for Kubernetes supports the configuration of **quorum-based synchronous
@@ -328,6 +481,7 @@ For self-healing purposes, the operator always compares these two values with
the number of available replicas to determine the quorum.
!!! Important
+
By default, synchronous replication selects among all the available
replicas indistinctively. You can limit on which nodes your synchronous
replicas can be scheduled, by working on node labels through the
@@ -350,6 +504,7 @@ Where:
- `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster
!!! Warning
+
To provide self-healing capabilities, the operator can ignore
`minSyncReplicas` if such value is higher than the currently available
number of replicas. Synchronous replication is automatically disabled
@@ -362,6 +517,7 @@ transaction commits wait until their WAL records are replicated to at least the
requested number of synchronous standbys in the list*.
!!! Important
+
Even though the operator chooses self-healing over enforcement of
synchronous replication settings, our recommendation is to plan for
synchronous replication only in clusters with 3+ instances or,
@@ -375,20 +531,23 @@ rules based on the node labels where the PVC holding the PGDATA and the
Postgres pod are.
!!! Seealso "Scheduling"
+
For more information on the general pod affinity and anti-affinity rules,
please check the ["Scheduling" section](scheduling.md).
!!! Warning
+
The `.spec.postgresql.syncReplicaElectionConstraint` option only applies to the
legacy implementation of synchronous replication
(see ["Synchronous Replication (Deprecated)"](replication.md#synchronous-replication-deprecated)).
-As an example use-case for this feature: in a cluster with a single sync replica,
-we would be able to ensure the sync replica will be in a different availability
-zone from the primary instance, usually identified by the `topology.kubernetes.io/zone`
+As an example use case for this feature: in a cluster with a single sync
+replica, you can ensure that the sync replica is placed in a different
+availability zone from the primary instance, usually identified by the
+`topology.kubernetes.io/zone`
[label on a node](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone).
-This would increase the robustness of the cluster in case of an outage in a single
-availability zone, especially in terms of recovery point objective (RPO).
+This would increase the robustness of the cluster in case of an outage in a
+single availability zone, especially in terms of recovery point objective (RPO).
The idea of anti-affinity is to ensure that sync replicas that participate in
the quorum are chosen from pods running on nodes that have different values for
@@ -397,14 +556,15 @@ where the primary is currently in execution. If no node matches such criteria,
the replicas are eligible for synchronous replication.
!!! Important
+
The self-healing enforcement still applies while defining additional
constraints for synchronous replica election
(see ["Synchronous replication"](replication.md#synchronous-replication)).
The example below shows how this can be done through the
`syncReplicaElectionConstraint` section within `.spec.postgresql`.
-`nodeLabelsAntiAffinity` allows you to specify those node labels that need to
-be evaluated to make sure that synchronous replication will be dynamically
+`nodeLabelsAntiAffinity` allows you to specify those node labels that need to be
+evaluated to make sure that synchronous replication will be dynamically
configured by the operator between the current primary and the replicas which
are located on nodes having a value of the availability zone label different
from that of the node where the primary is:
@@ -428,22 +588,24 @@ as storage, CPU, or memory.
[Replication slots](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS)
are a native PostgreSQL feature introduced in 9.4 that provides an automated way
to ensure that the primary does not remove WAL segments until all the attached
-streaming replication clients have received them, and that the primary
-does not remove rows which could cause a recovery conflict even when the
-standby is (temporarily) disconnected.
+streaming replication clients have received them, and that the primary does not
+remove rows which could cause a recovery conflict even when the standby is
+(temporarily) disconnected.
A replication slot exists solely on the instance that created it, and PostgreSQL
-does not replicate it on the standby servers. As a result, after a failover
-or a switchover, the new primary does not contain the replication slot from
-the old primary. This can create problems for the streaming replication clients
-that were connected to the old primary and have lost their slot.
+does not replicate it on the standby servers. As a result, after a failover or a
+switchover, the new primary does not contain the replication slot from the old
+primary. This can create problems for the streaming replication clients that
+were connected to the old primary and have lost their slot.
EDB Postgres for Kubernetes provides a turn-key solution to synchronize the content of
physical replication slots from the primary to each standby, addressing two use
cases:
- the replication slots automatically created for the High Availability of the
- Postgres cluster (see ["Replication slots for High Availability" below](#replication-slots-for-high-availability) for details)
+  Postgres cluster
+  (see ["Replication slots for High Availability" below](#replication-slots-for-high-availability)
+  for details)
- [user-defined replication slots](#user-defined-replication-slots) created on
the primary
@@ -451,22 +613,22 @@ cases:
EDB Postgres for Kubernetes fills this gap by introducing the concept of cluster-managed
replication slots, starting with high availability clusters. This feature
-automatically manages physical replication slots for each hot standby replica
-in the High Availability cluster, both in the primary and the standby.
+automatically manages physical replication slots for each hot standby replica in
+the High Availability cluster, both in the primary and the standby.
In EDB Postgres for Kubernetes, we use the terms:
- **Primary HA slot**: a physical replication slot whose lifecycle is entirely
- managed by the current primary of the cluster and whose purpose is to map to
- a specific standby in streaming replication. Such a slot lives on the primary
+ managed by the current primary of the cluster and whose purpose is to map to a
+ specific standby in streaming replication. Such a slot lives on the primary
only.
-- **Standby HA slot**: a physical replication slot for a standby whose
- lifecycle is entirely managed by another standby in the cluster, based on the
- content of the `pg_replication_slots` view in the primary, and updated at regular
+- **Standby HA slot**: a physical replication slot for a standby whose lifecycle
+ is entirely managed by another standby in the cluster, based on the content of
+ the `pg_replication_slots` view in the primary, and updated at regular
intervals using `pg_replication_slot_advance()`.
-This feature is enabled by default and can be disabled via configuration.
-For details, please refer to the
+This feature is enabled by default and can be disabled via configuration. For
+details, please refer to the
["replicationSlots" section in the API reference](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration).
Here follows a brief description of the main options:
@@ -474,13 +636,13 @@ Here follows a brief description of the main options:
: if `true`, the feature is enabled (`true` is the default)
`.spec.replicationSlots.highAvailability.slotPrefix`
-: the prefix that identifies replication slots managed by the operator
- for this feature (default: `_cnp_`)
+: the prefix that identifies replication slots managed by the operator for this
+feature (default: `_cnp_`)
`.spec.replicationSlots.updateInterval`
: how often the standby synchronizes the position of the local copy of the
- replication slots with the position on the current primary, expressed in
- seconds (default: 30)
+replication slots with the position on the current primary, expressed in
+seconds (default: 30)
Although it is not recommended, if you desire a different behavior, you can
customize the above options.
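+
+For illustration purposes only, a minimal sketch of a `Cluster` excerpt that
+customizes these settings might look as follows (the values shown are simply
+the defaults described above):
+
+```yaml
+spec:
+  replicationSlots:
+    highAvailability:
+      enabled: true
+      slotPrefix: _cnp_
+    updateInterval: 30
+```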
@@ -510,6 +672,7 @@ Although EDB Postgres for Kubernetes doesn't support a way to declaratively defi
replication slots, you can still [create your own slots via SQL](https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-REPLICATION).
!!! Info
+
At the moment, we don't have any plans to manage replication slots
in a declarative way, but it might change depending on the feedback
we receive from users. The reason is that replication slots exist
@@ -555,6 +718,7 @@ Here follows a brief description of the main options:
exclude specific slots based on naming conventions.
!!! Warning
+
Users utilizing this feature should carefully monitor user-defined replication
slots to ensure they align with their operational requirements and do not
interfere with the failover process.
@@ -582,18 +746,18 @@ spec:
### Capping the WAL size retained for replication slots
-When replication slots is enabled, you might end up running out of disk
-space due to PostgreSQL trying to retain WAL files requested by a replication
-slot. This might happen due to a standby that is (temporarily?) down, or
-lagging, or simply an orphan replication slot.
+When replication slots are enabled, you might end up running out of disk space
+because PostgreSQL retains the WAL files requested by a replication slot. This
+can happen when a standby is temporarily down or lagging, or simply because of
+an orphaned replication slot.
Starting with PostgreSQL 13, you can take advantage of the
[`max_slot_wal_keep_size`](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-MAX-SLOT-WAL-KEEP-SIZE)
configuration option controlling the maximum size of WAL files that replication
-slots are allowed to retain in the `pg_wal` directory at checkpoint time.
-By default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that
-replication slots may retain an unlimited amount of WAL files.
-As a result, our recommendation is to explicitly set `max_slot_wal_keep_size`
+slots are allowed to retain in the `pg_wal` directory at checkpoint time. By
+default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that
+replication slots may retain an unlimited number of WAL files. As a result, our
+recommendation is to explicitly set `max_slot_wal_keep_size`
when replication slots support is enabled. For example:
```ini
@@ -612,5 +776,6 @@ key information such as the name of the slot, the type, whether it is active,
the lag from the primary.
!!! Seealso "Monitoring"
+
Please refer to the ["Monitoring" section](monitoring.md) for details on
how to monitor a EDB Postgres for Kubernetes deployment.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
index a516d7459ae..11ad2f303bf 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
@@ -95,6 +95,7 @@ For more details, please refer to the ["Resource Consumption"](https://www.postg
section in the PostgreSQL documentation.
!!! Seealso "Managing Compute Resources for Containers"
+
For more details on resource management, please refer to the
["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
page from the Kubernetes documentation.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
index 2c23ed4fe4c..9d5dae8ae16 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
@@ -7,6 +7,7 @@ The operator allows changing the PostgreSQL version used in a cluster while
applications are running against it.
!!! Important
+
Only upgrades for PostgreSQL minor releases are supported.
Rolling upgrades are started when:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
index 52ab68303fb..0727a18a868 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
@@ -7,11 +7,13 @@ The examples show configuration files for setting up
your PostgreSQL cluster.
!!! Important
+
These examples are for demonstration and experimentation
purposes. You can execute them on a personal Kubernetes cluster with Minikube
or Kind, as described in [Quick start](quickstart.md).
!!! Seealso "Reference"
+
For a list of available options, see [API reference](pg4k.v1.md).
## Basics
@@ -20,6 +22,14 @@ your PostgreSQL cluster.
: [`cluster-example.yaml`](../samples/cluster-example.yaml)
A basic example of a cluster.
+**EDB Postgres Advanced Server (EPAS) cluster**
+: [`cluster-example-epas.yaml`](../samples/cluster-example-epas.yaml)
+ A basic example of an EPAS cluster.
+
+**EDB Postgres Extended (PGE) cluster**
+: [`cluster-example-pge.yaml`](../samples/cluster-example-pge.yaml)
+ A basic example of a PGE cluster.
+
**Custom cluster**
: [`cluster-example-custom.yaml`](../samples/cluster-example-custom.yaml)
A basic cluster that uses the default storage class and custom parameters for
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-epas.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-epas.yaml
index c06d19e1b7d..b8c3c4475fe 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-epas.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-epas.yaml
@@ -1,14 +1,10 @@
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
- name: cluster-example
+ name: postgresql-advanced-cluster
spec:
instances: 3
- # imageName: docker-epas:13
-
- bootstrap:
- initdb:
- redwood: false
+ imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-advanced:16
storage:
size: 1Gi
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-pge.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-pge.yaml
new file mode 100644
index 00000000000..8d4f74adf21
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-pge.yaml
@@ -0,0 +1,10 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: postgresql-extended-cluster
+spec:
+ instances: 3
+ imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-extended:16
+
+ storage:
+ size: 1Gi
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-tde.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-tde.yaml
index 79ae2b0b8b8..88f34e73752 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-tde.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-tde.yaml
@@ -13,7 +13,7 @@ metadata:
name: cluster-example
spec:
instances: 3
- imageName: ghcr.io/enterprisedb/edb-postgres-advanced:16
+ imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-advanced:16
postgresql:
epas:
tde:
@@ -22,9 +22,5 @@ spec:
name: tde-key
key: key
- bootstrap:
- initdb:
- redwood: true
-
storage:
size: 1Gi
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-fail.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-fail.yaml
new file mode 100644
index 00000000000..44260589b88
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-fail.yaml
@@ -0,0 +1,9 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+ name: db-two
+spec:
+ name: two
+ owner: app-two
+ cluster:
+ name: cluster-example
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-icu.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-icu.yaml
new file mode 100644
index 00000000000..0251f5b5b6e
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example-icu.yaml
@@ -0,0 +1,16 @@
+# NOTE: this manifest will only work properly if the Postgres version supports
+# ICU locales and rules (version 16 and newer)
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+ name: db-icu
+spec:
+ name: declarative-icu
+ owner: app
+ encoding: UTF8
+ locale_provider: icu
+ icu_locale: en
+ icu_rules: fr
+ template: template0
+ cluster:
+ name: cluster-example
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/database-example.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example.yaml
new file mode 100644
index 00000000000..c951ecdce4d
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/database-example.yaml
@@ -0,0 +1,9 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Database
+metadata:
+ name: db-one
+spec:
+ name: one
+ owner: app
+ cluster:
+ name: cluster-example
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
index 34aae13b846..68ef76e5540 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
@@ -29,7 +29,7 @@ spec:
description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }}
summary: Number of transactions from the frozen XID to the current one
expr: |-
- cnp_pg_database_xid_age > 150000000
+ cnp_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
index 8148c9f1dbb..d55d3f576ab 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
@@ -7,6 +7,7 @@ Scheduling, in Kubernetes, is the process responsible for placing a new pod on
the best node possible, based on several criteria.
!!! Seealso "Kubernetes documentation"
+
Please refer to the
[Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/)
for more information on scheduling, including all the available policies. On
@@ -92,6 +93,7 @@ if resources are insufficient—this is particularly relevant when using [Cluste
for automated horizontal scaling in a Kubernetes cluster.
!!! Seealso "Inter-pod Affinity and Anti-Affinity"
+
For more details, refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity).
### Topology Considerations
@@ -115,6 +117,7 @@ be added to those generated by the operator, if enabled, or used directly if
the operator-generated rules are disabled.
!!! Note
+
When using `additionalPodAntiAffinity` or `additionalPodAffinity`, you must
provide the full `podAntiAffinity` or `podAffinity` structure expected by the
Pod specification. The following YAML example demonstrates how to configure
@@ -156,12 +159,14 @@ Tolerations can be configured for all the pods of a Cluster through the
for tolerations.
!!! Seealso "Taints and Tolerations"
+
More information on taints and tolerations can be found in the
[Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
## Isolating PostgreSQL workloads
!!! Important
+
Before proceeding, please ensure you have read the
["Architecture"](architecture.md) section of the documentation.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/service_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/service_management.mdx
index 4650b19cf04..712dd17bfdf 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/service_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/service_management.mdx
@@ -20,6 +20,7 @@ resource, with the following conventions:
- All services are of type `ClusterIP`.
!!! Important
+
Default service names are reserved for EDB Postgres for Kubernetes usage.
While this setup covers most use cases for accessing PostgreSQL within the same
@@ -42,6 +43,7 @@ You can disable any or all of the `ro` and `r` default services through the
[`managed.services.disabledDefaultServices` option](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-ManagedServices).
!!! Important
+
The `rw` service is essential and cannot be disabled because EDB Postgres for Kubernetes
relies on it to ensure PostgreSQL replication.
@@ -58,6 +60,7 @@ managed:
## Adding Your Own Services
!!! Important
+
When defining your own services, you cannot use any of the default reserved
service names that follow the convention `-`. It is
your responsibility to pick a unique name for the service in the Kubernetes
@@ -76,6 +79,7 @@ You must provide a `name` to the service and avoid defining the `selector`
field, as it is managed by the operator.
!!! Warning
+
Service templates give you unlimited possibilities in terms of configuring
network access to your PostgreSQL database. This translates into greater
responsibility on your end to ensure that services work as expected.
@@ -89,6 +93,7 @@ Alternatively, the `recreate` strategy deletes the existing service and
recreates it from the template.
!!! Warning
+
The `recreate` strategy will cause a service disruption with every
change. However, it may be necessary for modifying certain
parameters that can only be set during service creation.
@@ -131,5 +136,6 @@ Be aware that allowing access to a database from the public network could
expose your database to potential attacks from malicious users.
!!! Warning
+
Ensure you secure your database before granting external access, or make
sure your Kubernetes cluster is only reachable from a private network.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
index 7ee20950297..6f2cc8f8932 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/ssl_connections.md'
---
!!! Seealso "Certificates"
+
See [Certificates](certificates.md)
for more details on how EDB Postgres for Kubernetes supports TLS certificates.
@@ -24,6 +25,7 @@ convention by way of the `initdb` configuration in the `bootstrap` section.)
## Issuing a new certificate
!!! Seealso "About CNP plugin for kubectl"
+
See the [Certificates in the EDB Postgres for Kubernetes plugin](kubectl-plugin.md#certificates)
content for details on how to use the plugin for kubectl.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
index f526be3575d..a39bd97025f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
@@ -10,6 +10,7 @@ requirements that apply to traditional environments, such as virtual machines
and bare metal, are also valid in container contexts managed by Kubernetes.
!!! Important
+
When it comes to dynamically provisioned storage,
Kubernetes has its own specifics. These include *storage classes*, *persistent
volumes*, and *Persistent Volume Claims (PVCs)*. You need to own these
@@ -35,6 +36,7 @@ for high transactional and very large database (VLDB) workloads, as it
guarantees higher and more predictable performance.
!!! Warning
+
Before you deploy a PostgreSQL cluster with EDB Postgres for Kubernetes,
ensure that the storage you're using is recommended for database
workloads. We recommend clearly setting performance expectations by
@@ -42,6 +44,7 @@ guarantees higher and more predictable performance.
and then the database using [pgbench](https://www.postgresql.org/docs/current/pgbench.html).
!!! Info
+
EDB Postgres for Kubernetes doesn't use `StatefulSet` for managing data persistence.
Rather, it manages PVCs directly. If you want
to know more, see
@@ -54,6 +57,7 @@ we recommend that you also consider this aspect when you choose your storage
solution, especially if you manage very large databases.
!!! Important
+
See the Kubernetes documentation for a list of all
the supported [container storage interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html)
that provide snapshot capabilities.
@@ -73,6 +77,7 @@ Briefly, we recommend operating at two levels:
distributed with PostgreSQL
!!! Important
+
You must measure both the storage and database performance before putting
the database into production. These results are extremely valuable not just in
the planning phase (for example, capacity planning). They are also valuable in
@@ -109,6 +114,7 @@ defined as a *PVC group*.
## Configuration via a storage class
!!! Important
+
EDB Postgres for Kubernetes was designed to work interchangeably with all storage classes.
As usual, we recommend properly benchmarking the storage class in a
controlled environment before deploying to production.
@@ -177,6 +183,7 @@ form of segment files. (`pg_wal` is historically known as `pg_xlog` in
PostgreSQL.)
!!! Info
+
Normally, each segment is 16MB in size, but you can configure the size
using the `walSegmentSize` option. This option is applied at cluster
initialization time, as described in
@@ -206,6 +213,7 @@ volume has a few benefits:
for example, `PGDATA` requires resizing.
!!! Seealso "Write-Ahead Log (WAL)"
+
See [Reliability and the Write-Ahead Log](https://www.postgresql.org/docs/current/wal.html)
in the PostgreSQL documentation for more information.
@@ -227,6 +235,7 @@ spec:
```
!!! Important
+
Removing `walStorage` isn't supported. Once added, a separate volume for
WALs can't be removed from an existing Postgres cluster.
@@ -360,6 +369,7 @@ kubectl get pvc cluster-example-3 -o=jsonpath='{.status.capacity.storage}'
You can repeat these steps for the remaining pods.
!!! Important
+
Leave the resizing of the disk associated with the primary instance as the
last disk, after promoting through a switchover a new resized pod, using
`kubectl cnp promote`. For example, use `kubectl cnp promote cluster-example 3`
@@ -412,6 +422,7 @@ $ kubectl delete pvc/cluster-example-3 pod/cluster-example-3
```
!!! Important
+
If you created a dedicated WAL volume, both PVCs must be deleted during
this process. The same procedure applies if you want to regenerate the WAL
volume PVC. You can do this by also disabling `resizeInUseVolumes` for the
@@ -448,6 +459,7 @@ their representation inside the Kubernetes cluster. This is also known as
*pre-provisioning* of volumes.
!!! Important
+
We recommend that you avoid pre-provisioning volumes, as it has an effect
on the high availability and self-healing capabilities of the operator. It
breaks the fully declarative model on which EDB Postgres for Kubernetes was built.
@@ -464,6 +476,7 @@ To use a pre-provisioned volume in EDB Postgres for Kubernetes:
and enable EDB Postgres for Kubernetes to create the needed `PersistentVolumeClaim`.
!!! Warning
+
With static provisioning, it's your responsibility to ensure that Postgres
pods can be correctly scheduled by Kubernetes where a pre-provisioned volume
exists. (The scheduling configuration is based on the affinity rules of your
diff --git a/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx b/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx
index e54ce48a2b9..f11e55984f8 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/tablespaces.mdx
@@ -177,6 +177,7 @@ CREATE TABLE facts_202312 PARTITION OF facts
```
!!! Important
+
This example assumes you're familiar with
[PostgreSQL declarative partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html).
@@ -202,6 +203,7 @@ the `postgres` user, like in the following excerpt:
```
!!! Important
+
If you change the ownership of a tablespace, make sure that you're using
an existing role. Otherwise, the status of the cluster reports the
issue and stops reconciling tablespaces until fixed. It's your responsibility
@@ -248,6 +250,7 @@ EDB Postgres for Kubernetes handles backup of tablespaces (and the relative
tablespace map) both on object stores and volume snapshots.
!!! Warning
+
By default, backups are taken from replica nodes. A backup taken immediately
after creating tablespaces in a cluster can result in an
incomplete view of the tablespaces from the replica and thus an incomplete
diff --git a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
index 2c42dd1c221..b0c8fec4e34 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
@@ -4,6 +4,7 @@ originalFilePath: 'src/tde.md'
---
!!! Important
+
TDE is available *only* for operands that support it:
EPAS and PG Extended, versions 15 and newer.
@@ -16,6 +17,7 @@ Server from version 15, and is supported by the EDB Postgres for Kubernetes
operator.
!!! Important
+
Before you proceed, please take some time to familiarize with the
[TDE feature in the EPAS documentation](/tde/latest/).
@@ -25,6 +27,7 @@ managed by the database without requiring any application changes or updated
client drivers.
!!! Note
+
In the code samples shown below, the `epas` sub-section of `postgresql` in
the YAML manifests is used to activate TDE. The `epas` section can be used
to enable TDE for PG Extended images as well as for EPAS images.
@@ -41,6 +44,7 @@ The basic approach is to store the passphrase in a Kubernetes secret. Such a
passphrase will be used to encrypt the EPAS binary key.
!!! Seealso "EPAS documentation"
+
Please refer to [the EPAS documentation](/tde/latest/key_stores/)
for details on the EPAS encryption key.
@@ -64,6 +68,7 @@ For example:
You can find an example in [`cluster-example-tde.yaml`](../samples/cluster-example-tde.yaml).
!!! Note
+
This file also contains the definition of the secret to hold the encryption
key. Look at the following section for an example on how to create a secret
for this purpose.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
index 53e0bd8bf10..2068fec5b76 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
@@ -7,6 +7,7 @@ In this page, you can find some basic information on how to troubleshoot
EDB Postgres for Kubernetes in your Kubernetes cluster deployment.
!!! Hint
+
As a Kubernetes administrator, you should have the
[`kubectl` Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) page
bookmarked!
@@ -86,6 +87,7 @@ In some emergency situations, you might need to take an emergency logical
backup of the main `app` database.
!!! Important
+
The instructions you find below must be executed only in emergency situations
and the temporary backup files kept under the data protection policies
that are effective in your organization. The dump file is indeed stored
@@ -102,6 +104,7 @@ kubectl exec cluster-example-1 -c postgres \
```
!!! Note
+
You can easily adapt the above command to backup your cluster, by providing
the names of the objects you have used in your environment.
@@ -122,6 +125,7 @@ kubectl exec -i new-cluster-example-1 -c postgres \
```
!!! Important
+
The example in this section assumes that you have no other global objects
(databases and roles) to dump and restore, as per our recommendation. In case
you have multiple roles, make sure you have taken a backup using `pg_dumpall -g`
@@ -154,6 +158,7 @@ for doing so:
and more.
!!! Note
+
The following sections provide examples of how to retrieve logs for various
resources when troubleshooting EDB Postgres for Kubernetes.
@@ -171,6 +176,7 @@ kubectl get pods -n postgresql-operator-system
```
!!! Note
+
Under normal circumstances, you should have one pod where the operator is
running, identified by a name starting with `postgresql-operator-controller-manager-`.
In case you have set up your operator for high availability, you should have more entries.
@@ -200,6 +206,7 @@ kubectl logs -n postgresql-operator-system \
```
!!! Tip
+
You can add `-f` flag to above command to follow logs in real time.
Save logs to a JSON file by running:
@@ -280,6 +287,7 @@ kubectl cnp status -n
```
!!! Tip
+
You can print more information by adding the `--verbose` option.
Get EDB PostgreSQL Advanced Server (EPAS) / PostgreSQL container image version:
@@ -295,6 +303,7 @@ Output:
```
!!! Note
+
Also you can use `kubectl-cnp status -n `
to get the same information.
@@ -592,6 +601,7 @@ EDB Postgres for Kubernetes allows you to control what to include in the core du
the `k8s.enterprisedb.io/coredumpFilter` annotation.
!!! Info
+
Please refer to ["Labels and annotations"](labels_annotations.md)
for more details on the standard annotations that EDB Postgres for Kubernetes provides.
@@ -600,11 +610,13 @@ exclude shared memory segments from the dump, as this is the safest
approach in most cases.
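+
+As a non-authoritative sketch, such an annotation could be set on the `Cluster`
+resource as follows (the `0x31` bitmask is an assumed, illustrative value meant
+to exclude shared memory segments; check the kernel documentation linked below
+for the exact semantics):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+  annotations:
+    # Assumed value: dump anonymous private mappings, ELF headers, and private
+    # huge pages, while excluding shared memory segments
+    k8s.enterprisedb.io/coredumpFilter: "0x31"
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+```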
!!! Info
+
Please refer to
["Core dump filtering settings" section of "The `/proc` Filesystem" page of the Linux Kernel documentation](https://docs.kernel.org/filesystems/proc.html#proc-pid-coredump-filter-core-dump-filtering-settings).
for more details on how to set the bitmask that controls the core dump filter.
!!! Important
+
Beware that this setting only takes effect during Pod startup and that changing
the annotation doesn't trigger an automated rollout of the instances.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
index 26385fee0ef..18edcf0246e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
@@ -7,6 +7,7 @@ WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive)
in EDB Postgres for Kubernetes.
!!! Important
+
EDB Postgres for Kubernetes currently only supports WAL archives on object stores. Such
WAL archives serve for both object store backups and volume snapshot backups.
@@ -16,8 +17,9 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up
the WAL archive.
!!! Info
- Please refer to [`BarmanObjectStoreConfiguration`](pg4k.v1.md#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
- in the API reference for a full list of options.
+
+ Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration)
+ in the barman-cloud API for a full list of options.
If required, you can choose to compress WAL files as soon as they
are uploaded and/or encrypt them:
@@ -43,6 +45,7 @@ PostgreSQL implements a sequential archiving scheme, where the
segment to be archived.
!!! Important
+
By default, EDB Postgres for Kubernetes sets `archive_timeout` to `5min`, ensuring
that WAL files, even in case of low workloads, are closed and archived
at least every 5 minutes, providing a deterministic time-based value for