diff --git a/.github/workflows/sync-and-process-files.yml b/.github/workflows/sync-and-process-files.yml
index 795e72e9212..2dd1bcb0918 100644
--- a/.github/workflows/sync-and-process-files.yml
+++ b/.github/workflows/sync-and-process-files.yml
@@ -37,6 +37,9 @@ jobs:
         with:
           node-version: '14'
+      - name: update npm
+        run: npm install -g npm@7
+
       - name: Process changes
         run: |
           case ${{ github.event.client_payload.repo }} in
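For orientation, a minimal sketch of how the added step slots into the job's step sequence; the `actions/setup-node` reference is assumed from the `node-version` context above, and only the npm upgrade step is taken verbatim from this change:

```yaml
    steps:
      - uses: actions/setup-node@v2   # assumed setup step supplying node-version: '14'
        with:
          node-version: '14'
      - name: update npm              # new step: upgrades the npm bundled with Node 14 (npm 6) to npm 7
        run: npm install -g npm@7
      - name: Process changes
        run: |
          # repo-specific processing continues as before
```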
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
index 7176b8dec3d..a14b608179c 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx
@@ -21,46 +21,45 @@ Below you will find a description of the defined resources:
-- [AffinityConfiguration](#AffinityConfiguration)
-- [AzureCredentials](#AzureCredentials)
-- [Backup](#Backup)
-- [BackupConfiguration](#BackupConfiguration)
-- [BackupList](#BackupList)
-- [BackupSpec](#BackupSpec)
-- [BackupStatus](#BackupStatus)
-- [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
-- [BootstrapConfiguration](#BootstrapConfiguration)
-- [BootstrapInitDB](#BootstrapInitDB)
-- [BootstrapPgBaseBackup](#BootstrapPgBaseBackup)
-- [BootstrapRecovery](#BootstrapRecovery)
-- [CertificatesConfiguration](#CertificatesConfiguration)
-- [CertificatesStatus](#CertificatesStatus)
-- [Cluster](#Cluster)
-- [ClusterList](#ClusterList)
-- [ClusterSpec](#ClusterSpec)
-- [ClusterStatus](#ClusterStatus)
-- [ConfigMapKeySelector](#ConfigMapKeySelector)
-- [ConfigMapResourceVersion](#ConfigMapResourceVersion)
-- [DataBackupConfiguration](#DataBackupConfiguration)
-- [EPASConfiguration](#EPASConfiguration)
-- [ExternalCluster](#ExternalCluster)
-- [LocalObjectReference](#LocalObjectReference)
-- [MonitoringConfiguration](#MonitoringConfiguration)
-- [NodeMaintenanceWindow](#NodeMaintenanceWindow)
-- [PostgresConfiguration](#PostgresConfiguration)
-- [RecoveryTarget](#RecoveryTarget)
-- [ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
-- [RollingUpdateStatus](#RollingUpdateStatus)
-- [S3Credentials](#S3Credentials)
-- [ScheduledBackup](#ScheduledBackup)
-- [ScheduledBackupList](#ScheduledBackupList)
-- [ScheduledBackupSpec](#ScheduledBackupSpec)
-- [ScheduledBackupStatus](#ScheduledBackupStatus)
-- [SecretKeySelector](#SecretKeySelector)
-- [SecretsResourceVersion](#SecretsResourceVersion)
-- [StorageConfiguration](#StorageConfiguration)
-- [WalBackupConfiguration](#WalBackupConfiguration)
-
+- [AffinityConfiguration](#AffinityConfiguration)
+- [AzureCredentials](#AzureCredentials)
+- [Backup](#Backup)
+- [BackupConfiguration](#BackupConfiguration)
+- [BackupList](#BackupList)
+- [BackupSpec](#BackupSpec)
+- [BackupStatus](#BackupStatus)
+- [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
+- [BootstrapConfiguration](#BootstrapConfiguration)
+- [BootstrapInitDB](#BootstrapInitDB)
+- [BootstrapPgBaseBackup](#BootstrapPgBaseBackup)
+- [BootstrapRecovery](#BootstrapRecovery)
+- [CertificatesConfiguration](#CertificatesConfiguration)
+- [CertificatesStatus](#CertificatesStatus)
+- [Cluster](#Cluster)
+- [ClusterList](#ClusterList)
+- [ClusterSpec](#ClusterSpec)
+- [ClusterStatus](#ClusterStatus)
+- [ConfigMapKeySelector](#ConfigMapKeySelector)
+- [ConfigMapResourceVersion](#ConfigMapResourceVersion)
+- [DataBackupConfiguration](#DataBackupConfiguration)
+- [EPASConfiguration](#EPASConfiguration)
+- [ExternalCluster](#ExternalCluster)
+- [LocalObjectReference](#LocalObjectReference)
+- [MonitoringConfiguration](#MonitoringConfiguration)
+- [NodeMaintenanceWindow](#NodeMaintenanceWindow)
+- [PostgresConfiguration](#PostgresConfiguration)
+- [RecoveryTarget](#RecoveryTarget)
+- [ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
+- [RollingUpdateStatus](#RollingUpdateStatus)
+- [S3Credentials](#S3Credentials)
+- [ScheduledBackup](#ScheduledBackup)
+- [ScheduledBackupList](#ScheduledBackupList)
+- [ScheduledBackupSpec](#ScheduledBackupSpec)
+- [ScheduledBackupStatus](#ScheduledBackupStatus)
+- [SecretKeySelector](#SecretKeySelector)
+- [SecretsResourceVersion](#SecretsResourceVersion)
+- [StorageConfiguration](#StorageConfiguration)
+- [WalBackupConfiguration](#WalBackupConfiguration)
@@ -68,15 +67,15 @@ Below you will find a description of the defined resources:
AffinityConfiguration contains the info we need to create the affinity rules for Pods
-Name | Description | Type
-------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -----------------------
-`enablePodAntiAffinity ` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool
-`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string
-`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string
-`tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | []corev1.Toleration
-`podAntiAffinityType ` | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity | string
-`additionalPodAntiAffinity` | AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. | *corev1.PodAntiAffinity
-`additionalPodAffinity ` | AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods. | *corev1.PodAffinity
+| Name | Description | Type |
+| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
+| `enablePodAntiAffinity ` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | \*bool |
+| `topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string |
+| `nodeSelector          ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: <https://kubernetes.io/docs/concepts/configuration/assign-pod-node/> | map[string]string |
+| `tolerations           ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/> | \[]corev1.Toleration |
+| `podAntiAffinityType   ` | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: <https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity> | string |
+| `additionalPodAntiAffinity` | AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. | \*corev1.PodAntiAffinity |
+| `additionalPodAffinity ` | AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods. | \*corev1.PodAffinity |
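For orientation, a minimal sketch of how the affinity fields above might be set on a `Cluster` resource; the API group/version, metadata name, and the label, taint, and topology values are illustrative assumptions, not taken from the table:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1   # assumed API group/version
kind: Cluster
metadata:
  name: cluster-example                         # hypothetical name
spec:
  instances: 3
  affinity:
    enablePodAntiAffinity: true
    topologyKey: kubernetes.io/hostname
    podAntiAffinityType: preferred
    nodeSelector:
      workload: postgres                        # hypothetical node label
    tolerations:
      - key: dedicated                          # hypothetical taint
        operator: Equal
        value: postgres
        effect: NoSchedule
```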
@@ -84,14 +83,14 @@ Name | Description
AzureCredentials is the type for the credentials to be used to upload files to Azure Blob Storage. The connection string contains every needed information. If the connection string is not specified, we'll need the storage account name and also one (and only one) of:
-- storageKey - storageSasToken
+- storageKey - storageSasToken
-Name | Description | Type
----------------- | --------------------------------------------------------------------------------- | ----------------------------------------
-`connectionString` | The connection string to be used | [*SecretKeySelector](#SecretKeySelector)
-`storageAccount ` | The storage account where to upload data | [*SecretKeySelector](#SecretKeySelector)
-`storageKey ` | The storage account key to be used in conjunction with the storage account name | [*SecretKeySelector](#SecretKeySelector)
-`storageSasToken ` | A shared-access-signature to be used in conjunction with the storage account name | [*SecretKeySelector](#SecretKeySelector)
+| Name | Description | Type |
+| ------------------ | --------------------------------------------------------------------------------- | ----------------------------------------- |
+| `connectionString` | The connection string to be used | [\*SecretKeySelector](#SecretKeySelector) |
+| `storageAccount ` | The storage account where to upload data | [\*SecretKeySelector](#SecretKeySelector) |
+| `storageKey ` | The storage account key to be used in conjunction with the storage account name | [\*SecretKeySelector](#SecretKeySelector) |
+| `storageSasToken ` | A shared-access-signature to be used in conjunction with the storage account name | [\*SecretKeySelector](#SecretKeySelector) |
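A minimal sketch of how these credentials might be referenced from a backup configuration, assuming each selector takes a secret `name` plus a `key` within it; the secret names, key names, and destination path are hypothetical:

```yaml
backup:
  barmanObjectStore:
    destinationPath: https://storageaccount.blob.core.windows.net/backups/   # hypothetical container path
    azureCredentials:
      storageAccount:
        name: azure-creds            # hypothetical secret holding the account name
        key: AZURE_STORAGE_ACCOUNT
      storageKey:
        name: azure-creds            # hypothetical secret holding the account key
        key: AZURE_STORAGE_KEY
```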
@@ -99,11 +98,11 @@ Name | Description
Backup is the Schema for the backups API
-Name | Description | Type
--------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------
-`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta)
-`spec ` | Specification of the desired behavior of the backup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupSpec](#BackupSpec)
-`status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupStatus](#BackupStatus)
+| Name | Description | Type |
+| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
+| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) |
+| `spec    ` | Specification of the desired behavior of the backup. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [BackupSpec](#BackupSpec) |
+| `status  ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [BackupStatus](#BackupStatus) |
@@ -111,9 +110,9 @@ Name | Description
BackupConfiguration defines how the backup of the cluster are taken. Currently the only supported backup method is barmanObjectStore. For details and examples refer to the Backup and Recovery section of the documentation
-Name | Description | Type
------------------ | ------------------------------------------------- | ------------------------------------------------------------------
-`barmanObjectStore` | The configuration for the barman-cloud tool suite | [*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
+| Name | Description | Type |
+| ------------------- | ------------------------------------------------- | ------------------------------------------------------------------- |
+| `barmanObjectStore` | The configuration for the barman-cloud tool suite | [\*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) |
@@ -121,10 +120,10 @@ Name | Description | Type
BackupList contains a list of Backup
-Name | Description | Type
--------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------
-`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta)
-`items ` | List of backups - *mandatory* | [[]Backup](#Backup)
+| Name | Description | Type |
+| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
+| `metadata` | Standard list metadata. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds> | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) |
+| `items ` | List of backups - *mandatory* | [\[\]Backup](#Backup) |
@@ -132,9 +131,9 @@ Name | Description
BackupSpec defines the desired state of Backup
-Name | Description | Type
-------- | --------------------- | ---------------------------------------------
-`cluster` | The cluster to backup | [LocalObjectReference](#LocalObjectReference)
+| Name | Description | Type |
+| --------- | --------------------- | --------------------------------------------- |
+| `cluster` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) |
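Since `BackupSpec` only needs a cluster reference, an on-demand backup manifest can be very small. A sketch, assuming the `postgresql.k8s.enterprisedb.io/v1` API group and a `name` field on `LocalObjectReference` (that type is not detailed in this excerpt); the resource names are hypothetical:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1   # assumed API group/version
kind: Backup
metadata:
  name: backup-example          # hypothetical name
spec:
  cluster:
    name: cluster-example       # the Cluster to back up
```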
@@ -142,25 +141,25 @@ Name | Description | Type
BackupStatus defines the observed state of Backup
-Name | Description | Type
----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------
-`s3Credentials ` | The credentials to be used to upload data to S3 | [*S3Credentials](#S3Credentials)
-`azureCredentials` | The credentials to be used to upload data to Azure Blob Storage | [*AzureCredentials](#AzureCredentials)
-`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
-`destinationPath ` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
-`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
-`encryption ` | Encryption method required to S3 API | string
-`backupId ` | The ID of the Barman backup | string
-`phase ` | The last backup status | BackupPhase
-`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`beginWal ` | The starting WAL | string
-`endWal ` | The ending WAL | string
-`beginLSN ` | The starting xlog | string
-`endLSN ` | The ending xlog | string
-`error ` | The detected error | string
-`commandOutput ` | Unused. Retained for compatibility with old versions. | string
-`commandError ` | The backup command output in case of error | string
+| Name | Description | Type |
+| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
+| `s3Credentials ` | The credentials to be used to upload data to S3 | [\*S3Credentials](#S3Credentials) |
+| `azureCredentials` | The credentials to be used to upload data to Azure Blob Storage | [\*AzureCredentials](#AzureCredentials) |
+| `endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string |
+| `destinationPath ` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string |
+| `serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string |
+| `encryption ` | Encryption method required to S3 API | string |
+| `backupId ` | The ID of the Barman backup | string |
+| `phase ` | The last backup status | BackupPhase |
+| `startedAt ` | When the backup was started | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
+| `stoppedAt ` | When the backup was terminated | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
+| `beginWal ` | The starting WAL | string |
+| `endWal ` | The ending WAL | string |
+| `beginLSN ` | The starting xlog | string |
+| `endLSN ` | The ending xlog | string |
+| `error ` | The detected error | string |
+| `commandOutput ` | Unused. Retained for compatibility with old versions. | string |
+| `commandError ` | The backup command output in case of error | string |
@@ -168,16 +167,16 @@ Name | Description
BarmanObjectStoreConfiguration contains the backup configuration using Barman against an S3-compatible object storage
-Name | Description | Type
----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------
-`s3Credentials ` | The credentials to use to upload data to S3 | [*S3Credentials](#S3Credentials)
-`azureCredentials` | The credentials to use to upload data in Azure Blob Storage | [*AzureCredentials](#AzureCredentials)
-`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string
-`endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [*SecretKeySelector](#SecretKeySelector)
-`destinationPath ` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string
-`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string
-`wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*WalBackupConfiguration](#WalBackupConfiguration)
-`data ` | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*DataBackupConfiguration](#DataBackupConfiguration)
+| Name | Description | Type |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `s3Credentials ` | The credentials to use to upload data to S3 | [\*S3Credentials](#S3Credentials) |
+| `azureCredentials` | The credentials to use to upload data in Azure Blob Storage | [\*AzureCredentials](#AzureCredentials) |
+| `endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string |
+| `endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [\*SecretKeySelector](#SecretKeySelector) |
+| `destinationPath ` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string |
+| `serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string |
+| `wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [\*WalBackupConfiguration](#WalBackupConfiguration) |
+| `data ` | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [\*DataBackupConfiguration](#DataBackupConfiguration) |
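A minimal sketch combining the fields above into a cluster's `backup` stanza. The bucket, endpoint, and secret names are hypothetical; the `accessKeyId`/`secretAccessKey` selectors inside `S3Credentials` and the `compression` field of `WalBackupConfiguration` are assumptions, since those types are not detailed in this excerpt:

```yaml
backup:
  barmanObjectStore:
    destinationPath: s3://my-bucket/backups/    # hypothetical bucket path
    endpointURL: https://s3.example.com         # only needed when overriding endpoint discovery
    s3Credentials:
      accessKeyId:
        name: aws-creds                         # hypothetical secret
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: aws-creds
        key: ACCESS_SECRET_KEY
    wal:
      compression: gzip                         # assumed WalBackupConfiguration field
    data:
      compression: gzip
      immediateCheckpoint: false
      jobs: 2
```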
@@ -185,11 +184,11 @@ Name | Description
BootstrapConfiguration contains information about how to create the PostgreSQL cluster. Only a single bootstrap method can be defined among the supported ones. `initdb` will be used as the bootstrap method if left unspecified. Refer to the Bootstrap page of the documentation for more information.
-Name | Description | Type
-------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------
-`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB)
-`recovery ` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery)
-`pg_basebackup` | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance | [*BootstrapPgBaseBackup](#BootstrapPgBaseBackup)
+| Name | Description | Type |
+| --------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------- |
+| `initdb ` | Bootstrap the cluster via initdb | [\*BootstrapInitDB](#BootstrapInitDB) |
+| `recovery ` | Bootstrap the cluster from a backup | [\*BootstrapRecovery](#BootstrapRecovery) |
+| `pg_basebackup` | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance | [\*BootstrapPgBaseBackup](#BootstrapPgBaseBackup) |
@@ -197,14 +196,14 @@ Name | Description
BootstrapInitDB is the configuration of the bootstrap process when initdb is used Refer to the Bootstrap page of the documentation for more information.
-Name | Description | Type
------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------
-`database ` | Name of the database used by the application. Default: `app`. - *mandatory* | string
-`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string
-`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*LocalObjectReference](#LocalObjectReference)
-`redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool
-`options ` | The list of options that must be passed to initdb when creating the cluster | []string
-`postInitSQL` | List of SQL queries to be executed as a superuser immediately after the cluster has been created - to be used with extreme care (by default empty) | []string
+| Name | Description | Type |
+| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
+| `database ` | Name of the database used by the application. Default: `app`. - *mandatory* | string |
+| `owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string |
+| `secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [\*LocalObjectReference](#LocalObjectReference) |
+| `redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | \*bool |
+| `options ` | The list of options that must be passed to initdb when creating the cluster | \[]string |
+| `postInitSQL` | List of SQL queries to be executed as a superuser immediately after the cluster has been created - to be used with extreme care (by default empty) | \[]string |
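A sketch of an `initdb` bootstrap using the fields above; the secret name, initdb option, and SQL statement are illustrative values, not defaults:

```yaml
bootstrap:
  initdb:
    database: app
    owner: app
    secret:
      name: app-user-secret                  # hypothetical pre-existing credentials secret
    options:
      - "--encoding=UTF8"                    # illustrative initdb option
    postInitSQL:
      - CREATE EXTENSION IF NOT EXISTS pg_stat_statements;   # illustrative; runs as superuser, use with care
```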
@@ -212,9 +211,9 @@ Name | Description
BootstrapPgBaseBackup contains the configuration required to take a physical backup of an existing PostgreSQL cluster
-Name | Description | Type
------- | ----------------------------------------------------------------- | ------
-`source` | The name of the server of which we need to take a physical backup - *mandatory* | string
+| Name | Description | Type |
+| -------- | ------------------------------------------------------------------------------- | ------ |
+| `source` | The name of the server of which we need to take a physical backup - *mandatory* | string |
@@ -222,11 +221,11 @@ Name | Description | Typ
BootstrapRecovery contains the configuration required to restore the backup with the specified name and, after having changed the password with the one chosen for the superuser, will use it to bootstrap a full cluster cloning all the instances from the restored primary. Refer to the Bootstrap page of the documentation for more information.
-Name | Description | Type
--------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------
-`backup ` | The backup we need to restore | [*LocalObjectReference](#LocalObjectReference)
-`source ` | The external cluster whose backup we will restore. This is also used as the name of the folder under which the backup is stored, so it must be set to the name of the source cluster | string
-`recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET | [*RecoveryTarget](#RecoveryTarget)
+| Name | Description | Type |
+| ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
+| `backup ` | The backup we need to restore | [\*LocalObjectReference](#LocalObjectReference) |
+| `source ` | The external cluster whose backup we will restore. This is also used as the name of the folder under which the backup is stored, so it must be set to the name of the source cluster | string |
+| `recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: <https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET> | [\*RecoveryTarget](#RecoveryTarget) |
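A sketch of a recovery bootstrap from an existing `Backup` object, optionally with a point-in-time target; the backup name is hypothetical, and `targetTime` is an assumed `RecoveryTarget` field since that type is not detailed in this excerpt:

```yaml
bootstrap:
  recovery:
    backup:
      name: backup-example                        # hypothetical Backup to restore from
    recoveryTarget:
      targetTime: "2021-11-09 14:21:00.00000+00"  # assumed field; stop recovery at this point in time
```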
@@ -234,13 +233,13 @@ Name | Description
CertificatesConfiguration contains the needed configurations to handle server certificates.
-Name | Description | Type
--------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------
-`serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. Contains: - `ca.crt`: CA that should be used to validate the server certificate, used as `sslrootcert` in client connection strings. - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, this can be omitted. | string
-`serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string
-`replicationTLSSecret` | The secret of type kubernetes.io/tls containing the client certificate to authenticate as the `streaming_replica` user. If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be created using the provided CA. | string
-`clientCASecret ` | The secret containing the Client CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate all the client certificates. Contains: - `ca.crt`: CA that should be used to validate the client certificates, used as `ssl_ca_file` of all the instances. - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, this can be omitted. | string
-`serverAltDNSNames ` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | []string
+| Name | Description | Type |
+| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
+| `serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. Contains: - `ca.crt`: CA that should be used to validate the server certificate, used as `sslrootcert` in client connection strings. - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, this can be omitted. | string |
+| `serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string |
+| `replicationTLSSecret` | The secret of type kubernetes.io/tls containing the client certificate to authenticate as the `streaming_replica` user. If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be created using the provided CA. | string |
+| `clientCASecret ` | The secret containing the Client CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate all the client certificates. Contains: - `ca.crt`: CA that should be used to validate the client certificates, used as `ssl_ca_file` of all the instances. - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, this can be omitted. | string |
+| `serverAltDNSNames ` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | \[]string |
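A sketch of a `certificates` stanza wiring in user-provided TLS material; all secret and DNS names are hypothetical:

```yaml
certificates:
  serverCASecret: cluster-example-server-ca              # hypothetical secret names
  serverTLSSecret: cluster-example-server-tls
  clientCASecret: cluster-example-client-ca
  replicationTLSSecret: cluster-example-replication-tls
  serverAltDNSNames:
    - cluster-example.example.com                        # hypothetical DNS name
```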
@@ -248,9 +247,9 @@ Name | Description
CertificatesStatus contains configuration certificates and related expiration dates.
-Name | Description | Type
------------ | -------------------------------------- | -----------------
-`expirations` | Expiration dates for all certificates. | map[string]string
+| Name | Description | Type |
+| ------------- | -------------------------------------- | ----------------- |
+| `expirations` | Expiration dates for all certificates. | map[string]string |
@@ -258,11 +257,11 @@ Name | Description | Type
Cluster is the Schema for the PostgreSQL API
-Name | Description | Type
--------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------
-`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta)
-`spec ` | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterSpec](#ClusterSpec)
-`status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterStatus](#ClusterStatus)
+| Name | Description | Type |
+| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
+| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) |
+| `spec    ` | Specification of the desired behavior of the cluster. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [ClusterSpec](#ClusterSpec) |
+| `status  ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [ClusterStatus](#ClusterStatus) |
@@ -270,10 +269,10 @@ Name | Description
ClusterList contains a list of Cluster
-Name | Description | Type
--------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------
-`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta)
-`items ` | List of clusters - *mandatory* | [[]Cluster](#Cluster)
+| Name | Description | Type |
+| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
+| `metadata` | Standard list metadata. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds> | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) |
+| `items ` | List of clusters - *mandatory* | [\[\]Cluster](#Cluster) |
@@ -281,36 +280,36 @@ Name | Description
ClusterSpec defines the desired state of Cluster
-Name | Description | Type
---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------
-`description ` | Description of this PostgreSQL cluster | string
-`imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string
-`imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | corev1.PullPolicy
-`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64
-`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64
-`instances ` | Number of instances required in the cluster - *mandatory* | int32
-`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32
-`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32
-`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration)
-`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration)
-`replica ` | Replica cluster configuration | [*ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
-`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*LocalObjectReference](#LocalObjectReference)
-`enableSuperuserAccess` | When this option is enabled, the operator will use the `SuperuserSecret` to update the `postgres` user password (if the secret is not present, the operator will automatically create one). When this option is disabled, the operator will ignore the `SuperuserSecret` content, delete it when automatically created, and then blank the password of the `postgres` user by setting it to `NULL`. Enabled by default. | *bool
-`certificates ` | The configuration for the CA and related certificates | [*CertificatesConfiguration](#CertificatesConfiguration)
-`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]LocalObjectReference](#LocalObjectReference)
-`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration)
-`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32
-`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32
-`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration)
-`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core)
-`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy
-`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration)
-`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow)
-`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string
-`licenseKeySecret ` | The reference to the license key. When this is set it take precedence over LicenseKey. | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
-`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration)
-`externalClusters ` | The list of external clusters which are used in the configuration | [[]ExternalCluster](#ExternalCluster)
-`logLevel ` | The instances' log level, one of the following values: error, info (default), debug, trace | string
+| Name | Description | Type |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
+| `description ` | Description of this PostgreSQL cluster | string |
+| `imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string |
+| `imagePullPolicy      ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: <https://kubernetes.io/docs/concepts/containers/images#updating-images> | corev1.PullPolicy |
+| `postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 |
+| `postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 |
+| `instances ` | Number of instances required in the cluster - *mandatory* | int32 |
+| `minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 |
+| `maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 |
+| `postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) |
+| `bootstrap ` | Instructions to bootstrap this cluster | [\*BootstrapConfiguration](#BootstrapConfiguration) |
+| `replica ` | Replica cluster configuration | [\*ReplicaClusterConfiguration](#ReplicaClusterConfiguration) |
+| `superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [\*LocalObjectReference](#LocalObjectReference) |
+| `enableSuperuserAccess` | When this option is enabled, the operator will use the `SuperuserSecret` to update the `postgres` user password (if the secret is not present, the operator will automatically create one). When this option is disabled, the operator will ignore the `SuperuserSecret` content, delete it when automatically created, and then blank the password of the `postgres` user by setting it to `NULL`. Enabled by default. | \*bool |
+| `certificates ` | The configuration for the CA and related certificates | [\*CertificatesConfiguration](#CertificatesConfiguration) |
+| `imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [\[\]LocalObjectReference](#LocalObjectReference) |
+| `storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) |
+| `startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 |
+| `stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 |
+| `affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) |
+| `resources            ` | Resources requirements of every generated Pod. Please refer to <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core) |
+| `primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy |
+| `backup ` | The configuration to be used for backups | [\*BackupConfiguration](#BackupConfiguration) |
+| `nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [\*NodeMaintenanceWindow](#NodeMaintenanceWindow) |
+| `licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string |
+| `licenseKeySecret ` | The reference to the license key. When this is set it take precedence over LicenseKey. | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) |
+| `monitoring ` | The configuration of the monitoring infrastructure of this cluster | [\*MonitoringConfiguration](#MonitoringConfiguration) |
+| `externalClusters ` | The list of external clusters which are used in the configuration | [\[\]ExternalCluster](#ExternalCluster) |
+| `logLevel ` | The instances' log level, one of the following values: error, info (default), debug, trace | string |
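Pulling a few of the fields above together, a minimal `Cluster` manifest might look like the sketch below; the API group/version, image reference, secret name, and the `size` field of `StorageConfiguration` are assumptions, the latter because that type is not detailed in this excerpt:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1      # assumed API group/version
kind: Cluster
metadata:
  name: cluster-example                            # hypothetical name
spec:
  description: Example cluster
  instances: 3
  imageName: quay.io/enterprisedb/postgresql:13.4  # hypothetical image reference
  primaryUpdateStrategy: unsupervised
  minSyncReplicas: 1
  maxSyncReplicas: 2
  logLevel: info
  superuserSecret:
    name: superuser-secret                         # hypothetical secret with the postgres password
  storage:
    size: 1Gi                                      # assumed StorageConfiguration field
```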
@@ -318,31 +317,31 @@ Name | Description
ClusterStatus defines the observed state of Cluster
-Name | Description | Type
-------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -----------------------------------------------------
-`instances ` | Total number of instances in the cluster | int32
-`readyInstances ` | Total number of ready instances in the cluster | int32
-`instancesStatus ` | Instances status | map[utils.PodStatus][]string
-`latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32
-`currentPrimary ` | Current primary instance | string
-`targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string
-`pvcCount ` | How many PVCs have been created by this cluster | int32
-`jobCount ` | How many Jobs have been created by this cluster | int32
-`danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | []string
-`initializingPVC ` | List of all the PVCs that are being initialized by this cluster | []string
-`healthyPVC ` | List of all the PVCs not dangling nor initializing | []string
-`licenseStatus ` | Status of the license | licensekey.Status
-`writeService ` | Current write pod | string
-`readService ` | Current list of read pods | string
-`phase ` | Current phase of the cluster | string
-`phaseReason ` | Reason for the current phase | string
-`secretsResourceVersion ` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion)
-`configMapResourceVersion ` | The list of resource versions of the configmaps, managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data | [ConfigMapResourceVersion](#ConfigMapResourceVersion)
-`certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus)
-`firstRecoverabilityPoint ` | The first recoverability point, stored as a date in RFC3339 format | string
-`cloudNativePostgresqlCommitHash` | The commit hash number of which this operator running | string
-`currentPrimaryTimestamp ` | The timestamp when the last actual promotion to primary has occurred | string
-`targetPrimaryTimestamp ` | The timestamp when the last request for a new primary has occurred | string
+| Name | Description | Type |
+| --------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- |
+| `instances ` | Total number of instances in the cluster | int32 |
+| `readyInstances ` | Total number of ready instances in the cluster | int32 |
+| `instancesStatus ` | Instances status | map[utils.PodStatus][]string |
+| `latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 |
+| `currentPrimary ` | Current primary instance | string |
+| `targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string |
+| `pvcCount ` | How many PVCs have been created by this cluster | int32 |
+| `jobCount ` | How many Jobs have been created by this cluster | int32 |
+| `danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | \[]string |
+| `initializingPVC ` | List of all the PVCs that are being initialized by this cluster | \[]string |
+| `healthyPVC ` | List of all the PVCs not dangling nor initializing | \[]string |
+| `licenseStatus ` | Status of the license | licensekey.Status |
+| `writeService ` | Current write pod | string |
+| `readService ` | Current list of read pods | string |
+| `phase ` | Current phase of the cluster | string |
+| `phaseReason ` | Reason for the current phase | string |
+| `secretsResourceVersion ` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) |
+| `configMapResourceVersion ` | The list of resource versions of the configmaps, managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data | [ConfigMapResourceVersion](#ConfigMapResourceVersion) |
+| `certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus) |
+| `firstRecoverabilityPoint ` | The first recoverability point, stored as a date in RFC3339 format | string |
+| `cloudNativePostgresqlCommitHash` | The commit hash number of which this operator running | string |
+| `currentPrimaryTimestamp ` | The timestamp when the last actual promotion to primary has occurred | string |
+| `targetPrimaryTimestamp ` | The timestamp when the last request for a new primary has occurred | string |
@@ -350,9 +349,9 @@ Name | Description
ConfigMapKeySelector contains enough information to let you locate the key of a ConfigMap
-Name | Description | Type
---- | ----------------- | ------
-`key` | The key to select - *mandatory* | string
+| Name | Description | Type |
+| ----- | ------------------------------- | ------ |
+| `key` | The key to select - *mandatory* | string |
@@ -360,9 +359,9 @@ Name | Description | Type
ConfigMapResourceVersion is the resource versions of the secrets managed by the operator
-Name | Description | Type
-------- | ----------------------------------------------------------------------------------------------------------------------------------- | -----------------
-`metrics` | A map with the versions of all the config maps used to pass metrics. Map keys are the config map names, map values are the versions | map[string]string
+| Name | Description | Type |
+| --------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
+| `metrics` | A map with the versions of all the config maps used to pass metrics. Map keys are the config map names, map values are the versions | map[string]string |
@@ -370,12 +369,12 @@ Name | Description
DataBackupConfiguration is the configuration of the backup of the data directory
-Name | Description | Type
-------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------
-`compression ` | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType
-`encryption ` | Whenever to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
-`immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool
-`jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | *int32
+| Name | Description | Type |
+| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `compression ` | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType |
+| `encryption `          | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms`                                                                                                                                | EncryptionType  |
+| `immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool |
+| `jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | \*int32 |
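+
+For illustration only, a minimal sketch of how these fields are typically set inside the `data` section of a cluster's `barmanObjectStore` backup configuration (the destination path is a placeholder):
+
+```yaml
+backup:
+  barmanObjectStore:
+    destinationPath: "s3://backups/example"  # placeholder bucket
+    data:
+      compression: gzip          # or bzip2; omit for no compression
+      encryption: AES256         # or aws:kms; omit to follow the bucket policy
+      immediateCheckpoint: true  # do not throttle the initial checkpoint
+      jobs: 2                    # parallel upload jobs
+```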
@@ -383,9 +382,9 @@ Name | Description
EPASConfiguration contains EDB Postgres Advanced Server specific configurations
-Name | Description | Type
------ | --------------------------------- | ----
-`audit` | If true enables edb_audit logging | bool
+| Name | Description | Type |
+| ------- | --------------------------------- | ---- |
+| `audit` | If true enables edb_audit logging | bool |
@@ -393,15 +392,15 @@ Name | Description | Type
ExternalCluster represents the connection parameters to an external cluster which is used in the other sections of the configuration
-Name | Description | Type
--------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------
-`name ` | The server name, required - *mandatory* | string
-`connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string
-`sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
-`sslKey ` | The reference to an SSL private key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
-`sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
-`password ` | The reference to the password to be used to connect to the server | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core)
-`barmanObjectStore ` | The configuration for the barman-cloud tool suite | [*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
+| Name | Description | Type |
+| ---------------------- | ------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- |
+| `name ` | The server name, required - *mandatory* | string |
+| `connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string |
+| `sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) |
+| `sslKey ` | The reference to an SSL private key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) |
+| `sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) |
+| `password ` | The reference to the password to be used to connect to the server | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) |
+| `barmanObjectStore ` | The configuration for the barman-cloud tool suite | [\*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) |
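+
+As a minimal sketch (cluster, host, and secret names are placeholders), an entry in the `externalClusters` section that only provides a streaming connection could look like this:
+
+```yaml
+externalClusters:
+  - name: cluster-origin              # placeholder name, referenced via `source`
+    connectionParameters:
+      host: cluster-origin-rw         # placeholder host
+      user: postgres
+      dbname: postgres
+    password:
+      name: cluster-origin-superuser  # placeholder secret holding the password
+      key: password
+```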
@@ -409,9 +408,9 @@ Name | Description
LocalObjectReference contains enough information to let you locate a local object with a known type inside the same namespace
-Name | Description | Type
----- | --------------------- | ------
-`name` | Name of the referent. - *mandatory* | string
+| Name | Description | Type |
+| ------ | ----------------------------------- | ------ |
+| `name` | Name of the referent. - *mandatory* | string |
@@ -419,10 +418,10 @@ Name | Description | Type
MonitoringConfiguration is the type containing all the monitoring configuration for a certain cluster
-Name | Description | Type
----------------------- | ----------------------------------------------------- | -----------------------------------------------
-`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]ConfigMapKeySelector](#ConfigMapKeySelector)
-`customQueriesSecret ` | The list of secrets containing the custom queries | [[]SecretKeySelector](#SecretKeySelector)
+| Name | Description | Type |
+| ------------------------ | ----------------------------------------------------- | ------------------------------------------------- |
+| `customQueriesConfigMap` | The list of config maps containing the custom queries | [\[\]ConfigMapKeySelector](#ConfigMapKeySelector) |
+| `customQueriesSecret ` | The list of secrets containing the custom queries | [\[\]SecretKeySelector](#SecretKeySelector) |
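+
+For example, assuming a config map named `example-monitoring` that stores the queries under a `custom-queries` key (both names are placeholders), the monitoring section might be:
+
+```yaml
+monitoring:
+  customQueriesConfigMap:
+    - name: example-monitoring  # placeholder config map name
+      key: custom-queries       # placeholder key containing the queries
+```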
@@ -432,10 +431,10 @@ NodeMaintenanceWindow contains information that the operator will use while upgr
This option is only useful when the chosen storage prevents the Pods from being freely moved across nodes.
-Name | Description | Type
----------- | ------------------------------------------------------------------------------------------ | -----
-`inProgress` | Is there a node maintenance activity in progress? - *mandatory* | bool
-`reusePVC ` | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere) - *mandatory* | *bool
+| Name | Description | Type |
+| ------------ | -------------------------------------------------------------------------------------------------------- | ------ |
+| `inProgress` | Is there a node maintenance activity in progress? - *mandatory* | bool |
+| `reusePVC ` | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere) - *mandatory* | \*bool |
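+
+A sketch of how these fields might be set while a node is being drained (the values shown are only an example):
+
+```yaml
+nodeMaintenanceWindow:
+  inProgress: true  # a maintenance activity is ongoing
+  reusePVC: true    # wait for the node to come back and reuse its PVC
+```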
@@ -443,13 +442,13 @@ Name | Description
PostgresConfiguration defines the PostgreSQL configuration
-Name | Description | Type
------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------
-`parameters ` | PostgreSQL configuration options (postgresql.conf) | map[string]string
-`pg_hba ` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | []string
-`epas ` | EDB Postgres Advanced Server specific configurations | [*EPASConfiguration](#EPASConfiguration)
-`promotionTimeout ` | Specifies the maximum number of seconds to wait when promoting an instance to primary | int32
-`shared_preload_libraries` | Lists of shared preload libraries to add to the default ones | []string
+| Name | Description | Type |
+| -------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------- |
+| `parameters ` | PostgreSQL configuration options (postgresql.conf) | map[string]string |
+| `pg_hba ` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | \[]string |
+| `epas ` | EDB Postgres Advanced Server specific configurations | [\*EPASConfiguration](#EPASConfiguration) |
+| `promotionTimeout ` | Specifies the maximum number of seconds to wait when promoting an instance to primary | int32 |
+| `shared_preload_libraries` | Lists of shared preload libraries to add to the default ones | \[]string |
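+
+For illustration, a `postgresql` section combining these fields might look as follows (the parameter values and the `pg_hba` rule are placeholders):
+
+```yaml
+postgresql:
+  parameters:
+    shared_buffers: 256MB
+    max_connections: "200"
+  pg_hba:
+    - host all all 10.0.0.0/16 md5  # placeholder rule appended to pg_hba.conf
+  epas:
+    audit: true                     # enable edb_audit logging
+  promotionTimeout: 300
+```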
@@ -457,15 +456,15 @@ Name | Description
RecoveryTarget allows to configure the moment where the recovery process will stop. All the target options except TargetTLI are mutually exclusive.
-Name | Description | Type
---------------- | ------------------------------------------------------------------------- | ------
-`targetTLI ` | The target timeline ("latest", "current" or a positive integer) | string
-`targetXID ` | The target transaction ID | string
-`targetName ` | The target name (to be previously created with `pg_create_restore_point`) | string
-`targetLSN ` | The target LSN (Log Sequence Number) | string
-`targetTime ` | The target time, in any unambiguous representation allowed by PostgreSQL | string
-`targetImmediate` | End recovery as soon as a consistent state is reached | *bool
-`exclusive ` | Set the target to be exclusive (defaults to true) | *bool
+| Name | Description | Type |
+| ----------------- | ------------------------------------------------------------------------- | ------ |
+| `targetTLI ` | The target timeline ("latest", "current" or a positive integer) | string |
+| `targetXID ` | The target transaction ID | string |
+| `targetName ` | The target name (to be previously created with `pg_create_restore_point`) | string |
+| `targetLSN ` | The target LSN (Log Sequence Number) | string |
+| `targetTime ` | The target time, in any unambiguous representation allowed by PostgreSQL | string |
+| `targetImmediate` | End recovery as soon as a consistent state is reached | \*bool |
+| `exclusive `      | Set the target to be exclusive (defaults to true)                          | \*bool |
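+
+As a sketch (the source name and timestamp are placeholders), a point-in-time target inside a `recovery` bootstrap section might be expressed as:
+
+```yaml
+bootstrap:
+  recovery:
+    source: cluster-origin  # placeholder external cluster
+    recoveryTarget:
+      targetTime: "2021-09-01 10:00:00.000000+00"  # placeholder timestamp
+```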
@@ -473,10 +472,10 @@ Name | Description
ReplicaClusterConfiguration encapsulates the configuration of a replica cluster
-Name | Description | Type
-------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------
-`enabled` | If replica mode is enabled, this cluster will be a replica of an existing cluster. A cluster of such type can be created only using bootstrap via pg_basebackup - *mandatory* | bool
-`source ` | The name of the external cluster which is the replication origin - *mandatory* | string
+| Name | Description | Type |
+| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
+| `enabled` | If replica mode is enabled, this cluster will be a replica of an existing cluster. A cluster of this type can be created only using bootstrap via pg_basebackup - *mandatory* | bool   |
+| `source ` | The name of the external cluster which is the replication origin - *mandatory* | string |
@@ -484,10 +483,10 @@ Name | Description
RollingUpdateStatus contains the information about an instance which is being updated
-Name | Description | Type
---------- | ----------------------------------- | ------------------------------------------------------------------------------------------------
-`imageName` | The image which we put into the Pod - *mandatory* | string
-`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
+| Name | Description | Type |
+| ----------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------ |
+| `imageName` | The image which we put into the Pod - *mandatory* | string |
+| `startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
@@ -495,10 +494,10 @@ Name | Description | Type
S3Credentials is the type for the credentials to be used to upload files to S3
-Name | Description | Type
---------------- | -------------------------------------- | ---------------------------------------
-`accessKeyId ` | The reference to the access key id - *mandatory* | [SecretKeySelector](#SecretKeySelector)
-`secretAccessKey` | The reference to the secret access key - *mandatory* | [SecretKeySelector](#SecretKeySelector)
+| Name | Description | Type |
+| ----------------- | ---------------------------------------------------- | --------------------------------------- |
+| `accessKeyId ` | The reference to the access key id - *mandatory* | [SecretKeySelector](#SecretKeySelector) |
+| `secretAccessKey` | The reference to the secret access key - *mandatory* | [SecretKeySelector](#SecretKeySelector) |
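+
+For example, assuming a secret named `aws-creds` (a placeholder) that holds both keys, the credentials could be referenced as:
+
+```yaml
+backup:
+  barmanObjectStore:
+    destinationPath: "s3://backups/example"  # placeholder bucket
+    s3Credentials:
+      accessKeyId:
+        name: aws-creds        # placeholder secret name
+        key: ACCESS_KEY_ID
+      secretAccessKey:
+        name: aws-creds
+        key: ACCESS_SECRET_KEY
+```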
@@ -506,11 +505,11 @@ Name | Description | Type
ScheduledBackup is the Schema for the scheduledbackups API
-Name | Description | Type
--------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------
-`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta)
-`spec ` | Specification of the desired behavior of the ScheduledBackup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupSpec](#ScheduledBackupSpec)
-`status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupStatus](#ScheduledBackupStatus)
+| Name | Description | Type |
+| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
+| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) |
+| `spec `    | Specification of the desired behavior of the ScheduledBackup. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [ScheduledBackupSpec](#ScheduledBackupSpec) |
+| `status `  | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> | [ScheduledBackupStatus](#ScheduledBackupStatus) |
@@ -518,10 +517,10 @@ Name | Description
ScheduledBackupList contains a list of ScheduledBackup
-Name | Description | Type
--------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------
-`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta)
-`items ` | List of clusters - *mandatory* | [[]ScheduledBackup](#ScheduledBackup)
+| Name | Description | Type |
+| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
+| `metadata` | Standard list metadata. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds> | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) |
+| `items `   | List of scheduled backups - *mandatory* | [\[\]ScheduledBackup](#ScheduledBackup) |
@@ -529,12 +528,12 @@ Name | Description
ScheduledBackupSpec defines the desired state of ScheduledBackup
-Name | Description | Type
---------- | --------------------------------------------------------------------- | ---------------------------------------------
-`suspend ` | If this backup is suspended or not | *bool
-`immediate` | If the first backup has to be immediately start after creation or not | *bool
-`schedule ` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string
-`cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference)
+| Name | Description | Type |
+| ----------- | ------------------------------------------------------------------------------------- | --------------------------------------------- |
+| `suspend ` | If this backup is suspended or not | \*bool |
+| `immediate` | If the first backup has to be started immediately after creation or not | \*bool |
+| `schedule ` | The schedule in Cron format, see <https://en.wikipedia.org/wiki/Cron>. - *mandatory* | string |
+| `cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) |
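+
+Putting these fields together, a minimal `ScheduledBackup` might look like the following sketch (names are placeholders; the example schedule runs a backup every day at midnight):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example     # placeholder name
+spec:
+  schedule: "0 0 0 * * *"  # every day at midnight
+  suspend: false
+  immediate: true
+  cluster:
+    name: pg-database      # placeholder cluster name
+```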
@@ -542,11 +541,11 @@ Name | Description
ScheduledBackupStatus defines the observed state of ScheduledBackup
-Name | Description | Type
----------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------
-`lastCheckTime ` | The latest time the schedule | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
-`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta)
+| Name | Description | Type |
+| ------------------ | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
+| `lastCheckTime `   | The latest time the schedule was checked | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
+| `lastScheduleTime` | Information about the last time a backup was successfully scheduled | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
+| `nextScheduleTime` | Next time we will run a backup | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) |
@@ -554,9 +553,9 @@ Name | Description
SecretKeySelector contains enough information to let you locate the key of a Secret
-Name | Description | Type
---- | ----------------- | ------
-`key` | The key to select - *mandatory* | string
+| Name | Description | Type |
+| ----- | ------------------------------- | ------ |
+| `key` | The key to select - *mandatory* | string |
@@ -564,17 +563,17 @@ Name | Description | Type
SecretsResourceVersion is the resource versions of the secrets managed by the operator
-Name | Description | Type
------------------------- | --------------------------------------------------------------------------------------------------------------------------- | -----------------
-`superuserSecretVersion ` | The resource version of the "postgres" user secret | string
-`replicationSecretVersion` | The resource version of the "streaming_replica" user secret | string
-`applicationSecretVersion` | The resource version of the "app" user secret | string
-`caSecretVersion ` | Unused. Retained for compatibility with old versions. | string
-`clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version | string
-`serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version | string
-`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version | string
-`barmanEndpointCA ` | The resource version of the Barman Endpoint CA if provided | string
-`metrics ` | A map with the versions of all the secrets used to pass metrics. Map keys are the secret names, map values are the versions | map[string]string
+| Name | Description | Type |
+| -------------------------- | --------------------------------------------------------------------------------------------------------------------------- | ----------------- |
+| `superuserSecretVersion ` | The resource version of the "postgres" user secret | string |
+| `replicationSecretVersion` | The resource version of the "streaming_replica" user secret | string |
+| `applicationSecretVersion` | The resource version of the "app" user secret | string |
+| `caSecretVersion ` | Unused. Retained for compatibility with old versions. | string |
+| `clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version | string |
+| `serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version | string |
+| `serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version | string |
+| `barmanEndpointCA ` | The resource version of the Barman Endpoint CA if provided | string |
+| `metrics ` | A map with the versions of all the secrets used to pass metrics. Map keys are the secret names, map values are the versions | map[string]string |
@@ -582,12 +581,12 @@ Name | Description
StorageConfiguration is the configuration of the storage of the PostgreSQL instances
-Name | Description | Type
------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------
-`storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | *string
-`size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. - *mandatory* | string
-`resizeInUseVolumes` | Resize existent PVCs, defaults to true | *bool
-`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#persistentvolumeclaim-v1-core)
+| Name | Description | Type |
+| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
+| `storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | \*string |
+| `size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. - *mandatory* | string |
+| `resizeInUseVolumes` | Resize existent PVCs, defaults to true | \*bool |
+| `pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [\*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#persistentvolumeclaim-v1-core) |
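+
+For illustration, a `storage` section using these fields might be (the storage class and size are placeholders):
+
+```yaml
+storage:
+  storageClass: standard  # placeholder storage class
+  size: 10Gi              # size cannot be decreased later
+  resizeInUseVolumes: true
+```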
@@ -595,7 +594,7 @@ Name | Description
WalBackupConfiguration is the configuration of the backup of the WAL stream
-Name | Description | Type
------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------
-`compression` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType
-`encryption ` | Whenever to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
\ No newline at end of file
+| Name | Description | Type |
+| ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `compression` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType |
+| `encryption ` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType |
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
index 2d4c557da3d..66cd3ff3950 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx
@@ -18,17 +18,17 @@ Cloud Native PostgreSQL supports clusters based on asynchronous and synchronous
streaming replication to manage multiple hot standby replicas within the same
Kubernetes cluster, with the following specifications:
-* One primary, with optional multiple hot standby replicas for High Availability
-* Available services for applications:
- * `-rw`: applications connect to the only primary instance of the cluster
- * `-ro`: applications connect to the only hot standby replicas for read-only-workloads
- * `-r`: applications connect to any of the instances for read-only workloads
-* Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster:
- * PostgreSQL instances should reside on different Kubernetes worker nodes
- and share only the network
- * PostgreSQL instances can reside in different
- availability zones in the same region
- * All nodes of a PostgreSQL cluster should reside in the same region
+- One primary, with optional multiple hot standby replicas for High Availability
+- Available services for applications:
+ - `-rw`: applications connect to the only primary instance of the cluster
+  - `-ro`: applications connect only to the hot standby replicas for read-only workloads
+ - `-r`: applications connect to any of the instances for read-only workloads
+- Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster:
+ - PostgreSQL instances should reside on different Kubernetes worker nodes
+ and share only the network
+ - PostgreSQL instances can reside in different
+ availability zones in the same region
+ - All nodes of a PostgreSQL cluster should reside in the same region
!!! Seealso "Replication"
Please refer to the ["Replication" section](replication.md) for more
@@ -73,9 +73,9 @@ Applications can also access any PostgreSQL instance through the
Applications are supposed to work with the services created by Cloud Native PostgreSQL
in the same Kubernetes cluster:
-* `[cluster name]-rw`
-* `[cluster name]-ro`
-* `[cluster name]-r`
+- `[cluster name]-rw`
+- `[cluster name]-ro`
+- `[cluster name]-r`
Those services are entirely managed by the Kubernetes cluster and
implement a form of Virtual IP as described in the
@@ -88,8 +88,8 @@ implement a form of Virtual IP as described in the
You can use these services in your applications through:
-* DNS resolution
-* environment variables
+- DNS resolution
+- environment variables
For the credentials to connect to PostgreSQL, you can
use the secrets generated by the operator.
@@ -118,22 +118,22 @@ PostgreSQL cluster, you can also use environment variables to connect to the dat
For example, suppose that your PostgreSQL cluster is called `pg-database`,
you can use the following environment variables in your applications:
-* `PG_DATABASE_R_SERVICE_HOST`: the IP address of the service
- pointing to all the PostgreSQL instances for read-only workloads
+- `PG_DATABASE_R_SERVICE_HOST`: the IP address of the service
+ pointing to all the PostgreSQL instances for read-only workloads
-* `PG_DATABASE_RO_SERVICE_HOST`: the IP address of the
- service pointing to all hot-standby replicas of the cluster
+- `PG_DATABASE_RO_SERVICE_HOST`: the IP address of the
+ service pointing to all hot-standby replicas of the cluster
-* `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the
- service pointing to the *primary* instance of the cluster
+- `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the
+ service pointing to the *primary* instance of the cluster
### Secrets
The PostgreSQL operator will generate two `basic-auth` type secrets for every
PostgreSQL cluster it deploys:
-* `[cluster name]-superuser`
-* `[cluster name]-app`
+- `[cluster name]-superuser`
+- `[cluster name]-app`
The secrets contain the username, password, and a working
[`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html)
@@ -162,11 +162,11 @@ only write inside a single Kubernetes cluster, at any time.
However, for business continuity objectives it is fundamental to:
-- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data
- in multiple locations, regions and possibly using different providers
- (**Disaster Recovery**)
-- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL
- replication beyond the primary Kubernetes cluster (**High Availability**)
+- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data
+ in multiple locations, regions and possibly using different providers
+ (**Disaster Recovery**)
+- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL
+ replication beyond the primary Kubernetes cluster (**High Availability**)
In order to address the above concerns, Cloud Native PostgreSQL introduces the
concept of a *PostgreSQL Replica Cluster*. Replica clusters are the Cloud
@@ -175,17 +175,17 @@ hybrid, and multi-cloud contexts.
A replica cluster is a separate `Cluster` resource:
-1. having either `pg_basebackup` or full `recovery` as the `bootstrap`
- option from a defined external source cluster
-2. having the `replica.enabled` option set to `true`
-3. replicating from a defined external cluster identified by `replica.source`,
- normally located outside the Kubernetes cluster
-4. replaying WAL information received from the recovery object store
- (using PostgreSQL's `restore_command` parameter), or via streaming
- replication (using PostgreSQL's `primary_conninfo` parameter), or any of
- the two (in case both the `barmanObjectStore` and `connectionParameters`
- are defined in the external cluster)
-5. accepting only read connections, as supported by PostgreSQL's Hot Standby
+1. having either `pg_basebackup` or full `recovery` as the `bootstrap`
+ option from a defined external source cluster
+2. having the `replica.enabled` option set to `true`
+3. replicating from a defined external cluster identified by `replica.source`,
+ normally located outside the Kubernetes cluster
+4. replaying WAL information received from the recovery object store
+ (using PostgreSQL's `restore_command` parameter), or via streaming
+ replication (using PostgreSQL's `primary_conninfo` parameter), or any of
+ the two (in case both the `barmanObjectStore` and `connectionParameters`
+ are defined in the external cluster)
+5. accepting only read connections, as supported by PostgreSQL's Hot Standby
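+
+For example (all names are placeholders and the manifest is intentionally minimal), a replica cluster bootstrapped with `pg_basebackup` and kept in sync via streaming replication could declare:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-replica           # placeholder name
+spec:
+  instances: 3
+  bootstrap:
+    pg_basebackup:
+      source: cluster-origin      # must match an entry in externalClusters
+  replica:
+    enabled: true
+    source: cluster-origin
+  externalClusters:
+    - name: cluster-origin
+      connectionParameters:
+        host: cluster-origin-rw   # placeholder host of the origin cluster
+        user: streaming_replica
+        sslmode: verify-full
+      sslKey:
+        name: cluster-origin-replication  # placeholder client certificate secret
+        key: tls.key
+      sslCert:
+        name: cluster-origin-replication
+        key: tls.crt
+      sslRootCert:
+        name: cluster-origin-ca           # placeholder CA secret
+        key: ca.crt
+  storage:
+    size: 1Gi
+```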
!!! Seealso
Please refer to the ["Bootstrap" section](bootstrap.md) for more information
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
index ce0b29824ee..d9a4c214874 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
@@ -33,8 +33,8 @@ for more information about designated primary instances).
You can archive the backup files in any service that is supported
by the Barman Cloud infrastructure. That is:
-- [AWS S3](https://aws.amazon.com/s3/)
-- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/).
+- [AWS S3](https://aws.amazon.com/s3/)
+- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/).
You can also use any compatible implementation of the
supported services.
@@ -46,12 +46,12 @@ discussed in the following sections.
You will need the following information about your environment:
-- `ACCESS_KEY_ID`: the ID of the access key that will be used
- to upload files in S3
+- `ACCESS_KEY_ID`: the ID of the access key that will be used
+ to upload files in S3
-- `ACCESS_SECRET_KEY`: the secret part of the previous access key
+- `ACCESS_SECRET_KEY`: the secret part of the previous access key
-- `ACCESS_SESSION_TOKEN`: the optional session token in case it is required
+- `ACCESS_SESSION_TOKEN`: the optional session token in case it is required
The access key used must have permission to upload files in
the bucket. Given that, you must create a k8s secret with the
@@ -249,9 +249,9 @@ proceeding with a backup.
In order to access your storage account, you will need one of the following combinations
of credentials:
-- [**Connection String**](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account)
-- **Storage account name** and [**Storage account access key**](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
-- **Storage account name** and [**Storage account SAS Token**](https://docs.microsoft.com/en-us/azure/storage/blobs/sas-service-create).
+- [**Connection String**](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account)
+- **Storage account name** and [**Storage account access key**](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
+- **Storage account name** and [**Storage account SAS Token**](https://docs.microsoft.com/en-us/azure/storage/blobs/sas-service-create).
The credentials need to be stored inside a Kubernetes Secret, adding data entries only when
needed. The following command performs that:
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
index 221ef4627ad..947982828a1 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/before_you_start.mdx
@@ -9,42 +9,42 @@ specific to Kubernetes and PostgreSQL.
## Kubernetes terminology
-| Resource | Description |
-|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the control plane node(s). |
-| [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. |
-| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. |
-| [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. |
-| [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) | A *storage class* allows an administrator to define the classes of storage in a cluster, including provisioner (such as AWS EBS), reclaim policies, mount options, volume expansion, and so on. |
+| Resource | Description |
+| ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the control plane node(s). |
+| [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. |
+| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. |
+| [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. |
+| [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) | A *storage class* allows an administrator to define the classes of storage in a cluster, including provisioner (such as AWS EBS), reclaim policies, mount options, volume expansion, and so on. |
| [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) | A *persistent volume* (PV) is a resource in a Kubernetes cluster that represents storage that has been either manually provisioned by an administrator or dynamically provisioned by a *storage class* controller. A PV is associated with a pod using a *persistent volume claim* and its lifecycle is independent of any pod that uses it. Normally, a PV is a network volume, especially in the public cloud. A [*local persistent volume* (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a persistent volume that exists only on the particular node where the pod that uses it is running. |
| [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) | A *persistent volume claim* (PVC) represents a request for storage, which might include size, access mode, or a particular storage class. Similar to how a pod consumes node resources, a PVC consumes the resources of a PV. |
-| [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | A *namespace* is a logical and isolated subset of a Kubernetes cluster and can be seen as a *virtual cluster* within the wider physical cluster. Namespaces allow administrators to create separated environments based on projects, departments, teams, and so on. |
-| [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) | *Role Based Access Control* (RBAC), also known as *role-based security*, is a method used in computer systems security to restrict access to the network and resources of a system to authorized users only. Kubernetes has a native API to control roles at the namespace and cluster level and associate them with specific resources and individuals. |
-| [CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | A *custom resource definition* (CRD) is an extension of the Kubernetes API and allows developers to create new data types and objects, *called custom resources*. |
-| [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) | An *operator* is a custom resource that automates those steps that are normally performed by a human operator when managing one or more applications or given services. An operator assists Kubernetes in making sure that the resource's defined state always matches the observed one. |
-| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. |
+| [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | A *namespace* is a logical and isolated subset of a Kubernetes cluster and can be seen as a *virtual cluster* within the wider physical cluster. Namespaces allow administrators to create separated environments based on projects, departments, teams, and so on. |
+| [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) | *Role Based Access Control* (RBAC), also known as *role-based security*, is a method used in computer systems security to restrict access to the network and resources of a system to authorized users only. Kubernetes has a native API to control roles at the namespace and cluster level and associate them with specific resources and individuals. |
+| [CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | A *custom resource definition* (CRD) is an extension of the Kubernetes API and allows developers to create new data types and objects, *called custom resources*. |
+| [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) | An *operator* is a custom resource that automates those steps that are normally performed by a human operator when managing one or more applications or given services. An operator assists Kubernetes in making sure that the resource's defined state always matches the observed one. |
+| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. |
Cloud Native PostgreSQL requires Kubernetes 1.17 or higher.
## PostgreSQL terminology
-| Resource | Description |
-|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Instance | A Postgres server process running and listening on a pair "IP address(es)" and "TCP port" (usually 5432). |
-| Primary | A PostgreSQL instance that can accept both read and write operations. |
-| Replica | A PostgreSQL instance replicating from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). |
-| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. |
-| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. |
-| Replica Cluster | A Cloud Native PostgreSQL `Cluster` that is in continuous recovery mode from a selected PostgreSQL cluster, normally residing outside the Kubernetes cluster. It is a feature that enables multi-cluster deployments in private, public, hybrid, and multi-cloud contexts. |
-| Designated Primary | A PostgreSQL standby instance in a replica cluster that is in continuous recovery from another PostgreSQL cluster and that is designated to become primary in case the replica cluster becomes primary. |
-| Superuser | In PostgreSQL a *superuser* is any role with both `LOGIN` and `SUPERUSER` privileges. For security reasons, Cloud Native PostgreSQL performs administrative tasks by connecting to the `postgres` database as the `postgres` user via `peer` authentication over the local Unix Domain Socket. |
+| Resource | Description |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Instance | A Postgres server process running and listening on a pair "IP address(es)" and "TCP port" (usually 5432). |
+| Primary | A PostgreSQL instance that can accept both read and write operations. |
+| Replica            | A PostgreSQL instance that replicates from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as a *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). |
+| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. |
+| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. |
+| Replica Cluster | A Cloud Native PostgreSQL `Cluster` that is in continuous recovery mode from a selected PostgreSQL cluster, normally residing outside the Kubernetes cluster. It is a feature that enables multi-cluster deployments in private, public, hybrid, and multi-cloud contexts. |
+| Designated Primary | A PostgreSQL standby instance in a replica cluster that is in continuous recovery from another PostgreSQL cluster and that is designated to become primary in case the replica cluster becomes primary. |
+| Superuser | In PostgreSQL a *superuser* is any role with both `LOGIN` and `SUPERUSER` privileges. For security reasons, Cloud Native PostgreSQL performs administrative tasks by connecting to the `postgres` database as the `postgres` user via `peer` authentication over the local Unix Domain Socket. |
## Cloud terminology
-| Resource | Description |
-|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Region | A *region* in the Cloud is an isolated and independent geographic area organized in *availability zones*. Zones within a region have very little round-trip network latency. |
-| Zone | An *availability zone* in the Cloud (also known as *zone*) is an area in a region where resources can be deployed. Usually, an availability zone corresponds to a data center or an isolated building of the same data center. |
+| Resource | Description |
+| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Region | A *region* in the Cloud is an isolated and independent geographic area organized in *availability zones*. Zones within a region have very little round-trip network latency. |
+| Zone | An *availability zone* in the Cloud (also known as *zone*) is an area in a region where resources can be deployed. Usually, an availability zone corresponds to a data center or an isolated building of the same data center. |
## What to do next
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
index 0b888a4394d..7efd36f4774 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx
@@ -13,9 +13,9 @@ This section describes the options you have to create a new
PostgreSQL cluster and the design rationale behind them.
There are primarily two ways to bootstrap a new cluster:
-- from scratch (`initdb`)
-- from an existing PostgreSQL cluster, either directly (`pg_basebackup`)
- or indirectly (`recovery`)
+- from scratch (`initdb`)
+- from an existing PostgreSQL cluster, either directly (`pg_basebackup`)
+ or indirectly (`recovery`)
!!! Important
Bootstrapping from an existing cluster opens up the possibility
@@ -36,14 +36,14 @@ The *bootstrap* method can be defined in the `bootstrap` section of the cluster
specification.
Cloud Native PostgreSQL currently supports the following bootstrap methods:
-- `initdb`: initialize an empty PostgreSQL cluster (default)
-- `recovery`: create a PostgreSQL cluster by restoring from an existing cluster
- via a backup object store, and replaying all the available WAL files or up to
- a given *point in time*
-- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of
- the same major version using `pg_basebackup` via streaming replication protocol -
- useful if you want to migrate databases to Cloud Native PostgreSQL, even
- from outside Kubernetes.
+- `initdb`: initialize an empty PostgreSQL cluster (default)
+- `recovery`: create a PostgreSQL cluster by restoring from an existing cluster
+ via a backup object store, and replaying all the available WAL files or up to
+ a given *point in time*
+- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of
+ the same major version using `pg_basebackup` via streaming replication protocol -
+ useful if you want to migrate databases to Cloud Native PostgreSQL, even
+ from outside Kubernetes.
Differently from the `initdb` method, both `recovery` and `pg_basebackup`
create a new cluster based on another one (either offline or online) and can be
@@ -67,14 +67,14 @@ As far as bootstrapping is concerned, `externalClusters` can be used
to define the source PostgreSQL cluster for either the `pg_basebackup`
method or the `recovery` one. An external cluster needs to have:
-- a name that identifies the origin cluster, to be used as a reference via the
- `source` option
-- at least one of the following:
+- a name that identifies the origin cluster, to be used as a reference via the
+ `source` option
+- at least one of the following:
- - information about streaming connection
- - information about the **recovery object store**, which is a Barman Cloud
- compatible object store that contains the backup files of the source
- cluster - that is, base backups and WAL archives.
+ - information about streaming connection
+ - information about the **recovery object store**, which is a Barman Cloud
+ compatible object store that contains the backup files of the source
+ cluster - that is, base backups and WAL archives.
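+
+As a sketch (bucket and secret names are placeholders), an external cluster that only provides a recovery object store could be declared as:
+
+```yaml
+externalClusters:
+  - name: cluster-origin                       # referenced via the `source` option
+    barmanObjectStore:
+      destinationPath: "s3://backups/example"  # placeholder recovery object store
+      s3Credentials:
+        accessKeyId:
+          name: aws-creds                      # placeholder secret
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: aws-creds
+          key: ACCESS_SECRET_KEY
+```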
!!! Note
A recovery object store is normally an AWS S3 or an Azure Blob Storage
@@ -125,12 +125,12 @@ spec:
The above example of bootstrap will:
-1. create a new `PGDATA` folder using PostgreSQL's native `initdb` command
-2. set a password for the `postgres` *superuser* from the secret named `superuser-secret`
-3. create an *unprivileged* user named `app`
-4. set the password of the latter (`app`) using the one in the `app-secret`
- secret (make sure that `username` matches the same name of the `owner`)
-5. create a database called `app` owned by the `app` user.
+1. create a new `PGDATA` folder using PostgreSQL's native `initdb` command
+2. set a password for the `postgres` *superuser* from the secret named `superuser-secret`
+3. create an *unprivileged* user named `app`
+4. set the password of the latter (`app`) using the one in the `app-secret`
+ secret (make sure that `username` matches the same name of the `owner`)
+5. create a database called `app` owned by the `app` user.
Thanks to the *convention over configuration paradigm*, you can let the
operator choose a default database name (`app`) and a default application
@@ -290,7 +290,6 @@ by `name` (our recommendation is to use the same `name` of the origin cluster).
`backupObjectStore.serverName` property (by default assigned to the
value of `name` in the external cluster definition).
-
### Bootstrap from a backup (`recovery`)
The `recovery` bootstrap mode lets you create a new cluster from
@@ -298,11 +297,11 @@ an existing backup, namely a *recovery object store*.
There are two ways to achieve this result in Cloud Native PostgreSQL:
-- using a recovery object store, that is a backup of another cluster
- created by Barman Cloud and defined via the `barmanObjectStore` option
- in the `externalClusters` section
-- using an existing `Backup` object in the same namespace (this was the
- only option available before version 1.8.0).
+- using a recovery object store, that is a backup of another cluster
+ created by Barman Cloud and defined via the `barmanObjectStore` option
+ in the `externalClusters` section
+- using an existing `Backup` object in the same namespace (this was the
+ only option available before version 1.8.0).
Both recovery methods enable either full recovery (up to the last
available WAL) or up to a [point in time](#point-in-time-recovery).
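+
+As a minimal sketch of the second method (the `Backup` name is a placeholder), the `recovery` bootstrap section can simply reference an existing `Backup` object in the same namespace:
+
+```yaml
+bootstrap:
+  recovery:
+    backup:
+      name: backup-example  # placeholder name of an existing Backup object
+```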
@@ -390,18 +389,18 @@ backup that needs to be restored.
Whether you recover from a recovery object store or an existing `Backup`
resource, the following considerations apply:
-- The application database name and the application database user are preserved
-from the backup that is being restored. The operator does not currently attempt
-to back up the underlying secrets, as this is part of the usual maintenance
-activity of the Kubernetes cluster itself.
-- In case you don't supply any `superuserSecret`, a new one is automatically
-generated with a secure and random password. The secret is then used to
-reset the password for the `postgres` user of the cluster.
-- By default, the recovery will continue up to the latest
-available WAL on the default target timeline (`current` for PostgreSQL up to
-11, `latest` for version 12 and above).
-You can optionally specify a `recoveryTarget` to perform a point in time
-recovery (see the ["Point in time recovery" section](#point-in-time-recovery)).
+- The application database name and the application database user are preserved
+ from the backup that is being restored. The operator does not currently attempt
+ to back up the underlying secrets, as this is part of the usual maintenance
+ activity of the Kubernetes cluster itself.
+- In case you don't supply any `superuserSecret`, a new one is automatically
+ generated with a secure and random password. The secret is then used to
+ reset the password for the `postgres` user of the cluster.
+- By default, the recovery will continue up to the latest
+ available WAL on the default target timeline (`current` for PostgreSQL up to
+ 11, `latest` for version 12 and above).
+ You can optionally specify a `recoveryTarget` to perform a point in time
+ recovery (see the ["Point in time recovery" section](#point-in-time-recovery)).
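For illustration only, a recovery from a hypothetical object store defined in `externalClusters` might be declared as follows (bucket, secret names and keys are placeholders):

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  bootstrap:
    recovery:
      source: origin                    # must match an entry in externalClusters
  externalClusters:
    - name: origin
      barmanObjectStore:
        destinationPath: s3://backups/  # hypothetical object store path
        s3Credentials:
          accessKeyId:
            name: aws-creds
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: aws-creds
            key: ACCESS_SECRET_KEY
  storage:
    size: 1Gi
```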
#### Point in time recovery (PITR)
@@ -449,16 +448,16 @@ spec:
Besides `targetTime`, you can use the following criteria to stop the recovery:
-- `targetXID` specify a transaction ID up to which recovery will proceed
+- `targetXID` specifies a transaction ID up to which recovery will proceed
-- `targetName` specify a restore point (created with `pg_create_restore_point`
- to which recovery will proceed)
+- `targetName` specifies a restore point (created with `pg_create_restore_point`)
+  up to which recovery will proceed
-- `targetLSN` specify the LSN of the write-ahead log location up to which
- recovery will proceed
+- `targetLSN` specifies the LSN of the write-ahead log location up to which
+  recovery will proceed
-- `targetImmediate` specify to stop as soon as a consistent state is
- reached
+- `targetImmediate` specifies that recovery should stop as soon as a consistent
+  state is reached
You can choose only a single one among the targets above in each
`recoveryTarget` configuration.
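As a sketch, the same `recovery` stanza could target a named restore point instead of a timestamp; `before_app_upgrade` is a hypothetical restore point previously created with `pg_create_restore_point()`:

```yaml
  bootstrap:
    recovery:
      source: origin
      recoveryTarget:
        targetName: before_app_upgrade
```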
@@ -533,8 +532,8 @@ The streaming replication client on the target instance, which will be
transparently managed by `pg_basebackup`, can authenticate itself on the source
instance in any of the following ways:
-1. via [username/password](#usernamepassword-authentication)
-2. via [TLS client certificate](#tls-certificate-authentication)
+1. via [username/password](#usernamepassword-authentication)
+2. via [TLS client certificate](#tls-certificate-authentication)
The latter is the recommended one if you connect to a source managed
by Cloud Native PostgreSQL or configured for TLS authentication.
@@ -547,19 +546,19 @@ Both cases are explained below.
The following requirements apply to the `pg_basebackup` bootstrap method:
-- target and source must have the same hardware architecture
-- target and source must have the same major PostgreSQL version
-- source must not have any tablespace defined (see ["Current limitations"](#current-limitations) below)
-- source must be configured with enough `max_wal_senders` to grant
- access from the target for this one-off operation by providing at least
- one *walsender* for the backup plus one for WAL streaming
-- the network between source and target must be configured to enable the target
- instance to connect to the PostgreSQL port on the source instance
-- source must have a role with `REPLICATION LOGIN` privileges and must accept
- connections from the target instance for this role in `pg_hba.conf`, preferably
- via TLS (see ["About the replication user"](#about-the-replication-user) below)
-- target must be able to successfully connect to the source PostgreSQL instance
- using a role with `REPLICATION LOGIN` privileges
+- target and source must have the same hardware architecture
+- target and source must have the same major PostgreSQL version
+- source must not have any tablespace defined (see ["Current limitations"](#current-limitations) below)
+- source must be configured with enough `max_wal_senders` to grant
+ access from the target for this one-off operation by providing at least
+ one *walsender* for the backup plus one for WAL streaming
+- the network between source and target must be configured to enable the target
+ instance to connect to the PostgreSQL port on the source instance
+- source must have a role with `REPLICATION LOGIN` privileges and must accept
+ connections from the target instance for this role in `pg_hba.conf`, preferably
+ via TLS (see ["About the replication user"](#about-the-replication-user) below)
+- target must be able to successfully connect to the source PostgreSQL instance
+ using a role with `REPLICATION LOGIN` privileges
!!! Seealso
For further information, please refer to the
@@ -606,10 +605,10 @@ with the `pg_basebackup` bootstrap is based on username and password matching.
Make sure you have the following information before you start the procedure:
-- location of the source instance, identified by a hostname or an IP address
- and a TCP port
-- replication username (`streaming_replica` for simplicity)
-- password
+- location of the source instance, identified by a hostname or an IP address
+ and a TCP port
+- replication username (`streaming_replica` for simplicity)
+- password
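A hedged sketch of such a bootstrap, with placeholder host and secret names, could look like the following:

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-clone
spec:
  instances: 3
  bootstrap:
    pg_basebackup:
      source: source-db                # must match an entry in externalClusters
  externalClusters:
    - name: source-db
      connectionParameters:
        host: source-db.example.com    # hostname or IP address of the source
        port: "5432"
        user: streaming_replica
      password:
        name: source-db-replica-user   # secret holding the replication password
        key: password
  storage:
    size: 1Gi
```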
You might need to add a line similar to the following to the `pg_hba.conf`
file on the source PostgreSQL instance:
@@ -751,6 +750,6 @@ PostgreSQL's continuous recovery mechanism via Write-Ahead Log (WAL) shipping
by creating a new cluster that is a replica of another PostgreSQL instance.
This will open up two main use cases:
-- replication over different Kubernetes clusters in Cloud Native PostgreSQL
-- *0 cutover time* migrations to Cloud Native PostgreSQL with the `pg_basebackup`
- bootstrap method
\ No newline at end of file
+- replication over different Kubernetes clusters in Cloud Native PostgreSQL
+- *0 cutover time* migrations to Cloud Native PostgreSQL with the `pg_basebackup`
+ bootstrap method
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
index 9a8227b52c3..519dfbc2d2d 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx
@@ -7,10 +7,10 @@ product: 'Cloud Native Operator'
Cloud Native PostgreSQL has been designed to natively support TLS certificates.
In order to set up a `Cluster`, the operator requires:
-- a server Certification Authority (CA) certificate
-- a server TLS certificate signed by the server Certification Authority
-- a client Certification Authority certificate
-- a streaming replication client certificate generated by the client Certification Authority
+- a server Certification Authority (CA) certificate
+- a server TLS certificate signed by the server Certification Authority
+- a client Certification Authority certificate
+- a streaming replication client certificate generated by the client Certification Authority
!!! Note
You can find all the secrets used by the cluster and their expiration dates
@@ -19,13 +19,13 @@ In order to set up a `Cluster`, the operator requires:
Cloud Native PostgreSQL is very flexible when it comes to TLS certificates, and
primarily operates in two modes:
-1. [**operator managed**](#operator-managed-mode): certificates are internally
- managed by the operator in a fully automated way, and signed using a CA created
- by Cloud Native PostgreSQL
-2. [**user provided**](#user-provided-certificates-mode): certificates are
- generated outside the operator and imported in the cluster definition as
- secrets - Cloud Native PostgreSQL integrates itself with cert-manager (see
- examples below)
+1. [**operator managed**](#operator-managed-mode): certificates are internally
+ managed by the operator in a fully automated way, and signed using a CA created
+ by Cloud Native PostgreSQL
+2. [**user provided**](#user-provided-certificates-mode): certificates are
+ generated outside the operator and imported in the cluster definition as
+ secrets - Cloud Native PostgreSQL integrates itself with cert-manager (see
+ examples below)
You can also choose a hybrid approach, where only part of the certificates is
generated outside CNP.
@@ -43,8 +43,8 @@ automatically.
The operator generates a self-signed CA and stores it in a generic secret
containing the following keys:
-- `ca.crt`: CA certificate used to validate the server certificate, used as `sslrootcert` in clients' connection strings.
-- `ca.key`: the key used to sign Server SSL certificate automatically
+- `ca.crt`: CA certificate used to validate the server certificate, used as `sslrootcert` in clients' connection strings.
+- `ca.key`: the key used to automatically sign the server TLS certificate
#### Server TLS Secret
@@ -83,10 +83,10 @@ using a separate component such as [cert-manager](https://cert-manager.io/). In
order to use a custom server TLS certificate for a Cluster, you must specify
the following parameters:
-- `serverTLSSecret`: the name of a Secret of type `kubernetes.io/tls`,
- containing the server TLS certificate. It must contain both the standard
- `tls.crt` and `tls.key` keys.
-- `serverCASecret`: the name of a Secret containing the `ca.crt` key.
+- `serverTLSSecret`: the name of a Secret of type `kubernetes.io/tls`,
+ containing the server TLS certificate. It must contain both the standard
+ `tls.crt` and `tls.key` keys.
+- `serverCASecret`: the name of a Secret containing the `ca.crt` key.
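For example, the relevant fragment of the `Cluster` spec could look like this (secret names are placeholders):

```yaml
  certificates:
    serverTLSSecret: my-postgres-server-tls   # kubernetes.io/tls secret with tls.crt and tls.key
    serverCASecret: my-postgres-server-ca     # secret containing the ca.crt key
```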
!!! Note
The operator will still create and manage the two secrets related to client
@@ -103,9 +103,9 @@ See below for a complete example.
Given the following files:
-- `server-ca.crt`: the certificate of the CA that signed the server TLS certificate.
-- `server.crt`: the certificate of the server TLS certificate.
-- `server.key`: the private key of the server TLS certificate.
+- `server-ca.crt`: the certificate of the CA that signed the server TLS certificate.
+- `server.crt`: the certificate of the server TLS certificate.
+- `server.key`: the private key of the server TLS certificate.
Create a secret containing the CA certificate:
@@ -216,11 +216,11 @@ using a separate component such as [cert-manager](https://cert-manager.io/) or
use a custom CA to verify client certificates for a Cluster, you must specify
the following parameters:
-- `replicationTLSSecret`: the name of a Secret of type `kubernetes.io/tls`,
- containing the client certificate for user `streaming_replica`. It must contain
- both the standard `tls.crt` and `tls.key` keys.
-- `clientCASecret`: the name of a Secret containing the `ca.crt` key of the CA
- that should be used to verify client certificate.
+- `replicationTLSSecret`: the name of a Secret of type `kubernetes.io/tls`,
+ containing the client certificate for user `streaming_replica`. It must contain
+ both the standard `tls.crt` and `tls.key` keys.
+- `clientCASecret`: the name of a Secret containing the `ca.crt` key of the CA
+  that should be used to verify client certificates.
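Similarly, a sketch of the corresponding fragment of the `Cluster` spec (again with placeholder secret names):

```yaml
  certificates:
    clientCASecret: my-postgres-client-ca            # CA used to verify client certificates
    replicationTLSSecret: my-streaming-replica-tls   # client certificate for the streaming_replica user
```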
!!! Note
The operator will still create and manage the two secrets related to server
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
index 227c756ea1b..6512d63a5d9 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
@@ -118,7 +118,9 @@ can start with maintenance work or test a switch-over situation in your cluster
```shell
kubectl cnp promote cluster-example cluster-example-2
```
+
Or you can use the instance node number to promote
+
```shell
kubectl cnp promote cluster-example 2
```
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
index eea64d2837f..3208d5bd1bf 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/container_images.mdx
@@ -8,21 +8,21 @@ The Cloud Native PostgreSQL operator for Kubernetes is designed to
work with any compatible container image of PostgreSQL that complies
with the following requirements:
-- PostgreSQL 10+ executables that must be in the path:
- - `initdb`
- - `postgres`
- - `pg_ctl`
- - `pg_controldata`
- - `pg_basebackup`
-- Barman Cloud executables that must be in the path:
- - `barman-cloud-wal-archive`
- - `barman-cloud-wal-restore`
- - `barman-cloud-backup`
- - `barman-cloud-restore`
- - `barman-cloud-backup-list`
-- PGAudit extension installed (optional - only if PGAudit is required
- in the deployed clusters)
-- Sensible locale settings
+- PostgreSQL 10+ executables that must be in the path:
+ - `initdb`
+ - `postgres`
+ - `pg_ctl`
+ - `pg_controldata`
+ - `pg_basebackup`
+- Barman Cloud executables that must be in the path:
+ - `barman-cloud-wal-archive`
+ - `barman-cloud-wal-restore`
+ - `barman-cloud-backup`
+ - `barman-cloud-restore`
+ - `barman-cloud-backup-list`
+- PGAudit extension installed (optional - only if PGAudit is required
+ in the deployed clusters)
+- Sensible locale settings
No entry point and/or command is required in the image definition, as Cloud
Native PostgreSQL overrides it with its instance manager.
@@ -51,11 +51,11 @@ accepted in a Docker tag, preceded by a dot, an underscore, or a minus sign.
Examples of accepted image tags:
-- `9.6.19-alpine`
-- `12.4`
-- `11_1`
-- `13`
-- `12.3.2.1-1`
+- `9.6.19-alpine`
+- `12.4`
+- `11_1`
+- `13`
+- `12.3.2.1-1`
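As an illustration, such a tag would be referenced from the `Cluster` spec like this (the tag itself is only an example):

```yaml
spec:
  imageName: quay.io/enterprisedb/postgresql:13.4
```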
!!! Warning
`latest` is not considered a valid tag for the image.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
index bb3df044411..23649dbe3b5 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/credits.mdx
@@ -7,17 +7,17 @@ product: 'Cloud Native Operator'
Cloud Native PostgreSQL (Operator for Kubernetes/OpenShift) has been designed,
developed, and tested by the EnterpriseDB Cloud Native team:
-- Gabriele Bartolini
-- Jonathan Battiato
-- Francesco Canovai
-- Leonardo Cecchi
-- Valerio Del Sarto
-- Niccolò Fei
-- Jonathan Gonzalez
-- Danish Khan
-- Anand Nednur
-- Marco Nenciarini
-- Gabriele Quaresima
-- Philippe Scorsolini
-- Jitendra Wadle
-- Adam Wright
\ No newline at end of file
+- Gabriele Bartolini
+- Jonathan Battiato
+- Francesco Canovai
+- Leonardo Cecchi
+- Valerio Del Sarto
+- Niccolò Fei
+- Jonathan Gonzalez
+- Danish Khan
+- Anand Nednur
+- Marco Nenciarini
+- Gabriele Quaresima
+- Philippe Scorsolini
+- Jitendra Wadle
+- Adam Wright
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
index be8856ff847..3641ba508b2 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
@@ -12,54 +12,54 @@ Moreover, the following Kubernetes versions are tested for each commit,
ensuring failure and bugs detection at an early stage of the development
process:
-* 1.22
-* 1.21
-* 1.20
-* 1.19
-* 1.18
-* 1.17
+- 1.22
+- 1.21
+- 1.20
+- 1.19
+- 1.18
+- 1.17
The following PostgreSQL versions are tested:
-* PostgreSQL 14
-* PostgreSQL 13
-* PostgreSQL 12
-* PostgreSQL 11
-* PostgreSQL 10
+- PostgreSQL 14
+- PostgreSQL 13
+- PostgreSQL 12
+- PostgreSQL 11
+- PostgreSQL 10
For each tested version of Kubernetes and PostgreSQL, a Kubernetes
cluster is created using [kind](https://kind.sigs.k8s.io/),
and the following suite of E2E tests are performed on that cluster:
-* Installation of the operator;
-* Creation of a `Cluster`;
-* Usage of a persistent volume for data storage;
-* Connection via services, including read-only;
-* Connection via user-provided server and/or client certificates;
-* Scale-up and scale-down of a `Cluster`;
-* Failover;
-* Switchover;
-* Manage PostgreSQL configuration changes;
-* Rolling updates when changing PostgreSQL images;
-* Backup and ScheduledBackups execution;
-* Backup and ScheduledBackups execution using Barman Cloud on Azure blob storage;
-* Synchronous replication;
-* Restore from backup;
-* Restore from backup using Barman Cloud on Azure blob storage;
-* Pod affinity using `NodeSelector`;
-* Metrics collection;
-* JSON log format;
-* Operator configuration via ConfigMap;
-* Operator pod deletion;
-* Operator pod eviction;
-* Operator upgrade;
-* Operator High Availability;
-* Node drain;
-* Primary endpoint switch in case of failover in less than 10 seconds;
-* Primary endpoint switch in case of switchover in less than 20 seconds;
-* Recover from a degraded state in less than 60 seconds;
-* Physical replica clusters;
-* Storage expansion;
-* Data corruption;
+- Installation of the operator;
+- Creation of a `Cluster`;
+- Usage of a persistent volume for data storage;
+- Connection via services, including read-only;
+- Connection via user-provided server and/or client certificates;
+- Scale-up and scale-down of a `Cluster`;
+- Failover;
+- Switchover;
+- Manage PostgreSQL configuration changes;
+- Rolling updates when changing PostgreSQL images;
+- Backup and ScheduledBackups execution;
+- Backup and ScheduledBackups execution using Barman Cloud on Azure blob storage;
+- Synchronous replication;
+- Restore from backup;
+- Restore from backup using Barman Cloud on Azure blob storage;
+- Pod affinity using `NodeSelector`;
+- Metrics collection;
+- JSON log format;
+- Operator configuration via ConfigMap;
+- Operator pod deletion;
+- Operator pod eviction;
+- Operator upgrade;
+- Operator High Availability;
+- Node drain;
+- Primary endpoint switch in case of failover in less than 10 seconds;
+- Primary endpoint switch in case of switchover in less than 20 seconds;
+- Recover from a degraded state in less than 60 seconds;
+- Physical replica clusters;
+- Storage expansion;
+- Data corruption;
The E2E tests suite is also run for OpenShift versions 4.6 and 4.7.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
index fd52d7bfda0..7b8dd831405 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/evaluation.mdx
@@ -35,7 +35,7 @@ EDB Postgres Advanced container images are available at
Once you have received the license key, you can use EDB Postgres Advanced
by setting in the `spec` section of the `Cluster` deployment configuration file:
-- `imageName` to point to the `quay.io/enterprisedb/edb-postgres-advanced` repository
-- `licenseKey` to your license key (in the form of a string)
+- `imageName` to point to the `quay.io/enterprisedb/edb-postgres-advanced` repository
+- `licenseKey` to your license key (in the form of a string)
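A minimal sketch of such a deployment could be (image tag and license key are placeholders):

```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-epas-eval
spec:
  instances: 3
  imageName: quay.io/enterprisedb/edb-postgres-advanced:13
  licenseKey: "<license key provided by EDB>"
  storage:
    size: 1Gi
```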
Please refer to the full example in the [configuration samples](samples.md) section.
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
index 8d322282301..72ae0e18e87 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/expose_pg_services.mdx
@@ -35,9 +35,8 @@ for a comprehensive list).
We assume that:
-* the NGINX Ingress controller has been deployed and works correctly
-* it is possible to create a service of type `LoadBalancer` in your cluster
-
+- the NGINX Ingress controller has been deployed and works correctly
+- it is possible to create a service of type `LoadBalancer` in your cluster
!!! Important
Ingresses are only required to expose HTTP and HTTPS traffic. While the NGINX
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
index a6c536394eb..7abcd5640fe 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/failure_modes.mdx
@@ -36,8 +36,8 @@ The operator will instantiate one PVC for every PostgreSQL instance to store the
Such storage space is set for reuse in two cases:
-- when the corresponding Pod is deleted by the user (and a new Pod will be recreated)
-- when the corresponding Pod is evicted and scheduled on another node
+- when the corresponding Pod is deleted by the user (and a new Pod will be recreated)
+- when the corresponding Pod is evicted and scheduled on another node
If you want to prevent the operator from reusing a certain PVC you need to
remove the PVC before deleting the Pod. For this purpose, you can use the
@@ -59,11 +59,11 @@ pod "cluster-example-1" deleted
A pod belonging to a `Cluster` can fail in the following ways:
-* the pod is explicitly deleted by the user;
-* the readiness probe on its `postgres` container fails;
-* the liveness probe on its `postgres` container fails;
-* the Kubernetes worker node is drained;
-* the Kubernetes worker node where the pod is scheduled fails.
+- the pod is explicitly deleted by the user;
+- the readiness probe on its `postgres` container fails;
+- the liveness probe on its `postgres` container fails;
+- the Kubernetes worker node is drained;
+- the Kubernetes worker node where the pod is scheduled fails.
Each one of these failures has different effects on the `Cluster` and the
services managed by the operator.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
index 859e80c616b..435e245de8b 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
@@ -109,37 +109,37 @@ format for the following platforms: `linux/amd64`, `linux/arm64`,
The following versions of Postgres are currently supported:
-- PostgreSQL 13, 12, 11 and 10 (`linux/amd64`)
-- EDB Postgres Advanced 13, 12, 11 and 10 (`linux/amd64`, `linux/ppc64le`, `linux/s390x`)
+- PostgreSQL 13, 12, 11 and 10 (`linux/amd64`)
+- EDB Postgres Advanced 13, 12, 11 and 10 (`linux/amd64`, `linux/ppc64le`, `linux/s390x`)
## Main features
-* Direct integration with Kubernetes API server for High Availability,
- without requiring an external tool
-* Self-Healing capability, through:
- * failover of the primary instance by promoting the most aligned replica
- * automated recreation of a replica
-* Planned switchover of the primary instance by promoting a selected replica
-* Scale up/down capabilities
-* Definition of an arbitrary number of instances (minimum 1 - one primary server)
-* Definition of the *read-write* service, to connect your applications to the only primary server of the cluster
-* Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads
-* Support for Local Persistent Volumes with PVC templates
-* Reuse of Persistent Volumes storage in Pods
-* Rolling updates for PostgreSQL minor versions and operator upgrades
-* TLS connections and client certificate authentication
-* Support for custom TLS certificates (including integration with cert-manager)
-* Continuous backup to an S3 compatible object store
-* Full recovery and Point-In-Time recovery from an S3 compatible object store backup
-* Replica clusters for PostgreSQL deployments across multiple Kubernetes
- clusters, enabling private, public, hybrid, and multi-cloud architectures
-* Support for Synchronous Replicas
-* Support for node affinity via `nodeSelector`
-* Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187)
-* Standard output logging of PostgreSQL error messages in JSON format
-* Support for the `restricted` security context constraint (SCC) in Red Hat OpenShift
-* `cnp` plugin for `kubectl`
-* Multi-arch format container images
+- Direct integration with Kubernetes API server for High Availability,
+ without requiring an external tool
+- Self-Healing capability, through:
+ - failover of the primary instance by promoting the most aligned replica
+ - automated recreation of a replica
+- Planned switchover of the primary instance by promoting a selected replica
+- Scale up/down capabilities
+- Definition of an arbitrary number of instances (minimum 1 - one primary server)
+- Definition of the *read-write* service, to connect your applications to the only primary server of the cluster
+- Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads
+- Support for Local Persistent Volumes with PVC templates
+- Reuse of Persistent Volumes storage in Pods
+- Rolling updates for PostgreSQL minor versions and operator upgrades
+- TLS connections and client certificate authentication
+- Support for custom TLS certificates (including integration with cert-manager)
+- Continuous backup to an S3 compatible object store
+- Full recovery and Point-In-Time recovery from an S3 compatible object store backup
+- Replica clusters for PostgreSQL deployments across multiple Kubernetes
+ clusters, enabling private, public, hybrid, and multi-cloud architectures
+- Support for Synchronous Replicas
+- Support for node affinity via `nodeSelector`
+- Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187)
+- Standard output logging of PostgreSQL error messages in JSON format
+- Support for the `restricted` security context constraint (SCC) in Red Hat OpenShift
+- `cnp` plugin for `kubectl`
+- Multi-arch format container images
## About this guide
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
index 894f1b9993b..924b89b7915 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx
@@ -122,8 +122,8 @@ selected installation method.
Upgrading Cloud Native PostgreSQL operator is a two-step process:
-1. upgrade the controller and the related Kubernetes resources
-2. upgrade the instance manager running in every PostgreSQL pod
+1. upgrade the controller and the related Kubernetes resources
+2. upgrade the instance manager running in every PostgreSQL pod
Unless differently stated in the release notes, the first step is normally done
by applying the manifest of the newer version for plain Kubernetes
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
index b9dd802af2d..c5d33a2c701 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/kubernetes_upgrade.mdx
@@ -24,9 +24,9 @@ the cluster to the latest version of Kubernetes.
Usually, maintenance operations in a cluster are performed one node
at a time by:
-1. evicting the workloads from the node to be updated (`drain`)
-2. performing the actual operation (for example, system update)
-3. re-joining the node to the cluster (`uncordon`)
+1. evicting the workloads from the node to be updated (`drain`)
+2. performing the actual operation (for example, system update)
+3. re-joining the node to the cluster (`uncordon`)
The above process requires workloads to be either stopped for the
entire duration of the upgrade or migrated to another node.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
index 17d8854a62b..a211a89e5b7 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx
@@ -16,10 +16,10 @@ can be linked together and put in relationship through **labels** and
In short:
-- an annotation is used to assign additional non-identifying information to
- resources with the goal to facilitate integration with external tools
-- a label is used to group objects and query them through Kubernetes' native
- selector capability
+- an annotation is used to assign additional non-identifying information to
+  resources, with the goal of facilitating integration with external tools
+- a label is used to group objects and query them through Kubernetes' native
+ selector capability
You can select one or more labels and/or annotations you will use
in your Cloud Native PostgreSQL deployments. Then you need to configure the operator
@@ -39,8 +39,8 @@ instructions provided in the ["Operator configuration"](operator_conf.md) sectio
Below we will continue on that example and limit it to the following:
-- annotations: `categories`
-- labels: `app`, `environment`, and `workload`
+- annotations: `categories`
+- labels: `app`, `environment`, and `workload`
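Assuming the `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` options described in the operator configuration section, this example could translate into an operator `ConfigMap` similar to the following sketch (the ConfigMap name and namespace depend on how the operator was installed):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgresql-operator-controller-manager-config   # adjust to your installation
  namespace: postgresql-operator-system
data:
  INHERITED_ANNOTATIONS: categories
  INHERITED_LABELS: app, environment, workload
```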
!!! Note
Feel free to select the names that most suit your context for both
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
index e4e4428cfa3..75f7a12fa6b 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx
@@ -9,12 +9,12 @@ including PostgreSQL logs.
Each log entry has the following fields:
-- `level`: log level (`info`, `notice`, ...)
-- `ts`: the timestamp (epoch with microseconds)
-- `logger`: the type of the record (e.g. `postgres` or `pg_controldata`)
-- `msg`: the actual message or the keyword `record` in case the message is parsed in JSON format
-- `record`: the actual record (with structure that varies depending on the
- `logger` type)
+- `level`: log level (`info`, `notice`, ...)
+- `ts`: the timestamp (epoch with microseconds)
+- `logger`: the type of the record (e.g. `postgres` or `pg_controldata`)
+- `msg`: the actual message or the keyword `record` in case the message is parsed in JSON format
+- `record`: the actual record (with structure that varies depending on the
+ `logger` type)
## Operator log
@@ -118,11 +118,11 @@ spec:
The audit CSV logs entries returned by PGAudit are then parsed and routed to
stdout in JSON format, similarly to all the remaining logs:
-- `.logger` is set to `pgaudit`
-- `.msg` is set to `record`
-- `.record` contains the whole parsed record as a JSON object, similar to
- `logging_collector` logs - except for `.record.audit`, which contains the
- PGAudit CSV message formatted as a JSON object
+- `.logger` is set to `pgaudit`
+- `.msg` is set to `record`
+- `.record` contains the whole parsed record as a JSON object, similar to
+ `logging_collector` logs - except for `.record.audit`, which contains the
+ PGAudit CSV message formatted as a JSON object
See the example below:
@@ -205,9 +205,9 @@ Other parameters can be passed via `.spec.postgresql.parameters` as usual.
The audit CSV logs are parsed and routed to stdout in JSON format, similarly to all the remaining logs:
-- `.logger` set to `edb_audit`
-- `.msg` set to `record`
-- `.record` containing the whole parsed record as a JSON object
+- `.logger` set to `edb_audit`
+- `.msg` set to `record`
+- `.record` containing the whole parsed record as a JSON object
See the example below:
@@ -258,18 +258,18 @@ All logs that are produced by the operator and its instances are in JSON
format, with `logger` set accordingly to the process that produced them.
Therefore, all the possible `logger` values are the following ones:
-- `barman-cloud-wal-archive`: from `barman-cloud-wal-archive` directly
-- `barman-cloud-wal-restore`: from `barman-cloud-wal-restore` directly
-- `edb_audit`: from the EDB Audit extension
-- `initdb`: from running `initdb`
-- `pg_basebackup`: from running `pg_basebackup`
-- `pg_controldata`: from running `pg_controldata`
-- `pg_ctl`: from running any `pg_ctl` subcommand
-- `pg_rewind`: from running `pg_rewind`
-- `pgaudit`: from PGAudit extension
-- `postgres`: from the `postgres` instance (having `msg` different than `record`)
-- `wal-archive`: from the `wal-archive` subcommand of the instance manager
-- `wal-restore`: from the `wal-restore` subcommand of the instance manager
+- `barman-cloud-wal-archive`: from `barman-cloud-wal-archive` directly
+- `barman-cloud-wal-restore`: from `barman-cloud-wal-restore` directly
+- `edb_audit`: from the EDB Audit extension
+- `initdb`: from running `initdb`
+- `pg_basebackup`: from running `pg_basebackup`
+- `pg_controldata`: from running `pg_controldata`
+- `pg_ctl`: from running any `pg_ctl` subcommand
+- `pg_rewind`: from running `pg_rewind`
+- `pgaudit`: from PGAudit extension
+- `postgres`: from the `postgres` instance (having `msg` different than `record`)
+- `wal-archive`: from the `wal-archive` subcommand of the instance manager
+- `wal-restore`: from the `wal-restore` subcommand of the instance manager
Except for `postgres` and `edb_audit` that have the aforementioned structures,
all other possible values just have `msg` set to the escaped message that is
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
index a848161f4a2..7eddfda6c58 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx
@@ -21,10 +21,10 @@ curl http://<pod ip>:9187/metrics
All monitoring queries that are performed on PostgreSQL are:
-- transactionally atomic (one transaction per query)
-- executed with the `pg_monitor` role
-- executed with `application_name` set to `cnp_metrics_exporter`
-- executed as user `postgres`
+- transactionally atomic (one transaction per query)
+- executed with the `pg_monitor` role
+- executed with `application_name` set to `cnp_metrics_exporter`
+- executed as user `postgres`
Please refer to the "Default roles" section in PostgreSQL
[documentation](https://www.postgresql.org/docs/current/default-roles.html)
@@ -34,9 +34,9 @@ Queries, by default, are run against the *main database*, as defined by
the specified `bootstrap` method of the `Cluster` resource, according
to the following logic:
-- using `initdb`: queries will be run against the specified database by default, so the
- value passed as `initdb.database` or defaulting to `app` if not specified.
-- not using `initdb`: queries will run against the `postgres` database, by default.
+- using `initdb`: queries will be run against the specified database by default, that is the
+  value passed as `initdb.database` (or `app` if not specified).
+- not using `initdb`: queries will run against the `postgres` database, by default.
The default database can always be overridden for a given user-defined metric,
by specifying a list of one or more databases in the `target_databases` option.
@@ -70,16 +70,16 @@ spec:
Every PostgreSQL instance exporter automatically exposes a set of predefined
metrics, which can be classified in two major categories:
-- PostgreSQL related metrics, starting with `cnp_collector_*`, including:
+- PostgreSQL related metrics, starting with `cnp_collector_*`, including:
- - number of WAL files and total size on disk
- - number of `.ready` and `.done` files in the archive status folder
- - requested minimum and maximum number of synchronous replicas, as well as
- the expected and actually observed values
- - flag indicating if replica cluster mode is enabled or disabled
- - flag indicating if a manual switchover is required
+ - number of WAL files and total size on disk
+ - number of `.ready` and `.done` files in the archive status folder
+ - requested minimum and maximum number of synchronous replicas, as well as
+ the expected and actually observed values
+ - flag indicating if replica cluster mode is enabled or disabled
+ - flag indicating if a manual switchover is required
-- Go runtime related metrics, starting with `go_*`
+- Go runtime related metrics, starting with `go_*`
Below is a sample of the metrics returned by the `localhost:9187/metrics`
endpoint of an instance. As you can see, the Prometheus format is
@@ -250,6 +250,7 @@ go_memstats_sys_bytes 7.6891144e+07
# TYPE go_threads gauge
go_threads 18
```
+
### User defined metrics
This feature is currently in *beta* state and the format is inspired by the
@@ -415,33 +416,32 @@ Every custom query has the following basic structure:
Here is a short description of all the available fields:
-- `<metric_name>`: the name of the Prometheus metric
- - `query`: the SQL query to run on the target database to generate the metrics
- - `primary`: whether to run the query only on the primary instance
- - `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated)
- - `runonserver`: a semantic version range to limit the versions of PostgreSQL the query should run on
- (e.g. `">=10.0.0"` or `">=12.0.0 <=14.0.0"`)
- - `target_databases`: a list of databases to run the `query` against,
- or a [shell-like pattern](#example-of-a-user-defined-metric-running-on-multiple-databases)
- to enable auto discovery. Overwrites the default database if provided.
- - `metrics`: section containing a list of all exported columns, defined as follows:
- - `<column_name>`: the name of the column returned by the query
- - `usage`: one of the values described below
- - `description`: the metric's description
- - `metrics_mapping`: the optional column mapping when `usage` is set to `MAPPEDMETRIC`
+- `<metric_name>`: the name of the Prometheus metric
+ - `query`: the SQL query to run on the target database to generate the metrics
+ - `primary`: whether to run the query only on the primary instance
+ - `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated)
+ - `runonserver`: a semantic version range to limit the versions of PostgreSQL the query should run on
+ (e.g. `">=10.0.0"` or `">=12.0.0 <=14.0.0"`)
+ - `target_databases`: a list of databases to run the `query` against,
+ or a [shell-like pattern](#example-of-a-user-defined-metric-running-on-multiple-databases)
+ to enable auto discovery. Overwrites the default database if provided.
+ - `metrics`: section containing a list of all exported columns, defined as follows:
+ - `<column_name>`: the name of the column returned by the query
+ - `usage`: one of the values described below
+ - `description`: the metric's description
+ - `metrics_mapping`: the optional column mapping when `usage` is set to `MAPPEDMETRIC`
The possible values for `usage` are:
-| Column Usage Label | Description |
-|:--------------------|:---------------------------------------------------------|
-| `DISCARD` | this column should be ignored |
-| `LABEL` | use this column as a label |
-| `COUNTER` | use this column as a counter |
-| `GAUGE` | use this column as a gauge |
-| `MAPPEDMETRIC` | use this column with the supplied mapping of text values |
-| `DURATION` | use this column as a text duration (in milliseconds) |
-| `HISTOGRAM` | use this column as a histogram |
-
+| Column Usage Label | Description |
+| :----------------- | :------------------------------------------------------- |
+| `DISCARD` | this column should be ignored |
+| `LABEL` | use this column as a label |
+| `COUNTER` | use this column as a counter |
+| `GAUGE` | use this column as a gauge |
+| `MAPPEDMETRIC` | use this column with the supplied mapping of text values |
+| `DURATION` | use this column as a text duration (in milliseconds) |
+| `HISTOGRAM` | use this column as a histogram |
Please visit the ["Metric Types" page](https://prometheus.io/docs/concepts/metric_types/)
from the Prometheus documentation for more information.
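As an illustration of the structure described above, a hypothetical user-defined metric could be declared as follows (query, table and metric names are examples only):

```yaml
example_table_rows:
  query: "SELECT count(*) AS row_count FROM my_table"
  primary: true
  metrics:
    - row_count:
        usage: "GAUGE"
        description: "Number of rows in my_table"
```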
@@ -458,7 +458,6 @@ cnp_<metric_name>_<column_name>{<label>=<value> ... } <value>
- `<image>:<tag>@sha256:<digest>` format, for more deterministic and
- repeatable deployments
+- Introduce the `pg_basebackup` bootstrap method to create a new PostgreSQL
+ cluster as a copy of an existing PostgreSQL instance of the same major
+ version, even outside Kubernetes
+- Add support for Kubernetes’ tolerations in the `Affinity` section of the
+ `Cluster` resource, allowing users to distribute PostgreSQL instances on
+ Kubernetes nodes with the required taint
+- Enable specification of a digest to an image name, through the
+  `<image>:<tag>@sha256:<digest>` format, for more deterministic and
+ repeatable deployments
Security Enhancements:
-- Customize TLS certificates to authenticate the PostgreSQL server by defining
- secrets for the server certificate and the related Certification Authority
- that signed it
-- Raise the `sslmode` for the WAL receiver process of internal and
- automatically managed streaming replicas from `require` to `verify-ca`
+- Customize TLS certificates to authenticate the PostgreSQL server by defining
+ secrets for the server certificate and the related Certification Authority
+ that signed it
+- Raise the `sslmode` for the WAL receiver process of internal and
+ automatically managed streaming replicas from `require` to `verify-ca`
Changes:
-- Enhance the `promote` subcommand of the `cnp` plugin for `kubectl` to accept
- just the node number rather than the whole name of the pod
-- Adopt DNS-1035 validation scheme for cluster names (from which service names
- are inherited)
-- Enforce streaming replication connection when cloning a standby instance or
- when bootstrapping using the `pg_basebackup` method
-- Integrate the `Backup` resource with `beginWal`, `endWal`, `beginLSN`,
- `endLSN`, `startedAt` and `stoppedAt` regarding the physical base backup
-- Documentation improvements:
- - Provide a list of ports exposed by the operator and the operand container
- - Introduce the `cnp-bench` helm charts and guidelines for benchmarking the
- storage and PostgreSQL for database workloads
-- E2E tests enhancements:
- - Test Kubernetes 1.21
- - Add test for High Availability of the operator
- - Add test for node draining
-- Minor bug fixes, including:
- - Timeout to pg_ctl start during recovery operations too short
- - Operator not watching over direct events on PVCs
- - Fix handling of `immediateCheckpoint` and `jobs` parameter in
- `barmanObjectStore` backups
- - Empty logs when recovering from a backup
+- Enhance the `promote` subcommand of the `cnp` plugin for `kubectl` to accept
+ just the node number rather than the whole name of the pod
+- Adopt DNS-1035 validation scheme for cluster names (from which service names
+ are inherited)
+- Enforce streaming replication connection when cloning a standby instance or
+ when bootstrapping using the `pg_basebackup` method
+- Integrate the `Backup` resource with `beginWal`, `endWal`, `beginLSN`,
+ `endLSN`, `startedAt` and `stoppedAt` regarding the physical base backup
+- Documentation improvements:
+ - Provide a list of ports exposed by the operator and the operand container
+ - Introduce the `cnp-bench` helm charts and guidelines for benchmarking the
+ storage and PostgreSQL for database workloads
+- E2E tests enhancements:
+ - Test Kubernetes 1.21
+ - Add test for High Availability of the operator
+ - Add test for node draining
+- Minor bug fixes, including:
+ - Timeout to pg_ctl start during recovery operations too short
+ - Operator not watching over direct events on PVCs
+ - Fix handling of `immediateCheckpoint` and `jobs` parameter in
+ `barmanObjectStore` backups
+ - Empty logs when recovering from a backup
## Version 1.4.0
@@ -286,39 +301,39 @@ Changes:
Features:
-- Standard output logging of PostgreSQL error messages in JSON format
-- Provide a basic set of PostgreSQL metrics for the Prometheus exporter
-- Add the `restart` command to the `cnp` plugin for `kubectl` to restart
- the pods of a given PostgreSQL cluster in a rollout fashion
+- Standard output logging of PostgreSQL error messages in JSON format
+- Provide a basic set of PostgreSQL metrics for the Prometheus exporter
+- Add the `restart` command to the `cnp` plugin for `kubectl` to restart
+ the pods of a given PostgreSQL cluster in a rollout fashion
Security Enhancements:
-- Set `readOnlyRootFilesystem` security context for pods
+- Set `readOnlyRootFilesystem` security context for pods
Changes:
-- **IMPORTANT:** If you have previously deployed the Cloud Native PostgreSQL
- operator using the YAML manifest, you must delete the existing operator
- deployment before installing the new version. This is required to avoid
- conflicts with other Kubernetes API's due to a change in labels
- and label selectors being directly managed by the operator. Please refer to
- the Cloud Native PostgreSQL documentation for additional detail on upgrading
- to 1.4.0
-- Fix the labels that are automatically defined by the operator, renaming them
- from `control-plane: controller-manager` to
- `app.kubernetes.io/name: cloud-native-postgresql`
-- Assign the `metrics` name to the TCP port for the Prometheus exporter
-- Set `cnp_metrics_exporter` as the `application_name` to the metrics exporter
- connection in PostgreSQL
-- When available, use the application database for monitoring queries of the
- Prometheus exporter instead of the `postgres` database
-- Documentation improvements:
- - Customization of monitoring queries
- - Operator upgrade instructions
-- E2E tests enhancements
-- Minor bug fixes, including:
- - Avoid using `-R` when calling `pg_basebackup`
- - Remove stack trace from error log when getting the status
+- **IMPORTANT:** If you have previously deployed the Cloud Native PostgreSQL
+ operator using the YAML manifest, you must delete the existing operator
+ deployment before installing the new version. This is required to avoid
+ conflicts with other Kubernetes API's due to a change in labels
+ and label selectors being directly managed by the operator. Please refer to
+ the Cloud Native PostgreSQL documentation for additional detail on upgrading
+ to 1.4.0
+- Fix the labels that are automatically defined by the operator, renaming them
+ from `control-plane: controller-manager` to
+ `app.kubernetes.io/name: cloud-native-postgresql`
+- Assign the `metrics` name to the TCP port for the Prometheus exporter
+- Set `cnp_metrics_exporter` as the `application_name` to the metrics exporter
+ connection in PostgreSQL
+- When available, use the application database for monitoring queries of the
+ Prometheus exporter instead of the `postgres` database
+- Documentation improvements:
+ - Customization of monitoring queries
+ - Operator upgrade instructions
+- E2E tests enhancements
+- Minor bug fixes, including:
+ - Avoid using `-R` when calling `pg_basebackup`
+ - Remove stack trace from error log when getting the status
## Version 1.3.0
@@ -326,62 +341,62 @@ Changes:
Features:
-- Inheritance of labels and annotations
-- Set resource limits for every container
+- Inheritance of labels and annotations
+- Set resource limits for every container
Security Enhancements:
-- Support for restricted security context constraint on RedHat OpenShift to
- limit pod execution to a namespace allocated UID and SELinux context
-- Pod security contexts explicitly defined by the operator to run as
- non-root, non-privileged and without privilege escalation
+- Support for restricted security context constraint on RedHat OpenShift to
+ limit pod execution to a namespace allocated UID and SELinux context
+- Pod security contexts explicitly defined by the operator to run as
+ non-root, non-privileged and without privilege escalation
Changes:
-- Prometheus exporter endpoint listening on port 9187 (port 8000 is now
- reserved to instance coordination with API server)
-- Documentation improvements
-- E2E tests enhancements, including GKE environment
-- Minor bug fixes
+- Prometheus exporter endpoint listening on port 9187 (port 8000 is now
+ reserved to instance coordination with API server)
+- Documentation improvements
+- E2E tests enhancements, including GKE environment
+- Minor bug fixes
## Version 1.2.1
**Release date:** 6 Apr 2021
-- ScheduledBackup are no longer owners of the Backups, meaning that backups
- are not removed when ScheduledBackup objects are deleted
-- Update on ubi8-minimal image to solve RHSA-2021:1024 (Security Advisory: Important)
+- ScheduledBackup are no longer owners of the Backups, meaning that backups
+ are not removed when ScheduledBackup objects are deleted
+- Update on ubi8-minimal image to solve RHSA-2021:1024 (Security Advisory: Important)
## Version 1.2.0
**Release date:** 31 Mar 2021
-- Introduce experimental support for custom monitoring queries as ConfigMap and
- Secret objects using a compatible syntax with `postgres_exporter` for Prometheus
-- Support Operator Lifecycle Manager (OLM) deployments, with the subsequent
- presence on OperatorHub.io
-- Expand license key support for company-wide usage (previous restrictions limited only to a single cluster namespace)
-- Enhance container security by applying guidelines from the US Department of
- Defense (DoD)'s Defense Information Systems Agency (DISA) and the Center for
- Internet Security (CIS) and verifying them directly in the pipeline with
- Dockle
-- Improve E2E tests on AKS
-- Minor bug fixes
+- Introduce experimental support for custom monitoring queries as ConfigMap and
+ Secret objects using a compatible syntax with `postgres_exporter` for Prometheus
+- Support Operator Lifecycle Manager (OLM) deployments, with the subsequent
+ presence on OperatorHub.io
+- Expand license key support for company-wide usage (previously limited to a single cluster namespace)
+- Enhance container security by applying guidelines from the US Department of
+ Defense (DoD)'s Defense Information Systems Agency (DISA) and the Center for
+ Internet Security (CIS) and verifying them directly in the pipeline with
+ Dockle
+- Improve E2E tests on AKS
+- Minor bug fixes
-## Version 1.1.0
+## Version 1.1.0
**Release date:** 3 Mar 2021
-- Add `kubectl cnp status` to pretty-print the status of a cluster, including
- JSON and YAML output
-- Add `kubectl cnp certificate` to enable TLS authentication for client applications
-- Add the `-ro` service to route connections to the available hot
- standby replicas only, enabling offload of read-only queries from
- the cluster's primary instance
-- Rollback scaling down a cluster to a value lower than `maxSyncReplicas`
-- Request a checkpoint before demoting a former primary
-- Send `SIGINT` signal (fast shutdown) to PostgreSQL process on `SIGTERM`
-- Minor bug fixes
+- Add `kubectl cnp status` to pretty-print the status of a cluster, including
+ JSON and YAML output
+- Add `kubectl cnp certificate` to enable TLS authentication for client applications
+- Add the `-ro` service to route connections to the available hot
+ standby replicas only, enabling offload of read-only queries from
+ the cluster's primary instance
+- Rollback scaling down a cluster to a value lower than `maxSyncReplicas`
+- Request a checkpoint before demoting a former primary
+- Send `SIGINT` signal (fast shutdown) to PostgreSQL process on `SIGTERM`
+- Minor bug fixes
## Version 1.0.0
@@ -392,24 +407,24 @@ The first major stable release of Cloud Native PostgreSQL implements `Cluster`,
It uses these resources to create and manage PostgreSQL clusters inside
Kubernetes with the following main capabilities:
-- Direct integration with Kubernetes API server for High Availability, without
- requiring an external tool
-- Self-Healing capability, through:
- - failover of the primary instance by promoting the most aligned replica
- - automated recreation of a replica
-- Planned switchover of the primary instance by promoting a selected replica
-- Scale up/down capabilities
-- Definition of an arbitrary number of instances (minimum 1 - one primary server)
-- Definition of the *read-write* service to connect your applications to the
- only primary server of the cluster
-- Definition of the *read* service to connect your applications to any of the
- instances for reading workloads
-- Support for Local Persistent Volumes with PVC templates
-- Reuse of Persistent Volumes storage in Pods
-- Rolling updates for PostgreSQL minor versions and operator upgrades
-- TLS connections and client certificate authentication
-- Continuous backup to an S3 compatible object store
-- Full recovery and point-in-time recovery from an S3 compatible object store backup
-- Support for synchronous replicas
-- Support for node affinity via `nodeSelector` property
-- Standard output logging of PostgreSQL error messages
\ No newline at end of file
+- Direct integration with Kubernetes API server for High Availability, without
+ requiring an external tool
+- Self-Healing capability, through:
+ - failover of the primary instance by promoting the most aligned replica
+ - automated recreation of a replica
+- Planned switchover of the primary instance by promoting a selected replica
+- Scale up/down capabilities
+- Definition of an arbitrary number of instances (minimum 1 - one primary server)
+- Definition of the *read-write* service to connect your applications to the
+ only primary server of the cluster
+- Definition of the *read* service to connect your applications to any of the
+ instances for reading workloads
+- Support for Local Persistent Volumes with PVC templates
+- Reuse of Persistent Volumes storage in Pods
+- Rolling updates for PostgreSQL minor versions and operator upgrades
+- TLS connections and client certificate authentication
+- Continuous backup to an S3 compatible object store
+- Full recovery and point-in-time recovery from an S3 compatible object store backup
+- Support for synchronous replicas
+- Support for node affinity via `nodeSelector` property
+- Standard output logging of PostgreSQL error messages
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
index 6ff6f77c853..0b850013ccf 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/replication.mdx
@@ -78,7 +78,6 @@ hostssl replication streaming_replica all cert
to the ["Certificates" section](certificates.md#client-streaming_replica-certificate)
in the documentation.
-
### Continuous backup integration
In case continuous backup is configured in the cluster, Cloud Native PostgreSQL
@@ -107,9 +106,9 @@ ANY q (pod1, pod2, ...)
Where:
-- `q` is an integer automatically calculated by the operator to be:
- `1 <= minSyncReplicas <= q <= maxSyncReplicas <= readyReplicas`
-- `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster
+- `q` is an integer automatically calculated by the operator to be:
+ `1 <= minSyncReplicas <= q <= maxSyncReplicas <= readyReplicas`
+- `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster
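For example, the following fragment of the `Cluster` spec (values are illustrative) enables quorum-based synchronous replication; with three instances and both replicas ready, the operator keeps `q` between `1` and `2`:

```yaml
spec:
  instances: 3
  minSyncReplicas: 1
  maxSyncReplicas: 2
```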
!!! Warning
To provide self-healing capabilities, the operator has the power
@@ -140,14 +139,14 @@ can be a primary cluster or another replica cluster (cascading replica cluster).
The available options in terms of replication, both at bootstrap and continuous
recovery level, are:
-- use streaming replication between the replica cluster and the source
- (this will certainly require some administrative and security related
- work to be done to make sure that the network connection between the
- two clusters is correctly setup)
-- use a Barman Cloud object store for recovery of the base backups and
- the WAL files that are regularly shipped from the source to the object
- store and pulled by `barman-cloud-wal-restore` in the replica cluster
-- any of the two
+- use streaming replication between the replica cluster and the source
+ (this will certainly require some administrative and security related
+ work to be done to make sure that the network connection between the
+  two clusters is correctly set up)
+- use a Barman Cloud object store for recovery of the base backups and
+ the WAL files that are regularly shipped from the source to the object
+ store and pulled by `barman-cloud-wal-restore` in the replica cluster
+- any of the two
All you have to do is actually define an external cluster.
Please refer to the ["Bootstrap" section](bootstrap.md#bootstrap-from-another-cluster)
@@ -156,18 +155,18 @@ for information on how to clone a PostgreSQL server using either
If the external cluster contains a `barmanObjectStore` section:
-- you'll be able to boostrap the replica cluster from an object store
- using the `recovery` section
-- Cloud Native PostgreSQL will automatically set the `restore_command`
- in the designated primary instance
+- you'll be able to bootstrap the replica cluster from an object store
+ using the `recovery` section
+- Cloud Native PostgreSQL will automatically set the `restore_command`
+ in the designated primary instance
If the external cluster contains a `connectionParameters` section:
-- you'll be able to boostrap the replica cluster via streaming replication
- using the `pg_basebackup` section
-- Cloud Native PostgreSQL will automatically set the `primary_conninfo`
- option in the designated primary instance, so that a WAL receiver
- process is started to connect to the source cluster and receive data
+- you'll be able to bootstrap the replica cluster via streaming replication
+ using the `pg_basebackup` section
+- Cloud Native PostgreSQL will automatically set the `primary_conninfo`
+ option in the designated primary instance, so that a WAL receiver
+ process is started to connect to the source cluster and receive data
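Putting this together, a hedged sketch of the replica-specific part of the `Cluster` spec could be (names are placeholders; `source` must match an entry in `externalClusters`):

```yaml
  bootstrap:
    recovery:
      source: cluster-primary     # or pg_basebackup, depending on the chosen bootstrap
  replica:
    enabled: true
    source: cluster-primary       # external cluster acting as the origin
```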
The created replica cluster can perform backups in a reserved object store from
the designated primary, enabling symmetric architectures in a distributed
@@ -176,10 +175,10 @@ fashion.
You have full flexibility and freedom to decide your favourite
distributed architecture for a PostgreSQL database, by choosing:
-- a private cloud spanning over multiple Kubernetes clusters in different data
- centers
-- a public cloud spanning over multiple Kubernetes clusters in different
- regions
-- a mix of the previous two (hybrid)
-- a public cloud spanning over multiple Kubernetes clusters in different
- regions and on different Cloud Service Providers
\ No newline at end of file
+- a private cloud spanning over multiple Kubernetes clusters in different data
+ centers
+- a public cloud spanning over multiple Kubernetes clusters in different
+ regions
+- a mix of the previous two (hybrid)
+- a public cloud spanning over multiple Kubernetes clusters in different
+ regions and on different Cloud Service Providers
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
index daec60d53cf..7f43a6e79ac 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx
@@ -10,8 +10,8 @@ they might be allowed to use as much CPU and RAM as needed.
Cloud Native PostgreSQL allows administrators to control and manage resource usage by the pods of the cluster,
through the `resources` section of the manifest, with two knobs:
-- `requests`: initial requirement
-- `limits`: maximum usage, in case of dynamic increase of resource needs
+- `requests`: initial requirement
+- `limits`: maximum usage, in case of dynamic increase of resource needs
For example, you can request an initial amount of RAM of 32MiB (scalable to 128MiB) and 50m of CPU (scalable to 100m)
as follows:
@@ -34,9 +34,9 @@ available memory to satisfy the pod's memory request.
For each resource, we divide containers into 3 Quality of Service (QoS) classes, in decreasing order of priority:
-- *Guaranteed*
-- *Burstable*
-- *Best-Effort*
+- *Guaranteed*
+- *Burstable*
+- *Best-Effort*
For more details, please refer to the ["Configure Quality of Service for Pods"](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#qos-classes)
section in the Kubernetes documentation.
@@ -46,16 +46,16 @@ For a PostgreSQL workload it is recommended to set a "Guaranteed" QoS.
To avoid resources related issues in Kubernetes, we can refer to the best practices for "out of resource" handling
while creating a cluster:
-- Specify your required values for memory and CPU in the resources section of the manifest file.
- This way, you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other
- resource-related issues on running instances.
-- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you must set limits and requests
- for both memory and CPU to the same value.
-- Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do
- in a VM or physical machine scenario - see below).
-- Set up database server pods on a dedicated node using nodeSelector.
- See the "nodeSelector" and "tolerations" fields of the
- [“affinityconfiguration"](api_reference.md#affinityconfiguration) resource on the API reference page.
+- Specify your required values for memory and CPU in the resources section of the manifest file.
+ This way, you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other
+ resource-related issues on running instances.
+- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you must set limits and requests
+ for both memory and CPU to the same value.
+- Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do
+ in a VM or physical machine scenario - see below).
+- Set up database server pods on a dedicated node using nodeSelector.
+ See the "nodeSelector" and "tolerations" fields of the
+  ["affinityconfiguration"](api_reference.md#affinityconfiguration) resource on the API reference page.
You can refer to the following example manifest:
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
index 60d33f862e5..5a5ec050b5b 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/rolling_update.mdx
@@ -12,13 +12,13 @@ applications are running against it.
Rolling upgrades are started when:
-- the user changes the `imageName` attribute of the cluster specification;
+- the user changes the `imageName` attribute of the cluster specification;
-- after the operator is updated, to ensure the Pods run the latest instance
- manager;
+- after the operator is updated, to ensure the Pods run the latest instance
+ manager;
-- when a change in the PostgreSQL configuration requires a restart to be
- applied.
+- when a change in the PostgreSQL configuration requires a restart to be
+ applied.
The operator starts upgrading all the replicas, one Pod at a time, starting
from the one with the highest serial.
@@ -26,15 +26,15 @@ from the one with the highest serial.
The primary is the last node to be upgraded. This operation is configurable and
managed by the `primaryUpdateStrategy` option, accepting these two values:
-- `unsupervised`: the rolling update process is managed by Kubernetes
- and is entirely automated, with the *switchover* operation
- starting once all the replicas have been upgraded
-- `supervised`: the rolling update process is suspended immediately
- after all replicas have been upgraded and can only be completed
- with a manual switchover triggered by an administrator with
- `kubectl cnp promote [cluster] [pod]`. The plugin can be downloaded from the
- [`kubectl-cnp` project page](https://github.com/EnterpriseDB/kubectl-cnp)
- on GitHub.
+- `unsupervised`: the rolling update process is managed by Kubernetes
+ and is entirely automated, with the *switchover* operation
+ starting once all the replicas have been upgraded
+- `supervised`: the rolling update process is suspended immediately
+ after all replicas have been upgraded and can only be completed
+ with a manual switchover triggered by an administrator with
+ `kubectl cnp promote [cluster] [pod]`. The plugin can be downloaded from the
+ [`kubectl-cnp` project page](https://github.com/EnterpriseDB/kubectl-cnp)
+ on GitHub.
The default and recommended value is `unsupervised`.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
index 1ee3ab398e7..7abbe69d626 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples.mdx
@@ -6,17 +6,17 @@ product: 'Cloud Native Operator'
In this section, you can find some examples of configuration files to set up your PostgreSQL `Cluster`.
-* [`cluster-example.yaml`](../samples/cluster-example.yaml):
- a basic example of `Cluster` that uses the default storage class. For demonstration and experimentation purposes
- on a personal Kubernetes cluster with Minikube or Kind as described in the ["Quickstart"](quickstart.md).
-* [`cluster-example-custom.yaml`](../samples/cluster-example-custom.yaml):
- a basic example of `Cluster` that uses the default storage class and custom parameters for `postgresql.conf` and
- `pg_hba.conf` files
-* [`cluster-storage-class.yaml`](../samples/cluster-storage-class.yaml):
- a basic example of `Cluster` that uses a specified storage class.
-* [`cluster-pvc-template.yaml`](../samples/cluster-pvc-template.yaml):
- a basic example of `Cluster` that uses a persistent volume claim template.
-* [`cluster-example-full.yaml`](../samples/cluster-example-full.yaml):
- an example of `Cluster` that sets most of the available options.
+- [`cluster-example.yaml`](../samples/cluster-example.yaml):
+ a basic example of `Cluster` that uses the default storage class. For demonstration and experimentation purposes
+ on a personal Kubernetes cluster with Minikube or Kind as described in the ["Quickstart"](quickstart.md).
+- [`cluster-example-custom.yaml`](../samples/cluster-example-custom.yaml):
+ a basic example of `Cluster` that uses the default storage class and custom parameters for `postgresql.conf` and
+ `pg_hba.conf` files
+- [`cluster-storage-class.yaml`](../samples/cluster-storage-class.yaml):
+ a basic example of `Cluster` that uses a specified storage class.
+- [`cluster-pvc-template.yaml`](../samples/cluster-pvc-template.yaml):
+ a basic example of `Cluster` that uses a persistent volume claim template.
+- [`cluster-example-full.yaml`](../samples/cluster-example-full.yaml):
+ an example of `Cluster` that sets most of the available options.
For a list of available options, please refer to the ["API Reference" page](api_reference.md).
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
index 885e022bc35..4d33d1a01b3 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
@@ -18,9 +18,9 @@ You can control how the Cloud Native PostgreSQL cluster's instances should be
scheduled through the [`affinity`](api_reference.md#AffinityConfiguration)
section in the definition of the cluster, which supports:
-- pod affinity/anti-affinity
-- node selectors
-- tolerations
+- pod affinity/anti-affinity
+- node selectors
+- tolerations
!!! Info
Cloud Native PostgreSQL does not support pod templates for finer control
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
index 75eb54d0bbe..f0fac197f55 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx
@@ -44,10 +44,10 @@ Every container image that is part of Cloud Native PostgreSQL is automatically b
Such images include not only the operator's, but also the operands' - specifically every supported PostgreSQL and EDB Postgres Advanced version.
Within the pipelines, images are scanned with:
-- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
- of the container build process
-- [Clair](https://github.com/quay/clair): for vulnerabilities found in both the
- underlying operating system as well as libraries and applications that they run
+- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
+ of the container build process
+- [Clair](https://github.com/quay/clair): for vulnerabilities found in both the
+ underlying operating system as well as libraries and applications that they run
!!! Important
All operand images are automatically rebuilt once a day by our pipelines in case
@@ -56,10 +56,10 @@ Within the pipelines, images are scanned with:
The following guidelines and frameworks have been taken into account for container-level security:
-- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
- developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
-- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
- developed by the Center for Internet Security (CIS)
+- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
+ developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
+- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
+ developed by the Center for Internet Security (CIS)
!!! Seealso "About the Container level security"
Please refer to ["Security and Containers in Cloud Native PostgreSQL"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
@@ -127,13 +127,13 @@ section of the Kubernetes documentation for further information.
Cloud Native PostgreSQL exposes ports at operator, instance manager and operand
levels, as listed in the table below:
-System | Port number | Exposing | Name | Certificates | Authentication
-:--------------- | :----------- | :------------------ | :------------------ | :------------ | :--------------
-operator | 9443 | webhook server | `webhook-server` | TLS | Yes
-operator | 8080 | metrics | `metrics` | no TLS | No
-instance manager | 9187 | metrics | `metrics` | no TLS | No
-instance manager | 8000 | status | `status` | no TLS | No
-operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes
+| System | Port number | Exposing | Name | Certificates | Authentication |
+| :--------------- | :---------- | :------------------ | :--------------- | :----------- | :------------- |
+| operator | 9443 | webhook server | `webhook-server` | TLS | Yes |
+| operator | 8080 | metrics | `metrics` | no TLS | No |
+| instance manager | 9187 | metrics | `metrics` | no TLS | No |
+| instance manager | 8000 | status | `status` | no TLS | No |
+| operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes |
### PostgreSQL
@@ -157,7 +157,7 @@ By default, every replica is automatically configured to connect in **physical
async streaming replication** with the current primary instance, with a special
user called `streaming_replica`. The connection between nodes is **encrypted**
and authentication is via **TLS client certificates** (please refer to the
-["Client TLS/SSL Connections"](ssl_connections.md#Client TLS/SSL Connections) page
+["Client TLS/SSL Connections"]\(ssl_connections.md#Client TLS/SSL Connections) page
for details).
Currently, the operator allows administrators to add `pg_hba.conf` lines directly in the manifest
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
index 78814bff39b..b8b2c25f4fe 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
@@ -125,10 +125,10 @@ spec:
This Pod will mount secrets managed by the Cloud Native PostgreSQL operator, including:
-* `sslcert`: the TLS client public certificate
-* `sslkey`: the TLS client certificate private key
-* `sslrootcert`: the TLS Certification Authority certificate, that signed the certificate on
- the server to be used to verify the identity of the instances
+- `sslcert`: the TLS client public certificate
+- `sslkey`: the TLS client certificate private key
+- `sslrootcert`: the TLS Certification Authority certificate, that signed the certificate on
+ the server to be used to verify the identity of the instances
They will be used to create the default resources that `psql` (and other libpq based applications like `pgbench`)
requires to establish a TLS encrypted connection to the Postgres database.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
index 0f1469cee26..adc8f70e677 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
@@ -20,8 +20,8 @@ and bare metal, are also valid in container contexts managed by Kubernetes.
There are two primary methods of access to storage:
-- **network**: either directly or indirectly (think of an NFS volume locally mounted on a host running Kubernetes)
-- **local**: directly attached to the node where a Pod is running (this also includes directly attached disks on bare metal installations of Kubernetes)
+- **network**: either directly or indirectly (think of an NFS volume locally mounted on a host running Kubernetes)
+- **local**: directly attached to the node where a Pod is running (this also includes directly attached disks on bare metal installations of Kubernetes)
Network storage, which is the most common usage pattern in Kubernetes,
presents the same issues of throughput and latency that you can
@@ -48,11 +48,11 @@ in a controlled Kubernetes environment, before deploying the database in product
Briefly, `cnp-bench` is designed to operate at two levels:
-- measuring the performance of the underlying storage using `fio`, with relevant
- metrics for database workloads such as throughput for sequential reads, sequential
- writes, random reads and random writes
-- measuring the performance of the database using the default benchmarking tool
- distributed along with PostgreSQL: `pgbench`
+- measuring the performance of the underlying storage using `fio`, with relevant
+ metrics for database workloads such as throughput for sequential reads, sequential
+ writes, random reads and random writes
+- measuring the performance of the database using the default benchmarking tool
+ distributed along with PostgreSQL: `pgbench`
!!! Important
Measuring both the storage and database performance is an activity that
diff --git a/product_docs/docs/bdr/3.7/backup.mdx b/product_docs/docs/bdr/3.7/backup.mdx
index 1d384cdffce..8a749aa6dac 100644
--- a/product_docs/docs/bdr/3.7/backup.mdx
+++ b/product_docs/docs/bdr/3.7/backup.mdx
@@ -4,4 +4,290 @@ originalFilePath: backup.md
---
-
+In this chapter we discuss the backup and restore of a BDR 3.x cluster.
+
+BDR is designed to be a distributed, highly available system. If
+one or more nodes of a cluster are lost, the best way to replace them
+is to clone new nodes directly from the remaining nodes.
+
+The role of backup and recovery in BDR is to provide for Disaster
+Recovery (DR), such as in the following situations:
+
+- Loss of all nodes in the cluster
+- Significant, uncorrectable data corruption across multiple nodes
+ as a result of data corruption, application error or
+ security breach
+
+## Backup
+
+### `pg_dump`
+
+`pg_dump`, sometimes referred to as "logical backup", can be used
+normally with BDR.
+
+Note that `pg_dump` dumps both local and global sequences as if
+they were local sequences. This is intentional, to allow a BDR
+schema to be dumped and ported to other PostgreSQL databases.
+This means that sequence kind metadata is lost at the time of dump,
+so a restore would effectively reset all sequence kinds to
+the value of `bdr.default_sequence_kind` at time of restore.
+
+To create a post-restore script to reset the precise sequence kind
+for each sequence, you might want to use an SQL script like this:
+
+```sql
+SELECT 'SELECT bdr.alter_sequence_set_kind('''||
+ nspname||'.'||relname||''','''||seqkind||''');'
+FROM bdr.sequences
+WHERE seqkind != 'local';
+```
+
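+One way to capture the generated statements into a post-restore script,
+assuming a `psql` session on the source database (the output file name is
+only illustrative), is the following sketch:
+
+```sql
+-- Write the generated statements to a file; replay it after the restore
+-- with:  \i reset_sequence_kinds.sql
+\t on
+\o reset_sequence_kinds.sql
+SELECT 'SELECT bdr.alter_sequence_set_kind('''||
+       nspname||'.'||relname||''','''||seqkind||''');'
+FROM bdr.sequences
+WHERE seqkind != 'local';
+\o
+\t off
+```
+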
+Note that if `pg_dump` is run using `bdr.crdt_raw_value = on` then the
+dump can only be reloaded with `bdr.crdt_raw_value = on`.
+
+Technical Support recommends the use of physical backup techniques for
+backup and recovery of BDR.
+
+### Physical Backup
+
+Physical backups of a node in a BDR cluster can be taken using
+standard PostgreSQL software, such as
+[Barman](https://www.2ndquadrant.com/en/resources/barman/).
+
+A physical backup of a BDR node can be performed with the same
+procedure that applies to any PostgreSQL node: a BDR node is just a
+PostgreSQL node running the BDR extension.
+
+There are some specific points that must be considered when applying
+PostgreSQL backup techniques to BDR:
+
+- BDR operates at the level of a single database, while a physical
+ backup includes all the databases in the instance; you should plan
+  your databases to allow them to be easily backed up and restored.
+
+- Backups will make a copy of just one node. In the simplest case,
+  every node has a copy of all data, so you would need to back up only
+ one node to capture all data. However, the goal of BDR will not be
+ met if the site containing that single copy goes down, so the
+ minimum should be at least one node backup per site (obviously with
+ many copies etc.).
+
+- However, each node may have unreplicated local data, and/or the
+ definition of replication sets may be complex so that all nodes do
+ not subscribe to all replication sets. In these cases, backup
+  planning must also include plans for how to back up any unreplicated
+ local data and a backup of at least one node that subscribes to each
+ replication set.
+
+### Eventual Consistency
+
+The nodes in a BDR cluster are *eventually consistent*, but not
+entirely *consistent*; a physical backup of a given node will
+provide Point-In-Time Recovery capabilities limited to the states
+actually assumed by that node (see the [Example] below).
+
+The following example shows how two nodes in the same BDR cluster might not
+(and usually do not) go through the same sequence of states.
+
+Consider a cluster with two nodes `N1` and `N2`, which is initially in
+state `S`. If transaction `W1` is applied to node `N1`, and at the same
+time a non-conflicting transaction `W2` is applied to node `N2`, then
+node `N1` will go through the following states:
+
+```
+(N1) S --> S + W1 --> S + W1 + W2
+```
+
+...while node `N2` will go through the following states:
+
+```
+(N2) S --> S + W2 --> S + W1 + W2
+```
+
+That is: node `N1` will *never* assume state `S + W2`, and node `N2`
+likewise will never assume state `S + W1`, but both nodes will end up
+in the same state `S + W1 + W2`. Take this behaviour into account when
+deciding on your backup strategy.
+
+### Point-In-Time Recovery (PITR)
+
+In the example above, the changes are also inconsistent in time, since
+`W1` and `W2` both occur at time `T1`, but the change `W1` is not
+applied to `N2` until `T2`.
+
+PostgreSQL PITR is designed around the assumption of changes arriving
+from a single master in COMMIT order. Thus, PITR is possible by simply
+scanning through changes until one particular point-in-time (PIT) is reached.
+With this scheme, you can restore one node to a single point-in-time
+from its viewpoint, e.g. `T1`, but that state would not include other
+data from other nodes that had committed near that time but had not yet
+arrived on the node. As a result, the recovery might be considered to
+be partially inconsistent, or at least consistent for only one
+replication origin.
+
+To request this, use the standard syntax:
+
+```
+recovery_target_time = T1
+```
+
+BDR allows for changes from multiple masters, all recorded within the
+WAL log for one node, separately identified using replication origin
+identifiers.
+
+BDR allows PITR of all or some replication origins to a specific point in time,
+providing a fully consistent viewpoint across all subsets of nodes.
+
+Thus, for multiple origins, we view the WAL stream as containing multiple
+streams all mixed up into one larger stream. There is still just one PIT,
+but it will be reached at a different point for each origin.
+
+We read the WAL stream until all requested origins have reached their PIT. We
+apply all changes up until that point, except that we do not mark as committed
+any transaction records for an origin after the PIT on that origin has been
+reached.
+
+We end up with one LSN "stopping point" in WAL, but we also have one single
+timestamp applied consistently, just as we do with "single origin PITR".
+
+Once we have reached the defined PIT, a later one may also be set to allow
+the recovery to continue, as needed.
+
+After the desired stopping point has been reached, if the recovered server
+will be promoted, shut it down first and move the LSN forwards using
+`pg_resetwal` to an LSN value higher than used on any timeline on this server.
+This ensures that there will be no duplicate LSNs produced by logical
+decoding.
+
+In the specific example above, `N1` would be restored to `T1`, but
+would also include changes from other nodes that have been committed
+by `T1`, even though they were not applied on `N1` until later.
+
+To request multi-origin PITR, use the standard syntax in
+the recovery.conf file:
+
+```
+recovery_target_time = T1
+```
+
+The list of replication origins to be restored to `T1` needs to be specified
+in a separate multi_recovery.conf file via a new parameter,
+`recovery_target_origins`. You can either restore all origins:
+
+```
+recovery_target_origins = '*'
+```
+
+...or specify the subset of origins to restore as a list in `recovery_target_origins`:
+
+```
+recovery_target_origins = '1,3'
+```
+
+Note that recovery of the local WAL activity up to the specified
+`recovery_target_time` is always performed implicitly. For origins
+that are not specified in `recovery_target_origins`, recovery may
+stop at any point, depending on when the target for the origins
+listed in `recovery_target_origins` is achieved.
+
+In the absence of the `multi_recovery.conf` file, the recovery defaults
+to the original PostgreSQL PITR behaviour that is designed around the assumption
+of changes arriving from a single master in COMMIT order.
+
+!!! Note
+    This feature is only available on EDB Postgres Extended, and
+    Barman does not currently create a `multi_recovery.conf` file automatically.
+
+## Restore
+
+While you can take a physical backup with the same procedure as a
+standard PostgreSQL node, what is slightly more complex is
+**restoring** the physical backup of a BDR node.
+
+### BDR Cluster Failure or Seeding a New Cluster from a Backup
+
+The most common use case for restoring a physical backup involves the failure
+or replacement of all the BDR nodes in a cluster, for instance in the event of
+a datacentre failure.
+
+You may also want to perform this procedure to clone the current contents of a
+BDR cluster to seed a QA or development instance.
+
+In that case, BDR capabilities can be restored based on a physical backup
+of a single BDR node, optionally plus WAL archives:
+
+- If you still have some BDR nodes live and running, fence off the host you
+ restored the BDR node to, so it cannot connect to any surviving BDR nodes.
+ This ensures that the new node does not confuse the existing cluster.
+- Restore a single PostgreSQL node from a physical backup of one of
+ the BDR nodes.
+- If you have WAL archives associated with the backup, create a suitable
+ `recovery.conf` and start PostgreSQL in recovery to replay up to the latest
+  state. You can specify an alternative `recovery_target` here if needed.
+- Start the restored node, or promote it to read/write if it was in standby
+ recovery. Keep it fenced from any surviving nodes!
+- Clean up any leftover BDR metadata that was included in the physical backup,
+ as described below.
+- Fully stop and restart the PostgreSQL instance.
+- Add further BDR nodes with the standard procedure based on the
+ `bdr.join_node_group()` function call.
+
+#### Cleanup BDR Metadata
+
+The cleaning of leftover BDR metadata is achieved as follows:
+
+1. Drop the `bdr` extension with `CASCADE`.
+2. Drop all the replication origins previously created by BDR.
+3. Drop any replication slots left over from BDR.
+4. Fully stop and re-start PostgreSQL (important!).
+5. Create the `bdr` extension.
+
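+The SQL part of this procedure might look like the following sketch
+(the restart in step 4 happens outside SQL, and the statements assume
+nothing else on the node needs to be preserved):
+
+```sql
+-- 1. Remove the BDR extension and all dependent objects
+DROP EXTENSION bdr CASCADE;
+
+-- 2. Drop the replication origins left over from the previous cluster
+SELECT pg_replication_origin_drop(roname)
+FROM pg_replication_origin;
+
+-- 3. Drop any leftover replication slots
+SELECT pg_drop_replication_slot(slot_name)
+FROM pg_replication_slots;
+
+-- 4. Fully stop and restart PostgreSQL, then:
+
+-- 5. Re-create the extension
+CREATE EXTENSION bdr;
+```
+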
+The `DROP EXTENSION`/`CREATE EXTENSION` cycle guarantees that all the
+BDR metadata from the previous cluster is removed, and that the node
+can be used to grow a new BDR cluster from scratch.
+
+#### Cleanup of Replication Origins
+
+Replication origins must be explicitly removed with a separate step
+because they are recorded persistently in a system catalog, and
+therefore included in the backup and in the restored instance. They
+are not removed automatically when dropping the BDR extension, because
+they are not explicitly recorded as its dependencies.
+
+BDR creates one replication origin for each remote master node, to
+track progress of incoming replication in a crash-safe way. Therefore
+we need to run:
+
+```
+SELECT pg_replication_origin_drop('bdr_dbname_grpname_nodename');
+```
+
+...once for each node in the (previous) cluster. Replication origins can
+be listed as follows:
+
+```
+SELECT * FROM pg_replication_origin;
+```
+
+...and those created by BDR are easily recognized by their name, as in
+the example shown above.
+
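+With many nodes, all of them can be dropped in a single statement; the
+following sketch assumes the default `bdr_` name prefix shown above:
+
+```sql
+-- Drop every replication origin whose name starts with the bdr_ prefix
+SELECT pg_replication_origin_drop(roname)
+FROM pg_replication_origin
+WHERE roname LIKE 'bdr_%';
+```
+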
+#### Cleanup of Replication Slots
+
+If a physical backup was created with `pg_basebackup`, replication slots
+will be omitted from the backup.
+
+Some other backup methods may preserve replication slots, likely in
+outdated or invalid states. Once you restore the backup, just:
+
+```
+SELECT pg_drop_replication_slot(slot_name)
+FROM pg_replication_slots;
+```
+
+...to drop *all* replication slots. If you have a reason to preserve some,
+you can add a `WHERE slot_name LIKE 'bdr%'` clause, but this is rarely
+useful.
+
+!!! Warning
+ Never run this on a live BDR node.
diff --git a/product_docs/docs/bdr/3.7/catalogs.mdx b/product_docs/docs/bdr/3.7/catalogs.mdx
index d1dc6c34f71..fd8853ec467 100644
--- a/product_docs/docs/bdr/3.7/catalogs.mdx
+++ b/product_docs/docs/bdr/3.7/catalogs.mdx
@@ -4,4 +4,1140 @@ originalFilePath: catalogs.md
---
-
+This section contains a listing of system catalogs and views used by BDR in
+alphabetical order.
+
+## User-Visible Catalogs and Views
+
+### `bdr.conflict_history`
+
+This table is the default table where conflicts are logged. The table is
+RANGE partitioned on column `local_time` and is managed by Autopartition.
+The default data retention period is 30 days.
+
+Access to this table is possible by any table owner, who may see all
+conflicts for the tables they own, restricted by row-level security.
+
+For further details see [Logging Conflicts to a Table](conflicts).
+
+#### `bdr.conflict_history` Columns
+
+| Name | Type | Description |
+| ----------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
+| sub_id | oid | which subscription produced this conflict; can be joined to `bdr.subscription` table |
+| local_xid | xid | local transaction of the replication process at the time of conflict |
+| local_lsn               | pg_lsn                   | local LSN of the replication process at the time of conflict                                                              |
+| local_time | timestamp with time zone | local time of the conflict |
+| remote_xid | xid | transaction which produced the conflicting change on the remote node (an origin) |
+| remote_commit_lsn | pg_lsn | commit lsn of the transaction which produced the conflicting change on the remote node (an origin) |
+| remote_commit_time | timestamp with time zone | commit timestamp of the transaction which produced the conflicting change on the remote node (an origin) |
+| conflict_type | text | detected type of the conflict (see [List of Conflict Types]) |
+| conflict_resolution | text | conflict resolution chosen (see [List of Conflict Resolutions]) |
+| conflict_index | regclass | conflicting index (only valid if the index wasn't dropped since) |
+| reloid                  | oid                      | conflicting relation (only valid if the relation wasn't dropped since)                                                    |
+| nspname | text | name of the schema for the relation on which the conflict has occurred at the time of conflict (does not follow renames) |
+| relname | text | name of the relation on which the conflict has occurred at the time of conflict (does not follow renames) |
+| key_tuple | json | json representation of the key used for matching the row |
+| remote_tuple | json | json representation of an incoming conflicting row |
+| local_tuple | json | json representation of the local conflicting row |
+| apply_tuple | json | json representation of the resulting (the one that has been applied) row |
+| local_tuple_xmin | xid | transaction which produced the local conflicting row (if `local_tuple` is set and the row is not frozen) |
+| local_tuple_node_id | oid | node which produced the local conflicting row (if `local_tuple` is set and the row is not frozen) |
+| local_tuple_commit_time | timestamp with time zone | last known change timestamp of the local conflicting row (if `local_tuple` is set and the row is not frozen) |
+
+### `bdr.conflict_history_summary`
+
+A view containing user-readable details on row conflict.
+
+#### `bdr.conflict_history_summary` Columns
+
+| Name | Type | Description |
+| ----------------------- | ------------------------ | -------------------------- |
+| schema | text | Name of the schema |
+| table | text | Name of the table |
+| local_time | timestamp with time zone | local time of the conflict |
+| local_tuple_commit_time | timestamp with time zone | Time of local commit |
+| remote_commit_time | timestamp with time zone | Time of remote commit |
+| conflict_type | text | Type of conflict |
+| conflict_resolution | text | Resolution adopted |
+
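+For example, the most recent conflicts can be inspected with a query such as
+the following sketch:
+
+```sql
+-- Last 10 row conflicts in user-readable form
+SELECT local_time, "schema", "table", conflict_type, conflict_resolution
+FROM bdr.conflict_history_summary
+ORDER BY local_time DESC
+LIMIT 10;
+```
+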
+### `bdr.consensus_kv_data`
+
+A persistent storage for the internal Raft based KV store used by
+`bdr.consensus_kv_store()` and `bdr.consensus_kv_fetch()` interfaces.
+
+#### `bdr.consensus_kv_data` Columns
+
+| Name | Type | Description |
+| ------------ | ----------- | ------------------------------------------------ |
+| kv_key | text | Unique key |
+| kv_val | json | Arbitrary value in json format |
+| kv_create_ts | timestamptz | Last write timestamp |
+| kv_ttl | int | Time to live for the value in milliseconds |
+| kv_expire_ts | timestamptz | Expiration timestamp (`kv_create_ts` + `kv_ttl`) |
+
+### `bdr.camo_decision_journal`
+
+A persistent journal of decisions resolved by a CAMO partner node
+after a failover, in case `bdr.logical_transaction_status` got
+invoked. Unlike `bdr.node_pre_commit`, this does not cover
+transactions processed under normal operational conditions (i.e. both
+nodes of a CAMO pair are running and connected). Entries in this journal
+are not ever cleaned up automatically. This is a purely diagnostic
+tool that the system does not depend on in any way.
+
+#### `bdr.camo_decision_journal` Columns
+
+| Name | Type | Description |
+| -------------- | ----------- | ---------------------------------------------- |
+| origin_node_id | oid | OID of the node where the transaction executed |
+| origin_xid | oid | Transaction Id on the remote origin node |
+| decision | char | 'c' for commit, 'a' for abort |
+| decision_ts | timestamptz | Decision time |
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
+
+### `bdr.crdt_handlers`
+
+This table lists merge ("handlers") functions for all CRDT data types.
+
+#### `bdr.crdt_handlers` Columns
+
+| Name | Type | Description |
+| ------------- | ------- | --------------------------------- |
+| crdt_type_id | regtype | CRDT data type id |
+| crdt_merge_id | regproc | Merge function for this data type |
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
+
+### `bdr.ddl_replication`
+
+This view lists DDL replication configuration as set up by current [DDL filters](repsets#ddl-replication-filtering).
+
+#### `bdr.ddl_replication` Columns
+
+| Name | Type | Description |
+| ------------ | ---- | ------------------------------------------------------------ |
+| set_ddl_name | name | Name of DDL filter |
+| set_ddl_tag | text | Which command tags it applies on (regular expression) |
+| set_ddl_role | text | Which roles it applies to (regular expression) |
+| set_name | name | Name of the replication set for which this filter is defined |
+
+### `bdr.global_consensus_journal`
+
+This catalog table logs all the Raft messages that were sent while
+managing global consensus.
+
+As for the `bdr.global_consensus_response_journal` catalog, the
+payload is stored in a binary encoded format, which can be decoded
+with the `bdr.decode_message_payload()` function; see the
+[`bdr.global_consensus_journal_details`] view for more details.
+
+#### `bdr.global_consensus_journal` Columns
+
+| Name | Type | Description |
+| ------------- | ----- | --------------------------------------- |
+| log_index | int8 | Id of the journal entry |
+| term | int8 | Raft term |
+| origin | oid | Id of node where the request originated |
+| req_id | int8 | Id for the request |
+| req_payload | bytea | Payload for the request |
+| trace_context | bytea | Trace context for the request |
+
+### `bdr.global_consensus_journal_details`
+
+This view presents Raft messages that were sent, and the corresponding
+responses, using the `bdr.decode_message_payload()` function to decode
+their payloads.
+
+#### `bdr.global_consensus_journal_details` Columns
+
+| Name | Type | Description |
+| ------------------------ | ----- | --------------------------------------------- |
+| log_index | int8 | Id of the journal entry |
+| term | int8 | Raft term |
+| request_id | int8 | Id of the request |
+| origin_id | oid | Id of the node where the request originated |
+| req_payload | bytea | Payload of the request |
+| origin_node_name | name | Name of the node where the request originated |
+| message_type_no | oid | Id of the BDR message type for the request |
+| message_type | text | Name of the BDR message type for the request |
+| message_payload | text | BDR message payload for the request |
+| response_message_type_no | oid | Id of the BDR message type for the response |
+| response_message_type | text | Name of the BDR message type for the response |
+| response_payload | text | BDR message payload for the response |
+| response_errcode_no | text | SQLSTATE for the response |
+| response_errcode | text | Error code for the response |
+| response_message | text | Error message for the response |
+
+### `bdr.global_consensus_response_journal`
+
+This catalog table collects all the responses to the Raft messages
+that were received while managing global consensus.
+
+As for the `bdr.global_consensus_journal` catalog, the payload is
+stored in a binary-encoded format, which can be decoded with the
+`bdr.decode_message_payload()` function; see the
+[`bdr.global_consensus_journal_details`] view for more details.
+
+#### `bdr.global_consensus_response_journal` Columns
+
+| Name | Type | Description |
+| ------------- | ----- | ------------------------------ |
+| log_index | int8 | Id of the journal entry |
+| res_status | oid | Status code for the response |
+| res_payload | bytea | Payload for the response |
+| trace_context | bytea | Trace context for the response |
+
+### `bdr.global_lock`
+
+This catalog table stores the information needed for recovering the
+global lock state on server restart.
+
+For monitoring usage, operators should prefer the
+[`bdr.global_locks`](#bdrglobal_locks) view, because the visible rows
+in `bdr.global_lock` do not necessarily reflect all global locking activity.
+
+Do not modify the contents of this table: it is an important BDR catalog.
+
+#### `bdr.global_lock` Columns
+
+| Name | Type | Description |
+| -------------- | ------- | ---------------------------------------------------------------------------- |
+| ddl_epoch | int8 | DDL epoch for the lock |
+| origin_node_id | oid | OID of the node where the global lock has originated |
+| lock_type | oid | Type of the lock (DDL or DML) |
+| nspname | name | Schema name for the locked relation |
+| relname | name | Relation name for the locked relation |
+| groupid | oid | OID of the top level group (for Advisory locks) |
+| key1 | integer | First 32-bit key or lower order 32-bits of 64-bit key (for Advisory locks) |
+| key2 | integer | Second 32-bit key or higher order 32-bits of 64-bit key (for Advisory locks) |
+| key_is_bigint | boolean | True if 64-bit integer key is used (for Advisory locks) |
+
+### `bdr.global_locks`
+
+A view containing active global locks on this node. The `bdr.global_locks` view
+exposes BDR's shared-memory lock state tracking, giving administrators a greater
+insight into BDR's global locking activity and progress.
+
+See [Monitoring Global Locks](monitoring#Monitoring-Global-Locks)
+for more information about global locking.
+
+#### `bdr.global_locks` Columns
+
+| Name | Type | Description |
+| -------------------------- | ----------- | ----------------------------------------------------------------- |
+| `origin_node_id` | oid | The OID of the node where the global lock has originated |
+| `origin_node_name` | name | Name of the node where the global lock has originated |
+| `lock_type` | text | Type of the lock (DDL or DML) |
+| `relation` | text | Locked relation name (for DML locks) or keys (for advisory locks) |
+| `pid` | int4 | PID of the process holding the lock |
+| `acquire_stage` | text | Internal state of the lock acquisition process |
+| `waiters` | int4 | List of backends waiting for the same global lock |
+| `global_lock_request_time` | timestamptz | Time this global lock acquire was initiated by origin node |
+| `local_lock_request_time` | timestamptz | Time the local node started trying to acquire the local-lock |
+| `last_state_change_time` | timestamptz | Time `acquire_stage` last changed |
+
+Column details:
+
+- `relation`: For DML locks, `relation` shows the relation on which the DML
+  lock is acquired. For global advisory locks, the `relation` column shows
+ the two 32-bit integers or one 64-bit integer on which the lock is acquired.
+
+- `origin_node_id` and `origin_node_name`: If these are the same as the local
+ node's ID and name, then the local node is the initiator of the global DDL
+ lock, i.e. it is the node running the acquiring transaction. If these fields
+ specify a different node, then the local node is instead trying to acquire its
+ local DDL lock to satisfy a global DDL lock request from a remote node.
+
+- `pid`: The process ID of the process that requested the global DDL lock,
+ if the local node is the requesting node. Null on other nodes; query the
+ origin node to determine the locker pid.
+
+- `global_lock_request_time`: The timestamp at which the global-lock request
+ initiator started the process of acquiring a global lock. May be null if
+ unknown on the current node. This time is stamped at the very beginning
+ of the DDL lock request, and includes the time taken for DDL epoch management
+ and any required flushes of pending-replication queues. Currently only
+ known on origin node.
+
+- `local_lock_request_time`: The timestamp at which the local node started
+ trying to acquire the local lock for this global lock. This includes the
+ time taken for the heavyweight session lock acquire, but does NOT include
+ any time taken on DDL epochs or queue flushing. If the lock is re-acquired
+ after local node restart, this will be the node restart time.
+
+- `last_state_change_time`: The timestamp at which the
+ `bdr.global_locks.acquire_stage` field last changed for this global lock
+ entry.
+
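+As a quick check of global locking activity on the local node, a query like
+the following sketch can be used:
+
+```sql
+-- Currently tracked global locks, using the columns documented above
+SELECT origin_node_name, lock_type, relation, pid, acquire_stage, waiters
+FROM bdr.global_locks;
+```
+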
+### `bdr.local_consensus_snapshot`
+
+This catalog table contains consensus snapshots created or received by
+the local node.
+
+#### `bdr.local_consensus_snapshot` Columns
+
+| Name | Type | Description |
+| --------- | ----- | ----------------------- |
+| log_index | int8 | Id of the journal entry |
+| log_term | int8 | Raft term |
+| snapshot | bytea | Raft snapshot data |
+
+### `bdr.local_consensus_state`
+
+This catalog table stores the current state of Raft on the local node.
+
+#### `bdr.local_consensus_state` Columns
+
+| Name | Type | Description |
+| ----------------- | ---- | ----------------------------------- |
+| node_id | oid | Id of the node |
+| current_term | int8 | Raft term |
+| apply_index | int8 | Raft apply index |
+| voted_for | oid | Vote cast by this node in this term |
+| last_known_leader | oid | node_id of last known Raft leader |
+
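+For example, the current Raft term and last known leader on the local node
+can be checked with this sketch:
+
+```sql
+-- Local view of the Raft consensus state
+SELECT node_id, current_term, voted_for, last_known_leader
+FROM bdr.local_consensus_state;
+```
+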
+### `bdr.local_node_summary`
+
+A view containing the same information as [`bdr.node_summary`] but only for the
+local node.
+
+### `bdr.network_path_info`
+
+A catalog view that stores user-defined information on network costs between node locations.
+
+#### `bdr.network_path_info` Columns
+
+| Name | Type | Description |
+| --------------- | ------- | ------------------------------------------ |
+| node_group_name | name | Name of the BDR group |
+| node_region1 | text | Node region name, from bdr.node_location |
+| node_region2 | text | Node region name, from bdr.node_location |
+| node_location1 | text | Node location name, from bdr.node_location |
+| node_location2 | text | Node location name, from bdr.node_location |
+| network_cost    | numeric | User-defined network cost between the two locations |
+
+### `bdr.node`
+
+This table lists all the BDR nodes in the cluster.
+
+#### `bdr.node` Columns
+
+| Name | Type | Description |
+| --------------------- | ------ | --------------------------------------------------------------------------- |
+| node_id | oid | Id of the node |
+| node_group_id | oid | Id of the node group |
+| source_node_id | oid | Id of the source node |
+| node_state | oid | Consistent state of the node |
+| target_state | oid | State that the node is trying to reach (during join or promotion) |
+| seq_id | int4 | Sequence identifier of the node used for generating unique sequence numbers |
+| dbname | name | Database name of the node |
+| proto_version_ranges | int\[] | Supported protocol version ranges by the node |
+| reserved | int2 | Reserved field for compatibility purposes |
+| synchronize_structure | "char" | Schema synchronization done during the join |
+
+### `bdr.node_catchup_info`
+
+This catalog table records relevant catch-up information on each node, whether
+it relates to the join or part procedure.
+
+#### `bdr.node_catchup_info` Columns
+
+| Name | Type | Description |
+| -------------- | ------ | -------------------------------------------------------------------------- |
+| node_id | oid | Id of the node |
+| node_source_id | oid | Id of the node used as source for the data |
+| slot_name | name | Slot used for this source |
+| min_node_lsn | pg_lsn | Minimum LSN at which the node can switch to direct replay from a peer node |
+| catchup_state | oid | Status code of the catchup state |
+| origin_node_id | oid | Id of the node from which we want transactions |
+
+If a node (`node_id`) needs missing data from a parting node (`origin_node_id`),
+it can get it from a node that already has it (`node_source_id`) via forwarding.
+The records in this table will persist until the node (`node_id`) is a member of
+the BDR cluster.
+
+### `bdr.node_conflict_resolvers`
+
+Currently configured conflict resolution for all known conflict types.
+
+#### `bdr.node_conflict_resolvers` Columns
+
+| Name | Type | Description |
+| ----------------- | ---- | ------------------------------------ |
+| conflict_type | text | Type of the conflict |
+| conflict_resolver | text | Resolver used for this conflict type |
+
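+The current settings can be listed with a simple query, sketched below:
+
+```sql
+-- Configured resolver for every known conflict type
+SELECT conflict_type, conflict_resolver
+FROM bdr.node_conflict_resolvers;
+```
+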
+### `bdr.node_group`
+
+This catalog table lists all the BDR node groups.
+
+#### `bdr.node_group` Columns
+
+| Name | Type | Description |
+| ----------------------------- | -------- | --------------------------------------------------------------------- |
+| node_group_id | oid | ID of the node group |
+| node_group_name | name | Name of the node group |
+| node_group_default_repset | oid | Default replication set for this node group |
+| node_group_default_repset_ext | oid      | Default external replication set for this node group                   |
+| node_group_parent_id | oid | ID of parent group (0 if this is a root group) |
+| node_group_flags | int | The group flags |
+| node_group_uuid | uuid | The uuid of the group |
+| node_group_apply_delay | interval | How long a subscriber waits before applying changes from the provider |
+| node_group_check_constraints | bool | Whether the apply process should check constraints when applying data |
+| node_group_num_writers | int | Number of writers to use for subscriptions backing this node group |
+| node_group_enable_wal_decoder | bool | Whether the group has enable_wal_decoder set |
+
+### `bdr.node_group_replication_sets`
+
+A view showing default replication sets created for BDR groups. See also
+`bdr.replication_sets`.
+
+#### `bdr.node_group_replication_sets` Columns
+
+| Name | Type | Description |
+| ------------------ | ------- | ------------------------------------------------------------------------------------ |
+| node_group_name | name | Name of the BDR group |
+| def_repset | name | Name of the default repset |
+| def_repset_ops | text\[] | Actions replicated by the default repset |
+| def_repset_ext | name | Name of the default "external" repset (usually same as def_repset) |
+| def_repset_ext_ops | text\[] | Actions replicated by the default "external" repset (usually same as def_repset_ops) |
+
+### `bdr.node_local_info`
+
+A catalog table used to store per-node information that changes less
+frequently than peer progress.
+
+#### `bdr.node_local_info` Columns
+
+| Name | Type | Description |
+| ------------- | ------- | ------------------------------------------------------------------------- |
+| node_id | oid | The OID of the node (including the local node) |
+| applied_state | oid | Internal id of the node state |
+| ddl_epoch | int8 | Last epoch number processed by the node |
+| pub_repsets   | text\[] | List of replication sets published by the node (only for the local node)  |
+| slot_name | name | Name of the slot used to connect to that node (NULL for the local node) |
+| sub_repsets | text\[] | List of replication sets subscribed by the node (only for the local node) |
+
+### `bdr.node_location`
+
+A catalog view that stores user-defined information on node locations.
+
+#### `bdr.node_location` Columns
+
+| Name | Type | Description |
+| --------------- | ---- | --------------------------- |
+| node_group_name | name | Name of the BDR group |
+| node_id | oid | Id of the node |
+| node_region | text | User supplied region name |
+| node_location | text | User supplied location name |
+
+### `bdr.node_log_config`
+
+A catalog view that stores information on the conflict logging configurations.
+
+#### `bdr.node_log_config` Columns
+
+| Name | Description |
+| ----------------- | --------------------------------------------------------- |
+| log_name | name of the logging configuration |
+| log_to_file | whether it logs to the server log file |
+| log_to_table | whether it logs to a table, and which table is the target |
+| log_conflict_type | which conflict types it logs, if NULL means all |
+| log_conflict_res | which conflict resolutions it logs, if NULL means all |
+
+### `bdr.node_peer_progress`
+
+Catalog used to keep track of every node's progress in the replication stream.
+Every node in the cluster regularly broadcasts its progress every
+`bdr.replay_progress_frequency` milliseconds to all other nodes (default
+is 60000 ms, i.e. 1 minute). Expect N \* (N-1) rows in this relation.
+
+You may be more interested in the `bdr.node_slots` view for monitoring
+purposes. See also [Monitoring](monitoring).
+
+#### `bdr.node_peer_progress` Columns
+
+| Name | Type | Description |
+| ----------------------- | ----------- | ------------------------------------------------------------------------------------ |
+| node_id | oid | The OID of the originating node which reported this position info |
+| peer_node_id | oid | The OID of the node's peer (remote node) for which this position info was reported |
+| last_update_sent_time | timestamptz | The time at which the report was sent by the originating node |
+| last_update_recv_time | timestamptz | The time at which the report was received by the local server |
+| last_update_node_lsn | pg_lsn | LSN on the originating node at the time of the report |
+| peer_position | pg_lsn | Latest LSN of the node's peer seen by the originating node |
+| peer_replay_time | timestamptz | Latest replay time of peer seen by the reporting node |
+| last_update_horizon_xid | oid | Internal resolution horizon: all lower xids are known resolved on the reporting node |
+| last_update_horizon_lsn | pg_lsn | Internal resolution horizon: same in terms of an LSN of the reporting node |
+
+### `bdr.node_pre_commit`
+
+Used internally on a node configured as a Commit At Most Once (CAMO)
+partner. Shows the decisions a CAMO partner took on transactions in
+the last 15 minutes.
+
+#### `bdr.node_pre_commit` Columns
+
+| Name | Type | Description |
+| -------------- | ----------- | ---------------------------------------------- |
+| origin_node_id | oid | OID of the node where the transaction executed |
+| origin_xid | oid | Transaction Id on the remote origin node |
+| decision | char | 'c' for commit, 'a' for abort |
+| local_xid | xid | Transaction Id on the local node |
+| commit_ts | timestamptz | commit timestamp of the transaction |
+| decision_ts | timestamptz | decision time |
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
+
+### `bdr.node_replication_rates`
+
+This view contains information about outgoing replication activity from a
+given node
+
+#### `bdr.node_replication_rates` Columns
+
+| Column | Type | Description |
+| ---------------- | -------- | ---------------------------------------------------------------------------------------------------- |
+| peer_node_id | oid | The OID of node's peer (remote node) for which this info was reported |
+| target_name | name | Name of the target peer node |
+| sent_lsn | pg_lsn | Latest sent position |
+| replay_lsn | pg_lsn | Latest position reported as replayed (visible) |
+| replay_lag | interval | Approximate lag time for reported replay |
+| replay_lag_bytes | int8 | Bytes difference between replay_lsn and current WAL write position on origin |
+| replay_lag_size | text | Human-readable bytes difference between replay_lsn and current WAL write position |
+| apply_rate | bigint | LSNs being applied per second at the peer node |
+| catchup_interval | interval | Approximate time required for the peer node to catchup to all the changes that are yet to be applied |
+
+!!! Note
+    The `replay_lag` is reset to zero immediately after a reconnect; as a
+    workaround, use `replay_lag_bytes`, `replay_lag_size` or
+    `catchup_interval` instead.
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
+
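+A sketch of a monitoring query based on the columns above:
+
+```sql
+-- Outgoing replication activity and estimated catchup time per peer
+SELECT target_name, sent_lsn, replay_lsn, replay_lag_bytes, catchup_interval
+FROM bdr.node_replication_rates;
+```
+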
+### `bdr.node_slots`
+
+This view contains information about replication slots used in the current
+database by BDR.
+
+See [Monitoring Outgoing Replication](monitoring#Monitoring-Outgoing-Replication)
+for guidance on the use and interpretation of this view's fields.
+
+#### `bdr.node_slots` Columns
+
+| Name | Type | Description |
+| ------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------- |
+| target_dbname | name | Database name on the target node |
+| node_group_name | name | Name of the BDR group |
+| node_group_id | oid | The OID of the BDR group |
+| origin_name | name | Name of the origin node |
+| target_name | name | Name of the target node |
+| origin_id | oid | The OID of the origin node |
+| target_id | oid | The OID of the target node |
+| local_slot_name | name | Name of the replication slot according to BDR |
+| slot_name | name | Name of the slot according to Postgres (should be same as above) |
+| is_group_slot | boolean | True if the slot is the node-group crash recovery slot for this node (see ["Group Replication Slot"]\(nodes.md#Group Replication Slot)) |
+| plugin | name | Logical decoding plugin using this slot (should be pglogical_output) |
+| slot_type | text | Type of the slot (should be logical) |
+| datoid | oid | The OID of the current database |
+| database | name | Name of the current database |
+| temporary | bool | Is the slot temporary |
+| active | bool | Is the slot active (does it have a connection attached to it) |
+| active_pid | int4 | The PID of the process attached to the slot |
+| xmin | xid | The XID needed by the slot |
+| catalog_xmin | xid | The catalog XID needed by the slot |
+| restart_lsn | pg_lsn | LSN at which the slot can restart decoding |
+| confirmed_flush_lsn | pg_lsn | Latest confirmed replicated position |
+| usesysid | oid | sysid of the user the replication session is running as |
+| usename | name | username of the user the replication session is running as |
+| application_name | text | Application name of the client connection (used by `synchronous_standby_names`) |
+| client_addr | inet | IP address of the client connection |
+| client_hostname | text | Hostname of the client connection |
+| client_port | int4 | Port of the client connection |
+| backend_start | timestamptz | When the connection started |
+| state | text | State of the replication (catchup, streaming, ...) or 'disconnected' if offline |
+| sent_lsn | pg_lsn | Latest sent position |
+| write_lsn | pg_lsn | Latest position reported as written |
+| flush_lsn | pg_lsn | Latest position reported as flushed to disk |
+| replay_lsn | pg_lsn | Latest position reported as replayed (visible) |
+| write_lag | interval | Approximate lag time for reported write |
+| flush_lag | interval | Approximate lag time for reported flush |
+| replay_lag | interval | Approximate lag time for reported replay |
+| sent_lag_bytes | int8 | Bytes difference between sent_lsn and current WAL write position |
+| write_lag_bytes | int8 | Bytes difference between write_lsn and current WAL write position |
+| flush_lag_bytes | int8 | Bytes difference between flush_lsn and current WAL write position |
+| replay_lag_bytes | int8 | Bytes difference between replay_lsn and current WAL write position |
+| sent_lag_size | text | Human-readable bytes difference between sent_lsn and current WAL write position |
+| write_lag_size | text | Human-readable bytes difference between write_lsn and current WAL write position |
+| flush_lag_size | text | Human-readable bytes difference between flush_lsn and current WAL write position |
+| replay_lag_size | text | Human-readable bytes difference between replay_lsn and current WAL write position |
+
+!!! Note
+    `replay_lag` is reset to zero immediately after a reconnect. As a
+    workaround, use `replay_lag_bytes` or `replay_lag_size` instead.
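+
+As an illustrative sketch (using only columns documented above), the
+byte-based lag columns can be queried directly as a quick health check,
+since `replay_lag` resets after a reconnect:
+
+```sql
+-- Sketch: per-peer replication state and lag from bdr.node_slots
+SELECT origin_name,
+       target_name,
+       state,
+       replay_lag_bytes,
+       replay_lag_size
+  FROM bdr.node_slots
+ ORDER BY replay_lag_bytes DESC NULLS LAST;
+```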
+
+### `bdr.node_summary`
+
+This view contains summary information about all BDR nodes known to the local
+node.
+
+#### `bdr.node_summary` Columns
+
+| Name | Type | Description |
+| ---------------------- | ------- | --------------------------------------------------------------------------- |
+| node_name | name | Name of the node |
+| node_group_name | name | Name of the BDR group the node is part of |
+| interface_name | name | Name of the connection interface used by the node |
+| interface_connstr | text | Connection string to the node |
+| peer_state_name        | text    | Consistent state of the node in human-readable form                          |
+| peer_target_state_name | text | State which the node is trying to reach (during join or promotion) |
+| node_seq_id | int4 | Sequence identifier of the node used for generating unique sequence numbers |
+| node_local_dbname | name | Database name of the node |
+| pub_repsets | text\[] | Deprecated column, always NULL, will be removed in 4.0 |
+| sub_repsets | text\[] | Deprecated column, always NULL, will be removed in 4.0 |
+| set_repl_ops           | text    | Which operations the default replication set replicates                      |
+| node_id | oid | The OID of the node |
+| node_group_id | oid | The OID of the BDR node group |
+| if_id | oid | The OID of the connection interface used by the node |
+
+### `bdr.replication_sets`
+
+A view showing replication sets defined in the BDR group, even if they are not
+currently used by any node.
+
+#### `bdr.replication_sets` Columns
+
+| Name | Type | Description |
+| ------------------ | ------- | ------------------------------------------------------------------------------ |
+| set_id | oid | The OID of the replication set |
+| set_name | name | Name of the replication set |
+| replicate_insert | boolean | Indicates if the replication set replicates INSERTs |
+| replicate_update | boolean | Indicates if the replication set replicates UPDATEs |
+| replicate_delete | boolean | Indicates if the replication set replicates DELETEs |
+| replicate_truncate | boolean | Indicates if the replication set replicates TRUNCATEs |
+| set_autoadd_tables | boolean | Indicates if new tables will be automatically added to this replication set |
+| set_autoadd_seqs | boolean | Indicates if new sequences will be automatically added to this replication set |
+
+### `bdr.schema_changes`
+
+A simple view to show all the changes to schemas within BDR.
+
+#### `bdr.schema_changes` Columns
+
+| Name | Type | Description |
+| ------------------------ | ------------ | ------------------------- |
+| schema_changes_ts        | timestamptz  | The ID of the trigger     |
+| schema_changes_change | char | A flag of change type |
+| schema_changes_classid | oid | Class ID |
+| schema_changes_objectid | oid | Object ID |
+| schema_changes_subid | smallint | The subscription |
+| schema_changes_descr | text | The object changed |
+| schema_changes_addrnames | text\[] | Location of schema change |
+
+### `bdr.sequence_alloc`
+
+A view to see the allocation details for galloc sequences.
+
+#### `bdr.sequence_alloc` Columns
+
+| Name | Type | Description |
+| ------------------- | ----------- | ------------------------------------------------ |
+| seqid | regclass | The ID of the sequence |
+| seq_chunk_size | bigint | A sequence number for the chunk within its value |
+| seq_allocated_up_to | bigint | |
+| seq_nallocs | bigint | |
+| seq_last_alloc | timestamptz | Last sequence allocated |
+
+### `bdr.sequences`
+
+This view lists all sequences with their kind, excluding sequences
+for internal BDR book-keeping.
+
+#### `bdr.sequences` Columns
+
+| Name | Type | Description |
+| ------- | ---- | ----------------------------------------------------- |
+| nspname | name | Namespace containing the sequence |
+| relname | name | Name of the sequence |
+| seqkind | text | Type of the sequence ('local', 'timeshard', 'galloc') |
+
+### `bdr.stat_activity`
+
+Dynamic activity for each backend or worker process.
+
+This contains the same information as `pg_stat_activity`, except `wait_event`
+is set correctly when the wait relates to BDR.
+
+### `bdr.stat_relation`
+
+Apply statistics for each relation. Only contains data if the tracking
+is enabled and something was replicated for a given relation.
+
+#### `bdr.stat_relation` Columns
+
+| Column | Type | Description |
+| ------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
+| nspname | name | Name of the relation's schema |
+| relname | name | Name of the relation |
+| relid | oid | Oid of the relation |
+| total_time | double precision | Total time spent processing replication for the relation |
+| ninsert | bigint | Number of inserts replicated for the relation |
+| nupdate | bigint | Number of updates replicated for the relation |
+| ndelete | bigint | Number of deletes replicated for the relation |
+| ntruncate | bigint | Number of truncates replicated for the relation |
+| shared_blks_hit | bigint | Total number of shared block cache hits for the relation |
+| shared_blks_read | bigint | Total number of shared blocks read for the relation |
+| shared_blks_dirtied | bigint | Total number of shared blocks dirtied for the relation |
+| shared_blks_written | bigint | Total number of shared blocks written for the relation |
+| blk_read_time | double precision | Total time spent reading blocks for the relation, in milliseconds (if `track_io_timing` is enabled, otherwise zero) |
+| blk_write_time | double precision | Total time spent writing blocks for the relation, in milliseconds (if `track_io_timing` is enabled, otherwise zero) |
+| lock_acquire_time | double precision | Total time spent acquiring locks on the relation, in milliseconds (if `pglogical.track_apply_lock_timing` is enabled, otherwise zero) |
+
+### `bdr.stat_subscription`
+
+Apply statistics for each subscription. Only contains data if the tracking
+is enabled.
+
+#### `bdr.stat_subscription` Columns
+
+| Column | Type | Description |
+| -------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------- |
+| sub_name | name | Name of the subscription |
+| subid | oid | Oid of the subscription |
+| nconnect | bigint | Number of times this subscription has connected upstream |
+| ncommit | bigint | Number of commits this subscription did |
+| nabort | bigint | Number of aborts writer did for this subscription |
+| nerror | bigint | Number of errors writer has hit for this subscription |
+| nskippedtx | bigint | Number of transactions skipped by writer for this subscription (due to `skip_transaction` conflict resolver) |
+| ninsert | bigint | Number of inserts this subscription did |
+| nupdate | bigint | Number of updates this subscription did |
+| ndelete | bigint | Number of deletes this subscription did |
+| ntruncate | bigint | Number of truncates this subscription did |
+| nddl | bigint | Number of DDL operations this subscription has executed |
+| ndeadlocks | bigint | Number of errors that were caused by deadlocks |
+| nretries | bigint | Number of retries the writer did (without going for full restart/reconnect) |
+| shared_blks_hit | bigint | Total number of shared block cache hits by the subscription |
+| shared_blks_read | bigint | Total number of shared blocks read by the subscription |
+| shared_blks_dirtied | bigint | Total number of shared blocks dirtied by the subscription |
+| shared_blks_written | bigint | Total number of shared blocks written by the subscription |
+| blk_read_time | double precision | Total time the subscription spent reading blocks, in milliseconds (if `track_io_timing` is enabled, otherwise zero) |
+| blk_write_time | double precision | Total time the subscription spent writing blocks, in milliseconds (if `track_io_timing` is enabled, otherwise zero) |
+| connect_time | timestamp with time zone | Time when the current upstream connection was established, NULL if not connected |
+| last_disconnect_time | timestamp with time zone | Time when the last upstream connection was dropped |
+| start_lsn | pg_lsn | LSN from which this subscription requested to start replication from the upstream |
+| retries_at_same_lsn | bigint | Number of attempts the subscription was restarted from the same LSN value |
+| curr_ncommit | bigint | Number of commits this subscription did after the current connection was established |
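+
+As a minimal sketch (assuming apply statistics tracking is enabled and using
+only columns listed above), subscriptions accumulating errors or retries can
+be spotted with a query such as:
+
+```sql
+-- Sketch: error and retry counters per subscription
+SELECT sub_name,
+       nconnect,
+       ncommit,
+       nerror,
+       nretries,
+       connect_time
+  FROM bdr.stat_subscription
+ ORDER BY nerror DESC, nretries DESC;
+```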
+
+### `bdr.subscription`
+
+This catalog table lists all the subscriptions owned by the local BDR
+node, and which mode they are in.
+
+#### `bdr.subscription` Columns
+
+| Name | Type | Description |
+| ------------------- | ---- | ------------------------- |
+| pgl_subscription_id | oid | Subscription in pglogical |
+| nodegroup_id | oid | Id of nodegroup |
+| origin_node_id | oid | Id of origin node |
+| target_node_id | oid | Id of target node |
+| subscription_mode | char | Mode of subscription |
+| source_node_id | oid | Id of source node |
+| ddl_epoch | int8 | DDL epoch |
+
+### `bdr.subscription_summary`
+
+This view contains summary information about all BDR subscriptions that the
+local node has to other nodes.
+
+#### `bdr.subscription_summary` Columns
+
+| Name | Type | Description |
+| -------------------------- | ----------- | ---------------------------------------------------------------------------------------- |
+| node_group_name | name | Name of the BDR group the node is part of |
+| sub_name | name | Name of the subscription |
+| origin_name | name | Name of the origin node |
+| target_name | name | Name of the target node (normally local node) |
+| sub_enabled | bool | Is the subscription enabled |
+| sub_slot_name | name | Slot name on the origin node used by this subscription |
+| sub_replication_sets | text\[] | Replication sets subscribed |
+| sub_forward_origins | text\[] | Does the subscription accept changes forwarded from other nodes besides the origin |
+| sub_apply_delay | interval | Delay transactions by this much compared to the origin |
+| sub_origin_name | name | Replication origin name used by this subscription |
+| bdr_subscription_mode | char | Subscription mode |
+| subscription_status | text | Status of the subscription worker |
+| node_group_id | oid | The OID of the BDR group the node is part of |
+| sub_id | oid | The OID of the subscription |
+| origin_id | oid | The OID of the origin node |
+| target_id | oid | The OID of the target node |
+| receive_lsn | pg_lsn | Latest LSN of any change or message received (this can go backwards in case of restarts) |
+| receive_commit_lsn | pg_lsn | Latest LSN of last COMMIT received (this can go backwards in case of restarts) |
+| last_xact_replay_lsn | pg_lsn | LSN of last transaction replayed on this subscription |
+| last_xact_flush_lsn | timestamptz | LSN of last transaction replayed on this subscription that's flushed durably to disk |
+| last_xact_replay_timestamp | timestamptz | Timestamp of last transaction replayed on this subscription |
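+
+For example, a minimal query over this view (columns as listed above) to check
+the status and replay progress of each local subscription could look like:
+
+```sql
+-- Sketch: status and replay progress of local subscriptions
+SELECT sub_name,
+       origin_name,
+       subscription_status,
+       last_xact_replay_timestamp
+  FROM bdr.subscription_summary;
+```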
+
+### `bdr.replication_status`
+
+This view shows incoming replication status between the local node and all other
+nodes in the BDR cluster. If this is a logical standby node, then only
+the status for its upstream node is shown. Similarly, replication
+status is not shown for subscriber-only nodes since they never send
+replication changes to other nodes.
+
+#### `bdr.replication_status` Columns
+
+| Column | Type | Description |
+| ------------------- | ------------------------ | --------------------------------------------------------------- |
+| node_id | oid | OID of the local node |
+| node_name | name | Name of the local node |
+| origin_node_id | oid | OID of the origin node |
+| origin_node_name | name | Name of the origin node |
+| sub_id | oid | OID of the subscription for this origin node |
+| sub_name | name | Name of the subscription for this origin node |
+| connected | boolean | Is this node connected to the origin node? |
+| replication_blocked | boolean | Is the replication currently blocked for this origin? |
+| connect_time | timestamp with time zone | Time when the current connection was established |
+| disconnect_time | timestamp with time zone | Time when the last connection was dropped |
+| uptime | interval | Duration since the current connection is active for this origin |
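+
+As an illustrative sketch (using only the columns above), incoming connections
+that are down or blocked can be listed with:
+
+```sql
+-- Sketch: origins whose incoming replication is down or blocked
+SELECT origin_node_name,
+       sub_name,
+       connected,
+       replication_blocked,
+       uptime
+  FROM bdr.replication_status
+ WHERE NOT connected OR replication_blocked;
+```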
+
+### `bdr.tables`
+
+This view lists information about table membership in replication sets.
+If a table exists in multiple replication sets, it will appear multiple times
+in this table.
+
+#### `bdr.tables` Columns
+
+| Name | Type | Description |
+| ------------------ | ------- | --------------------------------------------------------------------------------- |
+| relid | oid | The OID of the relation |
+| nspname | name | Name of the schema relation is in |
+| relname | name | Name of the relation |
+| set_name | name | Name of the replication set |
+| set_ops | text\[] | List of replicated operations |
+| rel_columns | text\[] | List of replicated columns (NULL = all columns) (\*) |
+| row_filter | text | Row filtering expression |
+| conflict_detection | text | Conflict detection method used: row_origin (default), row_version or column_level |
+
+(\*) These columns are reserved for future use and should currently be NULL
+
+### `bdr.trigger`
+
+This view lists all the stream triggers created. Triggers here are typically
+created by `bdr.create_conflict_trigger`.
+
+#### `bdr.trigger` Columns
+
+| Name | Type | Description |
+| -------------- | -------- | ----------------------------- |
+| trigger_id | oid | The ID of the trigger |
+| trigger_reloid | regclass | The relation the trigger is on |
+| trigger_pgtgid | oid | Postgres trigger ID |
+| trigger_type | char | Type of trigger call |
+| trigger_name | name | Name of the trigger |
+
+### `bdr.triggers`
+
+An expanded view of `bdr.trigger` with easier-to-read columns.
+
+| Name | Type | Description |
+| ------------------ | ------------------ | ----------------------- |
+| trigger_name | name | The name of the trigger |
+| event_manipulation | text | The operation(s) |
+| trigger_type | bdr.trigger_type | Type of trigger |
+| trigger_table | bdr.trigger_reloid | The table that calls it |
+| trigger_function | name | The function used |
+
+### `bdr.workers`
+
+Information about running BDR worker processes.
+
+This can be joined with `bdr.stat_activity` using pid to get even more insight
+into the state of BDR workers.
+
+#### `bdr.workers` Columns
+
+| Name | Type | Description |
+| ----------------------- | ----------- | --------------------------------------------------------- |
+| worker_pid | int | Process Id of the worker process |
+| worker_role | int | Numeric representation of worker role |
+| worker_role_name | text | Name of the worker role |
+| worker_subid | oid | Subscription Id if the worker is associated with one |
+| worker_commit_timestamp | timestamptz | Last commit timestamp processed by this worker if any |
+| worker_local_timestamp | timestamptz | Local time at which the above commit was processed if any |
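+
+A minimal sketch of the join mentioned above; it assumes `bdr.stat_activity`
+exposes the usual `pg_stat_activity` columns (`pid`, `wait_event_type`,
+`wait_event`, `state`):
+
+```sql
+-- Sketch: combine worker metadata with activity and wait information
+SELECT w.worker_pid,
+       w.worker_role_name,
+       a.wait_event_type,
+       a.wait_event,
+       a.state
+  FROM bdr.workers AS w
+  JOIN bdr.stat_activity AS a ON a.pid = w.worker_pid;
+```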
+
+### `bdr.worker_errors`
+
+A persistent log of errors from BDR background worker processes, which
+includes errors from the underlying pglogical worker processes.
+
+#### `bdr.worker_errors` Columns
+
+| Name | Type | Description |
+| --------------------- | ----------- | ----------------------------------------------------------------------------------------------------------- |
+| node_group_name | name | Name of the BDR group |
+| origin_name | name | Name of the origin node |
+| source_name | name | |
+| target_name | name | Name of the target node (normally local node) |
+| sub_name | name | Name of the subscription |
+| worker_role | int4 | Internal identifier of the role of this worker (1: manager, 2: receive, 3: writer, 4: output, 5: extension) |
+| worker_role_name | text | Role name |
+| worker_pid | int4 | Process id of the worker causing the error |
+| error_time | timestamptz | Date and time of the error |
+| error_age | interval | Duration since error |
+| error_message | text | Description of the error |
+| error_context_message | text | Context in which the error happened |
+| remoterelid | oid | Oid of remote relation on that node |
+| subwriter_id | oid | |
+| subwriter_name | name | |
+
+### `bdr.autopartition_work_queue`
+
+Contains work items created and processed by the autopartition worker. The work
+items are created on only one node and processed on different nodes.
+
+#### `bdr.autopartition_work_queue` Columns
+
+| Column | Type | Description |
+| ------------------ | ------ | ------------------------------------------------------------------------------------------------------------------------ |
+| ap_wq_workid | bigint | The Unique ID of the work item |
+| ap_wq_ruleid | int | ID of the rule listed in autopartition_rules. Rules are specified using bdr.autoscale/autopartition commands |
+| ap_wq_relname | name | Name of the relation being autopartitioned |
+| ap_wq_relnamespace | name | Name of the tablespace specified in rule for this work item. |
+| ap_wq_partname | name | Name of the partition created by the workitem |
+| ap_wq_work_kind | char | The work kind can be either 'c' (Create Partition), 'm' (Migrate Partition), 'd' (Drop Partition), 'a' (Alter Partition) |
+| ap_wq_work_sql | text | SQL query for the work item |
+| ap_wq_work_depends | Oid\[] | Oids of the nodes on which the work item depends |
+
+### `bdr.autopartition_workitem_status`
+
+The status of the work items, which is updated locally on each node.
+
+#### `bdr.autopartition_workitem_status` Columns
+
+| Column | Type | Description |
+| ----------------- | ----------- | ---------------------------------------------------------------------------------- |
+| ap_wi_workid | bigint | The ID of the work item |
+| ap_wi_nodeid | Oid | Oid of the node on which the work item is being processed |
+| ap_wi_status | char | The status can be either 'q' (Queued), 'c' (Complete), 'f' (Failed), 'u' (Unknown) |
+| ap_wi_started_at | timestamptz | The start timestamptz of work item |
+| ap_wi_finished_at | timestamptz | The end timestamptz of work item |
+
+### `bdr.autopartition_local_work_queue`
+
+Contains work items created and processed by the autopartition worker. This is
+similar to `bdr.autopartition_work_queue`, except that these work items are for
+locally managed tables. Each node creates and processes its own local work
+items, independent of other nodes in the cluster.
+
+#### `bdr.autopartition_local_work_queue` Columns
+
+| Column | Type | Description |
+| ------------------ | ------ | ------------------------------------------------------------------------------------------------------------------------ |
+| ap_wq_workid | bigint | The Unique ID of the work item |
+| ap_wq_ruleid | int | ID of the rule listed in autopartition_rules. Rules are specified using bdr.autoscale/autopartition commands |
+| ap_wq_relname | name | Name of the relation being autopartitioned |
+| ap_wq_relnamespace | name | Name of the tablespace specified in rule for this work item. |
+| ap_wq_partname | name | Name of the partition created by the workitem |
+| ap_wq_work_kind | char | The work kind can be either 'c' (Create Partition), 'm' (Migrate Partition), 'd' (Drop Partition), 'a' (Alter Partition) |
+| ap_wq_work_sql | text | SQL query for the work item |
+| ap_wq_work_depends | Oid\[] | Always NULL |
+
+### `bdr.autopartition_local_workitem_status`
+
+The status of the work items for locally managed tables.
+
+#### `bdr.autopartition_local_workitem_status` Columns
+
+| Column | Type | Description |
+| ----------------- | ----------- | ---------------------------------------------------------------------------------- |
+| ap_wi_workid | bigint | The ID of the work item |
+| ap_wi_nodeid | Oid | Oid of the node on which the work item is being processed |
+| ap_wi_status | char | The status can be either 'q' (Queued), 'c' (Complete), 'f' (Failed), 'u' (Unknown) |
+| ap_wi_started_at | timestamptz | The start timestamptz of work item |
+| ap_wi_finished_at | timestamptz | The end timestamptz of work item |
+
+### `bdr.group_camo_details`
+
+Uses `bdr.run_on_all_nodes` to gather CAMO-related information from all nodes.
+
+#### `bdr.group_camo_details` Columns
+
+| Name | Type | Description |
+| -------------------------- | ---- | ----------------------------------------------------------------------------------- |
+| node_id | text | Internal node id |
+| node_name | text | Name of the node |
+| camo_partner_of            | text | Name of the node for which this node is the CAMO partner                             |
+| camo_origin_for            | text | Name of the node for which this node is the CAMO origin                              |
+| is_camo_partner_connected | text | Connection status |
+| is_camo_partner_ready | text | Readiness status |
+| camo_transactions_resolved | text | Are there any pending and unresolved CAMO transactions |
+| apply_lsn | text | Latest position reported as replayed (visible) |
+| receive_lsn | text | Latest LSN of any change or message received (can go backwards in case of restarts) |
+| apply_queue_size | text | Bytes difference between apply_lsn and receive_lsn |
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
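+
+A minimal example query over this view (columns as listed above) to check CAMO
+partner connectivity and unresolved transactions across the group:
+
+```sql
+-- Sketch: CAMO partner status per node
+SELECT node_name,
+       camo_partner_of,
+       is_camo_partner_connected,
+       is_camo_partner_ready,
+       camo_transactions_resolved
+  FROM bdr.group_camo_details;
+```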
+
+### `bdr.group_raft_details`
+
+Uses `bdr.run_on_all_nodes` to gather Raft Consensus status from all nodes.
+
+#### `bdr.group_raft_details` Columns
+
+| Name | Type | Description |
+| ---------------- | ---- | ------------------------------ |
+| node_id | oid | Internal node id |
+| node_name | name | Name of the node |
+| state | text | Raft worker state on the node |
+| leader_id | oid | Node id of the RAFT_LEADER |
+| current_term | int | Raft election internal id |
+| commit_index | int | Raft snapshot internal id |
+| nodes | int | Number of nodes accessible |
+| voting_nodes | int | Number of nodes voting |
+| protocol_version | int | Protocol version for this node |
+
+### `bdr.group_replslots_details`
+
+Uses `bdr.run_on_all_nodes` to gather BDR/pglogical slot information from all nodes.
+
+#### `bdr.group_replslots_details` Columns
+
+| Name | Type | Description |
+| --------------- | -------- | ------------------------------------------------------------------------------- |
+| node_group_name | text | Name of the BDR group |
+| origin_name | text | Name of the origin node |
+| target_name | text | Name of the target node |
+| slot_name | text | Slot name on the origin node used by this subscription |
+| active | text | Is the slot active (does it have a connection attached to it) |
+| state | text | State of the replication (catchup, streaming, ...) or 'disconnected' if offline |
+| write_lag | interval | Approximate lag time for reported write |
+| flush_lag | interval | Approximate lag time for reported flush |
+| replay_lag | interval | Approximate lag time for reported replay |
+| sent_lag_bytes | int8 | Bytes difference between sent_lsn and current WAL write position |
+| write_lag_bytes | int8 | Bytes difference between write_lsn and current WAL write position |
+| flush_lag_bytes | int8 | Bytes difference between flush_lsn and current WAL write position |
+| replay_lag_byte | int8 | Bytes difference between replay_lsn and current WAL write position |
+
+### `bdr.group_subscription_summary`
+
+Uses `bdr.run_on_all_nodes` to gather subscription status from all nodes.
+
+#### `bdr.group_subscription_summary` Columns
+
+| Name | Type | Description |
+| -------------------------- | ---- | ---------------------------------------------- |
+| origin_node_name | text | Name of the origin of the subscription |
+| target_node_name | text | Name of the target of the subscription |
+| last_xact_replay_timestamp | text | Timestamp of the last replayed transaction |
+| sub_lag_seconds | text | Lag between now and last_xact_replay_timestamp |
+
+### `bdr.group_versions_details`
+
+Uses `bdr.run_on_all_nodes` to gather BDR/pglogical information from all nodes.
+
+#### `bdr.group_versions_details` Columns
+
+| Name | Type | Description |
+| ----------------- | ---- | ---------------------------------- |
+| node_id | oid | Internal node id |
+| node_name | name | Name of the node |
+| postgres_version | text | PostgreSQL version on the node |
+| pglogical_version | text | Pglogical version on the node |
+| bdr_version | text | BDR version on the node |
+| bdr_edition | text | BDR edition (SE or EE) on the node |
+
+## Internal Catalogs and Views
+
+### `bdr.ddl_epoch`
+
+An internal catalog table holding state per DDL epoch.
+
+#### `bdr.ddl_epoch` Columns
+
+| Name | Type | Description |
+| --------------------- | ----------- | ------------------------------------------------------------------------ |
+| ddl_epoch | int8 | Monotonically increasing epoch number |
+| origin_node_id | oid | Internal node id of the node that requested creation of this epoch |
+| epoch_consume_timeout | timestamptz | Timeout of this epoch |
+| epoch_consumed | boolean | Switches to true as soon as the local node has fully processed the epoch |
+
+### `bdr.internal_node_pre_commit`
+
+Internal catalog table; please use the `bdr.node_pre_commit` view.
+
+!!! Note
+ This catalog is only present when bdr-enterprise extension is installed.
+
+### `bdr.sequence_kind`
+
+An internal state table storing the type of each non-local sequence. The view
+`bdr.sequences` is recommended for diagnostic purposes.
+
+#### `bdr.sequence_kind` Columns
+
+| Name | Type | Description |
+| ------- | ---- | ----------------------------------------------------------- |
+| seqid | oid | Internal OID of the sequence |
+| seqkind | char | Internal sequence kind ('l'=local,'t'=timeshard,'g'=galloc) |
+
+### `bdr.state_journal`
+
+An internal node state journal. Please use `bdr.state_journal_details` for
+diagnostic purposes instead.
+
+### `bdr.state_journal_details`
+
+Every node state change is logged permanently in `bdr.state_journal`
+for diagnostic purposes.
+This view provides node names and human-readable state names, and carries all
+of the information in that journal.
+Once a node has successfully joined, the last state entry will be
+`BDR_PEER_STATE_ACTIVE`. This differs from the state of each replication connection
+listed in `bdr.node_slots.state`.
+
+#### `bdr.state_journal_details` Columns
+
+| Name | Type | Description |
+| ------------- | ----------- | -------------------------------------------------------- |
+| state_counter | oid | Monotonically increasing event counter, per node |
+| node_id | oid | Internal node id |
+| node_name | name | Name of the node |
+| state | oid | Internal state id |
+| state_name | text | Human-readable state name |
+| entered_time | timestamptz | Point in time the current node observed the state change |
diff --git a/product_docs/docs/bdr/3.7/durability.mdx b/product_docs/docs/bdr/3.7/durability.mdx
index 1451f7ee4ac..e9c8435b823 100644
--- a/product_docs/docs/bdr/3.7/durability.mdx
+++ b/product_docs/docs/bdr/3.7/durability.mdx
@@ -4,4 +4,182 @@ originalFilePath: durability.md
---
-
+## Overview
+
+Synchronous or *Eager Replication* synchronizes between at least two
+nodes of the cluster before committing a transaction. This provides
+three properties of interest to applications, which are related, but
+can all be implemented individually:
+
+- *Durability*: writing to multiple nodes increases crash resilience
+ and allows the data to be recovered after a crash and restart.
+- *Visibility*: with the commit confirmation to the client, the database
+ guarantees immediate visibility of the committed transaction on some
+ sets of nodes.
+- *No Conflicts After Commit*: the client can rely on the transaction to
+ eventually be applied on all nodes without further conflicts, or get
+ an abort directly informing the client of an error.
+
+PGLogical (PGL) integrates with the `synchronous_commit` option of
+Postgres itself, providing a variant of synchronous replication,
+which can be used between BDR nodes. In addition, BDR offers
+[Eager All-Node Replication](eager) and
+[Commit At Most Once](camo).
+
+Postgres itself provides [Physical Streaming
+Replication](https://www.postgresql.org/docs/11/warm-standby.html#SYNCHRONOUS-REPLICATION)
+(PSR), which is uni-directional, but offers a synchronous variant that
+can be used in combination with BDR.
+
+This chapter covers the various forms of synchronous or eager
+replication and their timing aspects.
+
+## Comparison
+
+Most options for synchronous replication available to
+BDR allow for different levels of synchronization, offering different
+trade-offs between performance and protection against node or network
+outages.
+
+The following table summarizes what a client can expect from a peer node
+the transaction has been replicated to, after having received a COMMIT
+confirmation from the origin node the transaction was issued to.
+
+| Variant | Mode | Received | Visible | Durable |
+| ------- | ----------------------- | -------- | ------- | ------- |
+| PGL/BDR | off (default) | no | no | no |
+| PGL/BDR | remote_write (2) | yes | no | no |
+| PGL/BDR | on (2) | yes | yes | yes |
+| PGL/BDR | remote_apply (2) | yes | yes | yes |
+| PSR | remote_write (2) | yes | no | no (1) |
+| PSR | on (2) | yes | no | yes |
+| PSR | remote_apply (2) | yes | yes | yes |
+| CAMO | remote_write (2) | yes | no | no |
+| CAMO | remote_commit_async (2) | yes | yes | no |
+| CAMO | remote_commit_flush (2) | yes | yes | yes |
+| Eager | n/a | yes | yes | yes |
+
+*(1) written to the OS, durable if the OS remains running and only
+Postgres crashes.*
+
+*(2) unless switched to Local mode (if allowed) by setting
+`synchronous_replication_availability` to `async`, otherwise the
+values for the asynchronous BDR default apply.*
+
+Reception ensures the peer will be able to eventually apply all
+changes of the transaction without requiring any further
+communication, i.e. even in the face of a full or partial network
+outage. All modes considered synchronous provide this protection.
+
+Visibility implies the transaction was applied remotely, and any possible
+conflicts with concurrent transactions have been resolved. Without
+durability, i.e. prior to persisting the transaction, a crash of the
+peer node may revert this state (and require re-transmission and
+re-application of the changes).
+
+Durability relates to the peer node's storage and provides protection
+against loss of data after a crash and recovery of the peer node. If
+the transaction was already visible before the crash, it will be
+visible again after recovery. Otherwise, the transaction's payload
+is persisted and the peer node will be able to apply the transaction
+eventually (without requiring any re-transmission of data).
+
+## Internal Timing of Operations
+
+For a better understanding of how the different modes work, it is
+helpful to realize PSR and PGLogical apply transactions rather
+differently.
+
+With physical streaming replication, the order of operations is:
+
+- origin flushes a commit record to WAL, making the transaction
+ visible locally
+- peer node receives changes and issues a write
+- peer flushes the received changes to disk
+- peer applies changes, making the transaction visible locally
+
+With PGLogical, the order of operations is different:
+
+- origin flushes a commit record to WAL, making the transaction
+ visible locally
+- peer node receives changes into its apply queue in memory
+- peer applies changes, making the transaction visible locally
+- peer persists the transaction by flushing to disk
+
+For CAMO and Eager All Node Replication, note that the origin node
+waits for a confirmation prior to making the transaction visible
+locally. The order of operations is:
+
+- origin flushes a prepare or pre-commit record to WAL
+- peer node receives changes into its apply queue in memory
+- peer applies changes, making the transaction visible locally
+- peer persists the transaction by flushing to disk
+- origin commits and makes the transaction visible locally
+
+The following table summarizes the differences.
+
+| Variant | Order of apply vs persist on peer nodes | Replication before or after origin WAL commit record write |
+| :------ | :-------------------------------------: | :--------------------------------------------------------- |
+| PSR | persist first | after |
+| PGL | apply first | after |
+| CAMO | apply first | before (triggered by pre-commit) |
+| Eager | apply first | before (triggered by prepare) |
+
+## Configuration
+
+The following table provides an overview of which configuration
+settings must be set to a non-default value (req) and which are
+optional (opt) but affect a specific variant.
+
+| setting (GUC) | PSR | PGL | CAMO | Eager |
+| ------------------------------------- | :-: | :-: | :--: | :---: |
+| synchronous_standby_names | req | req | n/a | n/a |
+| synchronous_commit | opt | opt | n/a | n/a |
+| synchronous_replication_availability | opt | opt | opt | n/a |
+| bdr.enable_camo | n/a | n/a | req | n/a |
+| bdr.camo_origin_for | n/a | n/a | req | n/a |
+| bdr.camo_partner_of (on partner node) | n/a | n/a | req | n/a |
+| bdr.commit_scope | n/a | n/a | n/a | req |
+| bdr.global_commit_timeout | n/a | n/a | opt | opt |
+
+## Planned Shutdown and Restarts
+
+When using PGL or CAMO in combination with `remote_write`, care must be taken
+with planned shutdown or restart. By default, the apply queue is consumed
+prior to shutting down. However, in the `immediate` shutdown mode, the queue
+is discarded at shutdown, leading to the stopped node "forgetting"
+transactions in the queue. A concurrent failure of another node could
+lead to loss of data, as if both nodes failed.
+
+To ensure the apply queue gets flushed to disk, please use either
+`smart` or `fast` shutdown for maintenance tasks. This maintains the
+required synchronization level and prevents loss of data.
+
+## Synchronous Replication using PGLogical
+
+### Usage
+
+To enable synchronous replication using PGLogical, the application
+names of the relevant BDR peer nodes need to be added to
+`synchronous_standby_names`. The use of `FIRST x` or `ANY x` offers a
+lot of flexibility, if this does not conflict with the requirements of
+non-BDR standby nodes.
+
+Once added, the level of synchronization can be configured per
+transaction via `synchronous_commit`, which defaults to `on`, meaning that
+adding a node to `synchronous_standby_names` already enables synchronous
+replication. Setting `synchronous_commit` to `local` or `off` turns
+off synchronous replication.
+
+Due to PGLogical applying the transaction before persisting it, the
+values `on` and `remote_apply` are equivalent (for logical
+replication).
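+
+As a minimal sketch, using only standard Postgres settings (the application
+names `bdr_node_b` and `bdr_node_c` are placeholders for your actual peer
+node names):
+
+```sql
+-- Sketch: require confirmation from any one of two BDR peers before COMMIT
+ALTER SYSTEM SET synchronous_standby_names = 'ANY 1 (bdr_node_b, bdr_node_c)';
+SELECT pg_reload_conf();
+
+-- The synchronization level can then be chosen per transaction
+SET synchronous_commit = 'remote_apply';
+```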
+
+### Limitations
+
+PGLogical uses the same configuration (and internal mechanisms) as
+Physical Streaming Replication, therefore the needs of (physical,
+non-BDR) standbys need to be considered when configuring synchronous
+replication between BDR nodes using PGLogical. Most importantly, it is
+not possible to use different synchronization modes for a single
+transaction.
diff --git a/product_docs/docs/bdr/3.7/known-issues.mdx b/product_docs/docs/bdr/3.7/known-issues.mdx
index 9fe3a39e2c3..86c7982b14b 100644
--- a/product_docs/docs/bdr/3.7/known-issues.mdx
+++ b/product_docs/docs/bdr/3.7/known-issues.mdx
@@ -4,4 +4,62 @@ originalFilePath: known-issues.md
---
-
+This section discusses currently known issues in BDR3.
+
+## Data Consistency
+
+Please remember to read about [Conflicts](conflicts) to understand
+the implications of the asynchronous operation mode in terms of data
+consistency.
+
+## List of Issues
+
+In the remainder of this section, we list a number of known issues
+that are tracked in BDR3's ticketing system, each marked with a
+unique identifier.
+
+- If the resolver for the `update_origin_change` conflict
+ is set to `skip`, and `synchronous_commit=remote_apply` is used, and
+ concurrent updates of the same row are repeatedly applied on two
+ different nodes, then one of the update statements might hang due
+ to a deadlock with the pglogical writer. As mentioned in the
+ [Conflicts](conflicts) chapter, `skip` is not the default
+ resolver for the `update_origin_change` conflict, and this
+ combination is not intended to be used in production: it discards
+ one of the two conflicting updates based on the order of arrival
+ on that node, which is likely to cause a divergent cluster.
+ In the rare situation that you do choose to use the `skip`
+ conflict resolver, please note the issue with the use of the
+ `remote_apply` mode.
+
+- A `galloc` sequence might skip some chunks if the
+ sequence is created in a rolled back transaction and then created
+ again with the same name, or if it is created and dropped when DDL
+ replication is not active and then it is created again when DDL
+ replication is active.
+ The impact of the problem is mild, because the sequence
+ guarantees are not violated; the sequence will only skip some
+ initial chunks. Also, as a workaround the user can specify the
+ starting value for the sequence as an argument to the
+ `bdr.alter_sequence_set_kind()` function.
+
+- Upgrades on 2ndQPostgres 13 from BDR 3.7.7 are only supported by adding
+ new nodes, and **not** through in-place upgrade of the same data directory.
+
+- The `bdr.monitor_local_replslots()` function may return a CRITICAL result
+  saying "There is at least 1 BDR replication slot which is missing" even if
+  all slots exist, in the presence of logical standbys or subscriber-only node
+  groups.
+
+- The Decoding Worker feature does not work with CAMO/Eager.
+
+- The Decoding Worker works only with the default replication sets.
+
+- When the Decoding Worker is enabled in a BDR node group and a BDR node is
+  shut down in fast mode immediately after starting it, the shutdown may not
+  complete because the WAL sender does not exit. This happens because the WAL
+  sender waits for the Decoding Worker process to start, but it may never
+  start since the node is shutting down. The situation can be worked around by
+  using an immediate shutdown or by waiting for the Decoding Worker to start.
+  The Decoding Worker process is reported in the `pglogical.workers` and
+  `pg_stat_activity` catalogs.
diff --git a/product_docs/docs/bdr/3.7/libraries.mdx b/product_docs/docs/bdr/3.7/libraries.mdx
index 709871b9637..7c5ba472ce4 100644
--- a/product_docs/docs/bdr/3.7/libraries.mdx
+++ b/product_docs/docs/bdr/3.7/libraries.mdx
@@ -4,4 +4,183 @@ originalFilePath: libraries.md
---
-
+In this section we list the libraries used by BDR3, with the
+corresponding licenses.
+
+| Library | License |
+| :------ | :--------------------------------- |
+| LLVM | BSD (3-clause) |
+| OpenSSL | SSLeay License AND OpenSSL License |
+| Libpq | PostgreSQL License |
+
+## LLVM
+
+Copyright © 1994 The Regents of the University of California. All
+rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+3. Neither the name of the University nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS \`\`AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+## OpenSSL
+
+====================================================================
+
+Copyright © 1998-2004 The OpenSSL Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+3. All advertising materials mentioning features or use of this
+ software must display the following acknowledgment:
+ "This product includes software developed by the OpenSSL Project
+   for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+
+4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ endorse or promote products derived from this software without
+ prior written permission. For written permission, please contact
+   openssl-core@openssl.org.
+
+5. Products derived from this software may not be called "OpenSSL"
+ nor may "OpenSSL" appear in their names without prior written
+ permission of the OpenSSL Project.
+
+6. Redistributions of any form whatsoever must retain the following
+ acknowledgment:
+ "This product includes software developed by the OpenSSL Project
+   for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+
+THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT \`\`AS IS'' AND ANY
+EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
+
+====================================================================
+
+This product includes cryptographic software written by Eric Young
+(eay@cryptsoft.com). This product includes software written by Tim
+Hudson (tjh@cryptsoft.com).
+
+## Original SSLeay Licence
+
+Copyright © 1995-1998 Eric Young (eay@cryptsoft.com)
+All rights reserved.
+
+This package is an SSL implementation written
+by Eric Young (eay@cryptsoft.com).
+The implementation was written so as to conform with Netscapes SSL.
+
+This library is free for commercial and non-commercial use as long as
+the following conditions are aheared to. The following conditions
+apply to all code found in this distribution, be it the RC4, RSA,
+lhash, DES, etc., code; not just the SSL code. The SSL documentation
+included with this distribution is covered by the same copyright terms
+except that the holder is Tim Hudson (tjh@cryptsoft.com).
+
+Copyright remains Eric Young's, and as such any Copyright notices in
+the code are not to be removed.
+If this package is used in a product, Eric Young should be given attribution
+as the author of the parts of the library used.
+This can be in the form of a textual message at program startup or
+in documentation (online or textual) provided with the package.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+ "This product includes cryptographic software written by
+   Eric Young (eay@cryptsoft.com)"
+ The word 'cryptographic' can be left out if the rouines from the library
+ being used are not cryptographic related :-).
+4. If you include any Windows specific code (or a derivative thereof) from
+ the apps directory (application code) you must include an acknowledgement:
+   "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+
+THIS SOFTWARE IS PROVIDED BY ERIC YOUNG \`\`AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+The licence and distribution terms for any publically available version or
+derivative of this code cannot be changed. i.e. this code cannot simply be
+copied and put under another distribution licence
+[including the GNU Public Licence.]
+
+## PostgreSQL License
+
+PostgreSQL Database Management System
+(formerly known as Postgres, then as Postgres95)
+
+Portions Copyright © 1996-2020, The PostgreSQL Global Development Group
+
+Portions Copyright © 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this paragraph
+and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
+EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/product_docs/docs/bdr/3.7/repsets.mdx b/product_docs/docs/bdr/3.7/repsets.mdx
index 6e89d3af86a..54792b1035f 100644
--- a/product_docs/docs/bdr/3.7/repsets.mdx
+++ b/product_docs/docs/bdr/3.7/repsets.mdx
@@ -4,4 +4,670 @@ originalFilePath: repsets.md
---
-
+A replication set is a group of tables which can be subscribed to by a BDR node.
+Replication sets can be used to create more complex replication topologies
+than regular symmetric multi-master, where each node is an exact copy of the
+other nodes.
+
+Every BDR group automatically creates a replication set with the same name as
+the group itself. This replication set is the default replication set, which is
+used for all user tables and DDL replication, and all nodes are subscribed to it.
+In other words, by default all user tables are replicated between all nodes.
+
+## Using Replication Sets
+
+Additional replication sets can be created using `create_replication_set()`,
+specifying whether to include insert, update, delete or truncate actions.
+An option exists to add existing tables to the set automatically, and
+a second option defines whether to add tables automatically when they are
+created.
+
+You may also define manually which tables are added or removed from a
+replication set.
+
+Tables included in the replication set will be maintained when the node
+joins the cluster and afterwards.
+
+Once the node is joined, you may still remove tables from the replication
+set, but adding new tables must be done via a resync operation.
+
+By default, a newly defined replication set does not replicate DDL or BDR
+administration function calls. Use the `replication_set_add_ddl_filter`
+to define which commands will be replicated.
+
+BDR creates replication set definitions on all nodes. Each node can then be
+defined to publish and/or subscribe to each replication set using
+`alter_node_replication_sets`.
+
+Functions exist to alter these definitions later, or to drop the replication
+set.
+
+!!! Note
+ Do not use the default replication set for selective replication.
+ You should not drop or modify the default replication set on any of
+ the BDR nodes in the cluster as it is also used by default for DDL
+ replication and administration function calls.
+
+## Behavior of Partitioned Tables
+
+BDR supports partitioned tables transparently, meaning that a partitioned
+table can be added to a replication set and
+changes that involve any of the partitions will be replicated downstream.
+
+!!! Note
+    When partitions are replicated through a partitioned table, the
+    statements executed directly on a partition are replicated as if they
+    were executed on the parent table. The exception is the `TRUNCATE` command,
+    which always replicates with the list of affected tables or partitions.
+
+It's possible to add individual partitions to the replication set, in
+which case they will be replicated like regular tables (to the table of the
+same name as the partition on the downstream). This has some performance
+advantages if the partitioning definition is the same on both provider and
+subscriber, as the partitioning logic does not have to be executed.
+
+!!! Note
+ If a root partitioned table is part of any replication set, memberships
+ of individual partitions are ignored, and only the membership of said root
+ table will be taken into account.
+
+## Behavior with Foreign Keys
+
+A Foreign Key constraint ensures that each row in the referencing
+table matches a row in the referenced table. Therefore, if the
+referencing table is a member of a replication set, the referenced
+table must also be a member of the same replication set.
+
+The current version of BDR does not automatically check or enforce
+this condition. It is therefore the responsibility of the database
+administrator to make sure, when adding a table to a replication set,
+that all the tables referenced via foreign keys are also added.
+
+The following query can be used to list all the foreign keys and
+replication sets that do not satisfy this requirement, i.e. such that
+the referencing table is a member of the replication set, while the
+referenced table is not:
+
+```sql
+SELECT t1.relname,
+ t1.nspname,
+ fk.conname,
+ t1.set_name
+ FROM bdr.tables AS t1
+ JOIN pg_catalog.pg_constraint AS fk
+ ON fk.conrelid = t1.relid
+ AND fk.contype = 'f'
+ WHERE NOT EXISTS (
+ SELECT *
+ FROM bdr.tables AS t2
+ WHERE t2.relid = fk.confrelid
+ AND t2.set_name = t1.set_name
+);
+```
+
+The output of this query looks like the following:
+
+```sql
+ relname | nspname | conname | set_name
+---------+---------+-----------+----------
+ t2 | public | t2_x_fkey | s2
+(1 row)
+```
+
+This means that table `t2` is a member of replication set `s2`, but the
+table referenced by the foreign key `t2_x_fkey` is not.
+
+!!! Note
+ The `TRUNCATE CASCADE` command takes into account the
+ replication set membership before replicating the command, e.g.
+
+```sql
+TRUNCATE table1 CASCADE;
+```
+
+This will become a `TRUNCATE` without cascade on all the tables that are
+part of the replication set only:
+
+```sql
+TRUNCATE table1, referencing_table1, referencing_table2 ...
+```
+
+## Replication Set Management
+
+Management of replication sets.
+
+Note that, with the exception of `bdr.alter_node_replication_sets`, the following
+functions are considered to be `DDL`, so DDL replication and global locking
+apply to them, if that is currently active. See [DDL Replication].
+
+### bdr.create_replication_set
+
+This function creates a replication set.
+
+Replication of this command is affected by DDL replication configuration
+including DDL filtering settings.
+
+#### Synopsis
+
+```sql
+bdr.create_replication_set(set_name name,
+ replicate_insert boolean DEFAULT true,
+ replicate_update boolean DEFAULT true,
+ replicate_delete boolean DEFAULT true,
+ replicate_truncate boolean DEFAULT true,
+ autoadd_tables boolean DEFAULT false,
+ autoadd_existing boolean DEFAULT true)
+```
+
+#### Parameters
+
+- `set_name` - name of the new replication set; must be unique across the BDR
+ group
+- `replicate_insert` - indicates whether inserts into tables in this
+ replication set should be replicated
+- `replicate_update` - indicates whether updates of tables in this
+ replication set should be replicated
+- `replicate_delete` - indicates whether deletes from tables in this
+ replication set should be replicated
+- `replicate_truncate` - indicates whether truncates of tables in this
+ replication set should be replicated
+- `autoadd_tables` - indicates whether newly created (future) tables should be
+ added to this replication set
+- `autoadd_existing` - indicates whether all existing user tables should be
+ added to this replication set; this only has effect if `autoadd_tables` is
+ set to true
+
+#### Notes
+
+By default, new replication sets do not replicate DDL or BDR administration
+function calls. See [ddl filters](repsets#ddl-replication-filtering) below on how to set
+up DDL replication for replication sets. A preexisting DDL filter that
+replicates all DDL and admin function calls is set up for the default group
+replication set when the group is created; it can be dropped if it is not
+desirable for the BDR group default replication set to replicate DDL or BDR
+administration function calls.
+
+This function uses the same replication mechanism as `DDL` statements. This means
+that the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration.
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction and the changes are visible to the current
+transaction.
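+
+As an illustrative sketch (the set name `myrepset` and the option choices are
+assumptions, not product defaults), a replication set that replicates inserts
+and updates but not deletes or truncates could be created like this:
+
+```sql
+SELECT bdr.create_replication_set('myrepset',
+    replicate_insert := true,
+    replicate_update := true,
+    replicate_delete := false,
+    replicate_truncate := false);
+```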
+
+### bdr.alter_replication_set
+
+This function modifies the options of an existing replication set.
+
+Replication of this command is affected by DDL replication configuration,
+including DDL filtering settings.
+
+#### Synopsis
+
+```sql
+bdr.alter_replication_set(set_name name,
+ replicate_insert boolean DEFAULT NULL,
+ replicate_update boolean DEFAULT NULL,
+ replicate_delete boolean DEFAULT NULL,
+ replicate_truncate boolean DEFAULT NULL,
+ autoadd_tables boolean DEFAULT NULL)
+```
+
+#### Parameters
+
+- `set_name` - name of an existing replication set
+- `replicate_insert` - indicates whether inserts into tables in this
+ replication set should be replicated
+- `replicate_update` - indicates whether updates of tables in this
+ replication set should be replicated
+- `replicate_delete` - indicates whether deletes from tables in this
+ replication set should be replicated
+- `replicate_truncate` - indicates whether truncates of tables in this
+ replication set should be replicated
+- `autoadd_tables` - indicates whether newly created (future) tables should be
+ added to this replication set
+
+Any of the options that are set to NULL (the default) will remain the same as
+before.
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This means
+the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration.
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
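+
+For example, a hedged sketch that stops replicating truncates for the
+hypothetical `myrepset` set created above:
+
+```sql
+SELECT bdr.alter_replication_set('myrepset',
+    replicate_truncate := false);
+```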
+
+### bdr.drop_replication_set
+
+This function removes an existing replication set.
+
+Replication of this command is affected by DDL replication configuration,
+including DDL filtering settings.
+
+#### Synopsis
+
+```sql
+bdr.drop_replication_set(set_name name)
+```
+
+#### Parameters
+
+- `set_name` - name of an existing replication set
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This means
+the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration.
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
+
+!!! Warning
+ Do not drop a replication set which is being used by at least
+ another node, because this will stop replication on that
+ node. Should this happen, please unsubscribe the affected node
+ from that replication set.
+
+ For the same reason, you should not drop a replication set if
+ there is a join operation in progress, and the node being joined
+ is a member of that replication set; replication set membership is
+ only checked at the beginning of the join.
+
+ This happens because the information on replication set usage is
+ local to each node, so that it can be configured on a node before
+ it joins the group.
+
+You can manage replication set subscription for a node using `alter_node_replication_sets`
+which is mentioned below.
+
+### bdr.alter_node_replication_sets
+
+This function changes which replication sets a node publishes and is subscribed to.
+
+#### Synopsis
+
+```sql
+bdr.alter_node_replication_sets(node_name name,
+ set_names text[])
+```
+
+#### Parameters
+
+- `node_name` - which node to modify; currently has to be the local node
+- `set_names` - array of replication sets to replicate to the specified
+ node; an empty array will result in the use of the group default replication set
+
+#### Notes
+
+This function is only executed on the local node and is not replicated in any manner.
+
+The replication sets listed are *not* checked for existence,
+since this function is designed to be executed before the node joins. Be careful
+to specify replication set names correctly to avoid errors.
+
+This allows the function to be called not only on a node that is part of the
+BDR group, but also on a node that has not joined any group yet, in order to limit
+what data is synchronized during the join. However, please note that the schema is
+*always fully synchronized* without regard to the replication sets setting,
+meaning that all tables are copied across, not just the ones specified
+in the replication set. Unwanted tables can be dropped by referring to
+the `bdr.tables` catalog table. These might be removed automatically in later
+versions of BDR. This is currently true even if the [ddl filters](repsets#ddl-replication-filtering)
+configuration would otherwise prevent replication of DDL.
+
+The replication sets that the node subscribes to after this call must be published
+by the other nodes in order for changes to be replicated from those nodes to
+the node where this function is executed.
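+
+As a sketch, assuming the BDR group (and hence its default replication set) is
+named `bdrgroup` and using a placeholder node and set name, a joining node
+could be limited to the group default set plus one additional set:
+
+```sql
+SELECT bdr.alter_node_replication_sets('node1',
+    ARRAY['bdrgroup', 'myrepset']);
+```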
+
+## Replication Set Membership
+
+Tables can be added to and removed from one or multiple replication sets. This only
+affects replication of changes (DML) in those tables; schema changes (DDL) are
+handled by DDL replication set filters (see [DDL Replication Filtering] below).
+
+The replication uses the table membership in replication sets in combination
+with the node replication sets configuration to determine which actions should be
+replicated to which node. The decision is made using the union of all the
+memberships and replication set options. This means that if a table is a member
+of replication set A, which replicates only INSERTs, and of replication set B, which
+replicates only UPDATEs, both INSERTs and UPDATEs will be replicated if the
+target node is also subscribed to both replication set A and B, as shown in the
+sketch below.
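+
+A minimal sketch of that behaviour, using placeholder set and table names and
+the membership function documented below:
+
+```sql
+-- set_a replicates only INSERTs, set_b replicates only UPDATEs
+SELECT bdr.create_replication_set('set_a',
+    replicate_update := false, replicate_delete := false,
+    replicate_truncate := false);
+SELECT bdr.create_replication_set('set_b',
+    replicate_insert := false, replicate_delete := false,
+    replicate_truncate := false);
+
+-- a table that is a member of both sets gets both INSERTs and UPDATEs
+-- replicated to any node subscribed to both set_a and set_b
+SELECT bdr.replication_set_add_table('public.mytable', 'set_a');
+SELECT bdr.replication_set_add_table('public.mytable', 'set_b');
+```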
+
+### bdr.replication_set_add_table
+
+This function adds a table to a replication set.
+
+This will add a table to the replication set and start replication of changes
+from this moment (or rather from the transaction commit). Any existing data the table
+may have on a node will not be synchronized.
+
+Replication of this command is affected by DDL replication configuration,
+including DDL filtering settings.
+
+#### Synopsis
+
+```sql
+bdr.replication_set_add_table(relation regclass,
+ set_name name DEFAULT NULL,
+ columns text[] DEFAULT NULL,
+ row_filter text DEFAULT NULL)
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table
+- `set_name` - name of the replication set; if NULL (the default) then the BDR
+ group default replication set is used
+- `columns` - reserved for future use (currently does nothing and must be NULL)
+- `row_filter` - SQL expression to be used for filtering the replicated rows;
+ if this expression is not defined (i.e. NULL - the default) then all rows are sent
+
+The `row_filter` specifies an expression producing a Boolean result, where NULL
+is possible. Expressions evaluating to True or Unknown (NULL) will replicate the
+row; a False value will not replicate the row. Expressions cannot contain
+subqueries, nor refer to variables other than columns of the current row being
+replicated. No system columns may be referenced.
+
+`row_filter` executes on the origin node, not on the target node. This puts an
+additional CPU overhead on replication for this specific table, but will
+completely avoid sending data for filtered rows, hence reducing network
+bandwidth and apply overhead on the target node.
+
+`row_filter` will never remove `TRUNCATE` commands for a specific table.
+`TRUNCATE` commands can be filtered away at the replication set level; see
+earlier.
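+
+For instance, a hedged example that only replicates rows for one region (the
+table, set and column names are assumptions for illustration):
+
+```sql
+SELECT bdr.replication_set_add_table('public.orders', 'myrepset',
+    row_filter := $rf$ region = 'EU' $rf$);
+```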
+
+It is possible to replicate just some columns of a table, see
+[Replicating between nodes with differences](appusage).
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This means
+that the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration.
+
+The function will take a `DML` global lock on the relation that is being
+added to the replication set if the `row_filter` is not NULL, otherwise
+it will take just a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction and the changes are visible to the current
+transaction.
+
+### bdr.replication_set_remove_table
+
+This function removes a table from the replication set.
+
+Replication of this command is affected by DDL replication configuration,
+including DDL filtering settings.
+
+#### Synopsis
+
+```sql
+bdr.replication_set_remove_table(relation regclass,
+ set_name name DEFAULT NULL)
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table
+- `set_name` - name of the replication set; if NULL (the default) then the BDR
+ group default replication set is used
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This means
+the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration.
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction and the changes are visible to the current
+transaction.
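+
+For example, to remove the hypothetical `public.orders` table from the
+`myrepset` set used in the sketch above:
+
+```sql
+SELECT bdr.replication_set_remove_table('public.orders', 'myrepset');
+```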
+
+## Listing Replication Sets
+
+Existing replication sets can be listed with the following query:
+
+```sql
+SELECT set_name
+FROM bdr.replication_sets;
+```
+
+This query can be used to list all the tables in a given replication
+set:
+
+```sql
+SELECT nspname, relname
+FROM bdr.tables
+WHERE set_name = 'myrepset';
+```
+
+In the section [Behavior with Foreign Keys] above, we report a
+query that lists all the foreign keys whose referenced table is not
+included in the same replication set as the referencing table.
+
+Use the following SQL to show the replication sets that the
+current node publishes and subscribes to:
+
+```sql
+SELECT s.node_id,
+ s.node_name,
+ COALESCE(
+ i.pub_repsets, s.pub_repsets
+ ) AS pub_repsets,
+ COALESCE(
+ i.sub_repsets, s.sub_repsets
+ ) AS sub_repsets
+FROM bdr.local_node_summary s
+INNER JOIN bdr.node_local_info i ON i.node_id = s.node_id;
+```
+
+This produces output like this:
+
+```sql
+ node_id | node_name | pub_repsets | sub_repsets
+------------+-----------+--------------------+--------------------
+ 1834550102 | s01db01 | {bdrglobal,bdrs01} | {bdrglobal,bdrs01}
+(1 row)
+```
+
+To get the same query executed against all nodes in the cluster, thus finding
+out which replication sets are associated with every node at the same time, we
+can use the following query:
+
+```sql
+WITH node_repsets AS (
+ SELECT jsonb_array_elements(
+ bdr.run_on_all_nodes($$
+ SELECT s.node_id,
+ s.node_name,
+ COALESCE(
+ i.pub_repsets, s.pub_repsets
+ ) AS pub_repsets,
+ COALESCE(
+ i.sub_repsets, s.sub_repsets
+ ) AS sub_repsets
+ FROM bdr.local_node_summary s
+ INNER JOIN bdr.node_local_info i
+ ON i.node_id = s.node_id;
+ $$)::jsonb
+ ) AS j
+)
+SELECT j->'response'->'command_tuples'->0->>'node_id' AS node_id,
+ j->'response'->'command_tuples'->0->>'node_name' AS node_name,
+ j->'response'->'command_tuples'->0->>'pub_repsets' AS pub_repsets,
+ j->'response'->'command_tuples'->0->>'sub_repsets' AS sub_repsets
+FROM node_repsets;
+```
+
+This will show, for example:
+
+```sql
+ node_id | node_name | pub_repsets | sub_repsets
+------------+-----------+--------------------+--------------------
+ 933864801 | s02db01 | {bdrglobal,bdrs02} | {bdrglobal,bdrs02}
+ 1834550102 | s01db01 | {bdrglobal,bdrs01} | {bdrglobal,bdrs01}
+ 3898940082 | s01db02 | {bdrglobal,bdrs01} | {bdrglobal,bdrs01}
+ 1102086297 | s02db02 | {bdrglobal,bdrs02} | {bdrglobal,bdrs02}
+(4 rows)
+```
+
+## DDL Replication Filtering
+
+By default, the replication of all supported DDL happens via the default BDR
+group replication set. This is achieved by the existence of a DDL filter with
+the same name as the BDR group, which is automatically added to the default BDR
+group replication set when the BDR group is created.
+
+The above can be adjusted by changing the DDL replication filters for
+all existing replication sets. These filters are independent of table
+membership in the replication sets. Just like data changes, each DDL statement
+will be replicated only once, no matter if it is matched by multiple filters on
+multiple replication sets.
+
+You can list existing DDL filters with the following query, which
+shows for each filter the regular expression applied to the command
+tag and to the role name:
+
+```sql
+SELECT * FROM bdr.ddl_replication;
+```
+
+The following functions can be used to manipulate DDL filters. Note
+that they are considered to be `DDL`, and therefore subject to DDL
+replication and global locking.
+
+### bdr.replication_set_add_ddl_filter
+
+This function adds a DDL filter to a replication set.
+
+Any DDL that matches the given filter will be replicated to any node which
+is subscribed to that set. This also affects replication of BDR admin functions.
+
+Note that this does not prevent execution of DDL on any node; it only
+alters whether or not DDL is replicated to other nodes. So if two nodes have
+a replication filter between them that excludes all index commands, then
+index commands can still be executed freely by directly connecting to
+each node and executing the desired DDL on that node.
+
+The DDL filter can specify a `command_tag` and `role_name` to allow
+replication of only some DDL statements. The `command_tag` is the same as the one
+used by [EVENT TRIGGERs](https://www.postgresql.org/docs/current/static/event-trigger-matrix.html)
+for regular PostgreSQL commands. A typical example might be to create a
+filter that prevents additional index commands on a logical standby from
+being replicated to all other nodes.
+
+Use of the BDR admin functions can be filtered using a tag name matching the
+qualified function name (for example `bdr.replication_set_add_table` will be the
+command tag for the function of the same name). For example, this allows all BDR
+functions to be filtered using the tag `bdr.*`.
+
+The `role_name` is used for matching against the current role which is executing
+the command. Both `command_tag` and `role_name` are evaluated as regular
+expressions which are case sensitive.
+
+#### Synopsis
+
+```sql
+bdr.replication_set_add_ddl_filter(set_name name,
+ ddl_filter_name text,
+ command_tag text,
+ role_name text DEFAULT NULL)
+```
+
+#### Parameters
+
+- `set_name` - name of the replication set; if NULL then the BDR
+ group default replication set is used
+- `ddl_filter_name` - name of the DDL filter; this must be unique across the
+ whole BDR group
+- `command_tag` - regular expression for matching command tags; NULL means
+ match everything
+- `role_name` - regular expression for matching role name; NULL means
+ match all roles
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This means
+that the replication is affected by the [ddl filters](repsets#ddl-replication-filtering)
+configuration. Please note that this means that replication of changes to DDL
+filter configuration is affected by the existing DDL filter configuration!
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
+
+To view which replication filters are defined, use the view `bdr.ddl_replication`.
+
+#### Examples
+
+To include only BDR admin functions, define a filter like this:
+
+```sql
+SELECT bdr.replication_set_add_ddl_filter('mygroup', 'mygroup_admin', $$bdr\..*$$);
+```
+
+To exclude everything apart from index DDL:
+
+```sql
+SELECT bdr.replication_set_add_ddl_filter('mygroup', 'index_filter',
+ '^(?!(CREATE INDEX|DROP INDEX|ALTER INDEX)).*');
+```
+
+To include all operations on tables and indexes, but exclude all others, add
+two filters: one for tables and one for indexes. This illustrates that
+multiple filters provide the union of all allowed DDL commands:
+
+```sql
+SELECT bdr.replication_set_add_ddl_filter('bdrgroup','index_filter', '^((?!INDEX).)*$');
+SELECT bdr.replication_set_add_ddl_filter('bdrgroup','table_filter', '^((?!TABLE).)*$');
+```
+
+### bdr.replication_set_remove_ddl_filter
+
+This function removes the DDL filter from a replication set.
+
+Replication of this command is affected by DDL replication configuration,
+including DDL filtering settings themselves!
+
+#### Synopsis
+
+```sql
+bdr.replication_set_remove_ddl_filter(set_name name,
+ ddl_filter_name text)
+```
+
+#### Parameters
+
+- `set_name` - name of the replication set; if NULL then the BDR
+ group default replication set is used
+- `ddl_filter_name` - name of the DDL filter to remove
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This
+means that the replication is affected by the
+[ddl filters](repsets#ddl-replication-filtering) configuration. Please note
+that this means that replication of changes to the DDL filter configuration is
+affected by the existing DDL filter configuration.
+
+The function will take a `DDL` global lock.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
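+
+#### Examples
+
+For example, to remove the `index_filter` added to `mygroup` in the
+`bdr.replication_set_add_ddl_filter` examples above:
+
+```sql
+SELECT bdr.replication_set_remove_ddl_filter('mygroup', 'index_filter');
+```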
diff --git a/product_docs/docs/bdr/3.7/scaling.mdx b/product_docs/docs/bdr/3.7/scaling.mdx
index 20c5e2eab66..54eda881f51 100644
--- a/product_docs/docs/bdr/3.7/scaling.mdx
+++ b/product_docs/docs/bdr/3.7/scaling.mdx
@@ -1,8 +1,350 @@
---
-navTitle: AutoPartition
-title: Database Scaling
+title: AutoPartition
originalFilePath: scaling.md
---
-
+AutoPartition provides automatic management of partitioned tables.
+
+AutoPartition allows tables to grow easily to large sizes by automatic
+partitioning management. This utilizes the additional features of BDR
+such as low-conflict locking when creating and dropping partitions.
+
+New partitions can be created regularly and then dropped when the
+data retention period expires.
+
+BDR management is primarily accomplished via SQL-callable functions.
+All functions in BDR are exposed in the `bdr` schema. Unless you put it into
+your `search_path`, you will need to schema-qualify the name of each function.
+
+!!! Note
+ This feature is currently only available on EDB Postgres Extended and
+ EDB Postgres Advanced.
+
+## Auto Creation of Partitions
+
+`bdr.autopartition()` is used to create or alter the definition of automatic
+range partitioning for a table. If no definition exists, it will be created,
+otherwise later executions will alter the definition.
+
+`bdr.autopartition()` does not lock the actual table, it only changes the
+definition of when and how new partition maintenance actions will take place.
+
+`bdr.autopartition()` leverages the EDB Postgres Extended features that allow a
+partition to be attached or detached/dropped without locking the rest of the
+table, and that allow a new tablespace to be set while still allowing SELECT queries.
+
+An ERROR is raised if the table is not RANGE partitioned or a multi-column
+partition key is used.
+
+A new partition is added for every `partition_increment` range of values, with
+lower and upper bound `partition_increment` apart. For tables with a partition
+key of type `timestamp` or `date`, the `partition_increment` must be a valid
+constant of type `interval`. For example, specifying `1 Day` will cause a new
+partition to be added each day, with partition bounds that are 1 day apart.
+
+If the partition column is connected to a `timeshard` or `ksuuid` sequence,
+the `partition_increment` must be specified as type `interval`. Otherwise,
+if the partition key is integer or numeric, then the `partition_increment`
+must be a valid constant of the same datatype. For example, specifying
+'1000000' will cause new partitions to be added every 1 million values.
+
+If the table has no existing partition, then the specified
+`partition_initial_lowerbound` is used as the lower bound for the first
+partition. If `partition_initial_lowerbound` is not specified, then the system
+tries to derive its value from the partition column type and the specified
+`partition_increment`. For example, if `partition_increment` is specified as `1
+Day`, then `partition_initial_lowerbound` will be automatically set to the current
+date. If `partition_increment` is specified as `1 Hour`, then
+`partition_initial_lowerbound` will be set to the current hour of the current
+date. The bounds for the subsequent partitions will be set using the
+`partition_increment` value.
+
+The system always tries to have a certain minimum number of advance partitions.
+In order to decide whether to create new partitions or not, it uses the
+specified `partition_autocreate_expression`. This can be a SQL evaluable
+expression, which is evaluated every time a check is performed. For example,
+for a partitioned table on column type `date`, if
+`partition_autocreate_expression` is specified as `DATE_TRUNC('day',
+CURRENT_DATE)`, `partition_increment` is specified as `1 Day` and
+`minimum_advance_partitions` is specified as 2, then new partitions will be
+created until the upper bound of the last partition is less than
+`DATE_TRUNC('day', CURRENT_DATE) + '2 Days'::interval`.
+
+The expression is evaluated each time the system checks for new partitions.
+
+For a partitioned table on column type `integer`, the
+`partition_autocreate_expression` may be specified as `SELECT max(partcol) FROM
+schema.partitioned_table`. The system then regularly checks if the maximum value of
+the partitioned column is within the distance of `minimum_advance_partitions *
+partition_increment` of the last partition's upper bound. It is expected that
+the user creates an index on the `partcol` so that the query runs efficiently.
+If the `partition_autocreate_expression` is not specified for a partition table
+on column type `integer`, `smallint` or `bigint`, then the system will
+automatically set it to `max(partcol)`.
+
+If the `data_retention_period` is set, partitions will be automatically
+dropped after this period. Partitions will be dropped at the same time as new
+partitions are added, to minimize locking. If not set, partitions must
+be dropped manually.
+
+The `data_retention_period` parameter is only supported for timestamp (and
+related) based partitions. The period is calculated by considering the upper
+bound of the partition, and the partition is either migrated to the secondary
+tablespace or dropped once the given period expires, relative to that
+upper bound.
+
+By default, AutoPartition manages partitions globally. In other words, when a
+partition is created on one node, the same partition is also created on all
+other nodes in the cluster. So all partitions are consistent and guaranteed to
+be available. For this, AutoPartition makes use of RAFT. This behaviour can be
+changed by passing `managed_locally` as `true`. In that case, all partitions
+are managed locally on each node. This is useful for the case when the
+partitioned table is not a replicated table and hence it may not be necessary
+or even desirable to have all partitions on all nodes. For example, the
+built-in `bdr.conflict_history` table is not a replicated table, and is
+managed by AutoPartition locally. Each node creates partitions for this table
+locally and drops them once they are old enough.
+
+Tables once marked as `managed_locally` cannot be later changed to be managed
+globally and vice versa.
+
+Activities are performed only when the entry is marked `enabled = on`.
+
+The user is not expected to manually create or drop partitions for tables
+managed by AutoPartition. Doing so can make the AutoPartition metadata
+inconsistent and could cause it to fail.
+
+### Configure AutoPartition
+
+The `bdr.autopartition` function configures automatic partitioning of a table.
+
+#### Synopsis
+
+```sql
+bdr.autopartition(relation regclass,
+ partition_increment text,
+ partition_initial_lowerbound text DEFAULT NULL,
+ partition_autocreate_expression text DEFAULT NULL,
+ minimum_advance_partitions integer DEFAULT 2,
+ maximum_advance_partitions integer DEFAULT 5,
+ data_retention_period interval DEFAULT NULL,
+ managed_locally boolean DEFAULT false,
+ enabled boolean DEFAULT on);
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table.
+- `partition_increment` - interval or increment to next partition creation.
+- `partition_initial_lowerbound` - if the table has no partition, then the
+ first partition with this lower bound and `partition_increment` apart upper
+ bound will be created.
+- `partition_autocreate_expression` - is used to detect if it is time to create new partitions.
+- `minimum_advance_partitions` - the system will attempt to always have at
+ least `minimum_advance_partitions` partitions.
+- `maximum_advance_partitions` - number of partitions to be created in a single
+ go once the number of advance partitions falls below `minimum_advance_partitions`.
+- `data_retention_period` - interval until older partitions are dropped, if
+ defined. This must be greater than `migrate_after_period`.
+- `managed_locally` - if true then the partitions will be managed locally.
+- `enabled` - allows activity to be disabled/paused and later resumed/re-enabled.
+
+#### Examples
+
+Daily partitions, keep data for one month:
+
+```sql
+CREATE TABLE measurement (
+logdate date not null,
+peaktemp int,
+unitsales int
+) PARTITION BY RANGE (logdate);
+
+SELECT bdr.autopartition('measurement', '1 day', data_retention_period := '30 days');
+```
+
+Create 5 advance partitions when there are only 2 more partitions remaining (each partition can hold 1 billion orders):
+
+```sql
+SELECT bdr.autopartition('Orders', '1000000000',
+ partition_initial_lowerbound := '0',
+ minimum_advance_partitions := 2,
+ maximum_advance_partitions := 5
+ );
+```
+
+### Create One AutoPartition
+
+Use `bdr.autopartition_create_partition()` to create a standalone AutoPartition
+on the parent table.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_create_partition(relname regclass,
+ partname name,
+ lowerb text,
+ upperb text,
+ nodes oid[]);
+```
+
+#### Parameters
+
+- `relname` - Name or Oid of the parent table to attach to
+- `partname` - Name of the new AutoPartition
+- `lowerb` - The lower bound of the partition
+- `upperb` - The upper bound of the partition
+- `nodes` - List of nodes that the new partition resides on
+
+### Stopping Auto-Creation of Partitions
+
+Use `bdr.drop_autopartition()` to drop the auto-partitioning rule for the
+given relation. All pending work items for the relation are deleted and no new
+work items are created.
+
+```sql
+bdr.drop_autopartition(relation regclass);
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table
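+
+For example, to stop automatic partition management for the `measurement`
+table used in the earlier example:
+
+```sql
+SELECT bdr.drop_autopartition('measurement');
+```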
+
+### Drop one AutoPartition
+
+Use `bdr.autopartition_drop_partition` once a BDR AutoPartition table has been
+created, as this function can drop individual partitions. If the
+partition has been successfully dropped, the function will return `true`.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_drop_partition(relname regclass)
+```
+
+#### Parameters
+
+- `relname` - The name of the partitioned table to be dropped
+
+### Notes
+
+This will place a DDL lock on the parent table, before using DROP TABLE on the
+chosen partition table.
+
+### Wait for Partition Creation
+
+Use `bdr.autopartition_wait_for_partitions()` to wait for the creation of
+partitions on the local node. The function takes the partitioned table name and
+a partition key column value and waits until the partition that holds that
+value is created.
+
+The function only waits for the partitions to be created locally. It does not guarantee
+that the partitions also exist on the remote nodes.
+
+In order to wait for the partition to be created on all BDR nodes, use the
+`bdr.autopartition_wait_for_partitions_on_all_nodes()` function. This function
+internally checks local as well as all remote nodes and waits until the
+partition is created everywhere.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_wait_for_partitions(relation regclass, bound text);
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table
+- `bound` - partition key column value.
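+
+As a sketch, assuming the `measurement` table from the earlier example with a
+`date` partition key (the date value is purely illustrative):
+
+```sql
+SELECT bdr.autopartition_wait_for_partitions('measurement', '2021-07-01');
+```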
+
+#### Synopsis
+
+```sql
+bdr.autopartition_wait_for_partitions_on_all_nodes(relation regclass, bound text);
+```
+
+#### Parameters
+
+- `relation` - name or Oid of a table.
+- `bound` - partition key column value.
+
+### Find Partition
+
+Use the `bdr.autopartition_find_partition()` function to find the partition for the
+given partition key value. If a partition to hold that value does not exist, then
+the function returns NULL. Otherwise, the OID of the partition is returned.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_find_partition(relname regclass, searchkey text);
+```
+
+#### Parameters
+
+- `relname` - name of the partitioned table.
+- `searchkey` - partition key value to search.
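+
+For example, again assuming the `measurement` table with a `date` partition key
+and an illustrative search value:
+
+```sql
+SELECT bdr.autopartition_find_partition('measurement', '2021-07-01');
+```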
+
+### Enable/Disable AutoPartitioning
+
+Use `bdr.autopartition_enable()` to enable AutoPartitioning on the given table.
+If AutoPartitioning is already enabled, then it will be a no-op. Similarly, use
+`bdr.autopartition_disable()` to disable AutoPartitioning on the given table.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_enable(relname regclass);
+```
+
+#### Parameters
+
+- `relname` - name of the relation to enable AutoPartitioning.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_disable(relname regclass);
+```
+
+#### Parameters
+
+- `relname` - name of the relation to disable AutoPartitioning.
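+
+For example, to pause and later resume partition maintenance on the
+hypothetical `measurement` table:
+
+```sql
+SELECT bdr.autopartition_disable('measurement');
+-- ... perform maintenance ...
+SELECT bdr.autopartition_enable('measurement');
+```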
+
+### Find the Last Completed Workitem
+
+Use `bdr.autopartition_get_last_completed_workitem()` to return the `id` of the
+last workitem successfully completed on all nodes in the cluster.
+
+#### Synopsis
+
+```sql
+bdr.autopartition_get_last_completed_workitem();
+```
+
+### Check AutoPartition Workers
+
+Using the `bdr.autopartition_work_queue_check_status` function, you can
+see the status of the background workers that maintain
+AutoPartitions.
+
+The workers can be seen through these views:
+
+- `autopartition_work_queue_local_status`
+- `autopartition_work_queue_global_status`
+
+#### Synopsis
+
+```sql
+bdr.autopartition_work_queue_check_status(workid bigint,
+ local boolean DEFAULT false);
+```
+
+#### Parameters
+
+- `workid` - The key of the AutoPartition worker
+- `local` - Check the local status only
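+
+A hedged sketch, assuming `42` is a valid workitem id on this node:
+
+```sql
+SELECT bdr.autopartition_work_queue_check_status(42, local := true);
+```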
+
+#### Notes
+
+AutoPartition workers are always running in the background, even before the
+`bdr.autopartition` function is called for the first time. If an invalid worker ID
+is used, the function will return `unknown`. `In-progress` is the typical status.
diff --git a/product_docs/docs/bdr/3.7/striggers.mdx b/product_docs/docs/bdr/3.7/striggers.mdx
index 72f9724d317..52087d76bee 100644
--- a/product_docs/docs/bdr/3.7/striggers.mdx
+++ b/product_docs/docs/bdr/3.7/striggers.mdx
@@ -4,4 +4,699 @@ originalFilePath: striggers.md
---
-
+BDR introduces new types of triggers which can be used for additional
+data processing on the downstream/target node.
+
+- Conflict Triggers
+- Transform Triggers
+
+Together, these types of triggers are known as Stream Triggers.
+
+!!! Note
+ This feature is currently only available on EDB Postgres Extended and
+ EDB Postgres Advanced.
+
+Stream Triggers are designed to be trigger-like in syntax; they leverage the
+PostgreSQL BEFORE trigger architecture, and are likely to have similar
+performance characteristics to PostgreSQL BEFORE Triggers.
+
+One trigger function can be used by multiple trigger definitions, just as with
+normal PostgreSQL triggers.
+A trigger function is simply a program defined in this form:
+`CREATE FUNCTION ... RETURNS TRIGGER`. Creating the actual trigger does not
+require use of the CREATE TRIGGER command. Instead, stream triggers
+are created using the special BDR functions
+`bdr.create_conflict_trigger()` and `bdr.create_transform_trigger()`.
+
+Once created, the trigger will be visible in the catalog table `pg_trigger`.
+The stream triggers will be marked as `tgisinternal = true` and
+`tgenabled = 'D'` and will have name suffix '\_bdrc' or '\_bdrt'. The view
+`bdr.triggers` provides information on the triggers in relation to the table,
+the name of the procedure that is being executed, the event that triggers it,
+and the trigger type.
+
+Note that stream triggers are therefore NOT enabled for normal SQL processing.
+Because of this, `ALTER TABLE ... ENABLE TRIGGER` is blocked for stream
+triggers in both its specific name variant and the ALL variant, to prevent
+the trigger from executing as a normal SQL trigger.
+
+Note that these triggers execute on the downstream or target node. There is no
+option for them to execute on the origin node, though one may wish to consider
+the use of `row_filter` expressions on the origin.
+
+Also, any DML which is applied during the execution of a stream
+trigger will not be replicated to other BDR nodes, and will not
+trigger the execution of standard local triggers. This is intentional,
+and can be used for instance to log changes or conflicts captured by a
+stream trigger into a table that is crash-safe and specific to that
+node; a working example is provided at the end of this chapter.
+
+## Trigger execution during Apply
+
+Transform triggers execute first, once for each incoming change in the
+triggering table. These triggers fire before we have even attempted to locate a
+matching target row, allowing a very wide range of transforms to be applied
+efficiently and consistently.
+
+Next, for UPDATE and DELETE changes we locate the target row. If there is no
+target row, then there is no further processing for those change types.
+
+We then execute any normal triggers that previously have been explicitly enabled
+as replica triggers at table-level:
+
+```sql
+ALTER TABLE tablename
+ENABLE REPLICA TRIGGER trigger_name;
+```
+
+We then decide whether a potential conflict exists and if so, we then call any
+conflict trigger that exists for that table.
+
+### Missing Column Conflict Resolution
+
+Before transform triggers are executed, PostgreSQL tries to match the
+incoming tuple against the rowtype of the target table.
+
+Any column that exists on the input row but not on the target table
+will trigger a conflict of type `target_column_missing`; conversely, a
+column existing on the target table but not in the incoming row
+triggers a `source_column_missing` conflict. The default resolutions
+for those two conflict types are respectively `ignore_if_null` and
+`use_default_value`.
+
+This is relevant in the context of rolling schema upgrades; for
+instance, if the new version of the schema introduces a new
+column. When replicating from an old version of the schema to a new
+one, the source column is missing, and the `use_default_value`
+strategy is appropriate, as it populates the newly introduced column
+with the default value.
+
+However, when replicating from a node having the new schema version to
+a node having the old one, the column is missing from the target
+table, and the `ignore_if_null` resolver is not appropriate for a
+rolling upgrade, because it will break replication as soon as the user
+inserts, in any of the upgraded nodes, a tuple with a non-NULL value
+in the new column.
+
+In view of this example, the appropriate setting for rolling schema
+upgrades is to configure each node to apply the `ignore` resolver in
+case of a `target_column_missing` conflict.
+
+This is done with the following query, that must be **executed
+separately on each node**, after replacing `node1` with the actual
+node name:
+
+```sql
+SELECT bdr.alter_node_set_conflict_resolver('node1',
+ 'target_column_missing', 'ignore');
+```
+
+#### Data Loss and Divergence Risk
+
+In this section, we show how setting the conflict resolver to `ignore`
+can lead to data loss and cluster divergence.
+
+Consider the following example: table `t` exists on nodes 1 and 2, but
+its column `col` only exists on node 1.
+
+If the conflict resolver is set to `ignore`, then there can be rows on
+node 1 where `col` is not null, e.g. `(pk=1, col=100)`. That row will be
+replicated to node 2, and the value in column `col` will be discarded,
+e.g. `(pk=1)`.
+
+If column `col` is then added to the table on node 2, it will initially
+be set to NULL on all existing rows, and the row considered above
+becomes `(pk=1, col=NULL)`: the row having `pk=1` is no longer
+identical on all nodes, and the cluster is therefore divergent.
+
+Note that the default `ignore_if_null` resolver is not affected by
+this risk, because any row that is replicated to node 2 will have
+`col=NULL`.
+
+Based on this example, we recommend running LiveCompare against the
+whole cluster at the end of a rolling schema upgrade where the
+`ignore` resolver was used, to make sure that any divergence is
+detected and fixed.
+
+## Terminology of row-types
+
+This document uses these row-types:
+
+- `SOURCE_OLD` is the row before the update, i.e. the key.
+- `SOURCE_NEW` is the new row coming from another node.
+- `TARGET` is the row that exists on the node already, i.e. the conflicting row.
+
+## Conflict Triggers
+
+Conflict triggers are executed when a conflict is detected by BDR, and
+are used to decide what happens when the conflict has occurred.
+
+- If the trigger function returns a row, the action will be applied to the target.
+- If the trigger function returns a NULL row, the action will be skipped.
+
+To clarify, if the trigger is called for a `DELETE`, the trigger should
+return NULL if it wants to skip the `DELETE`. If you wish the DELETE to proceed,
+then return a row value - either `SOURCE_OLD` or `TARGET` will work.
+When the conflicting operation is either `INSERT` or `UPDATE`, and the
+chosen resolution is the deletion of the conflicting row, the trigger
+must explicitly perform the deletion and return NULL.
+The trigger function may perform other SQL actions as it chooses, but
+those actions will only be applied locally, not replicated.
+
+When a real data conflict occurs between two or more nodes, there will be
+two or more concurrent changes occurring. When we apply those changes, the
+conflict resolution occurs independently on each node. This means the conflict
+resolution will occur once on each node, and can occur with a
+significant time difference between them. As a result, there is no
+possibility of communication between the multiple executions of the conflict
+trigger. It is the responsibility of the author of the conflict trigger to
+ensure that the trigger gives exactly the same result for all related events,
+otherwise data divergence will occur. Technical Support recommends that all conflict
+triggers are formally tested using the isolationtester tool supplied with
+BDR.
+
+!!! Warning
+    - Multiple conflict triggers can be specified on a single table, but
+      they should match distinct events, i.e. each conflict should only
+      match a single conflict trigger.
+ - Multiple triggers matching the same event on the same table are
+ not recommended; they might result in inconsistent behaviour, and
+ will be forbidden in a future release.
+
+If the same conflict trigger matches more than one event, the `TG_OP`
+variable can be used within the trigger to identify the operation that
+produced the conflict.
+
+By default, BDR detects conflicts by observing a change of replication origin
+for a row, hence it is possible for a conflict trigger to be called even
+when there is only one change occurring. Since in this case there is no
+real conflict, we say that this conflict detection mechanism can generate
+false positive conflicts. The conflict trigger must handle all of those
+identically, as mentioned above.
+
+Note that in some cases, timestamp conflict detection will not detect a
+conflict at all. For example, in a concurrent UPDATE/DELETE where the
+DELETE occurs just after the UPDATE, any nodes that see first the UPDATE
+and then the DELETE will not see any conflict. If no conflict is seen,
+the conflict trigger will never be called. The same situation, but using
+row version conflict detection, *will* see a conflict, which can then be
+handled by a conflict trigger.
+
+The trigger function has access to additional state information as well as
+the data row involved in the conflict, depending upon the operation type:
+
+- On `INSERT`, conflict triggers would be able to access `SOURCE_NEW` row from
+ source and `TARGET` row
+- On `UPDATE`, conflict triggers would be able to access `SOURCE_OLD` and
+ `SOURCE_NEW` row from source and `TARGET` row
+- On `DELETE`, conflict triggers would be able to access `SOURCE_OLD` row from
+ source and `TARGET` row
+
+The function `bdr.trigger_get_row()` can be used to retrieve `SOURCE_OLD`, `SOURCE_NEW`
+or `TARGET` rows, if a value exists for that operation.
+
+Changes to conflict triggers happen transactionally and are protected by
+Global DML Locks during replication of the configuration change, similarly
+to how some variants of `ALTER TABLE` are handled.
+
+If primary keys are updated inside a conflict trigger, it can
+sometimes lead to unique constraint violation errors due to a difference
+in the timing of execution.
+Hence, users should avoid updating primary keys within conflict triggers.
+
+## Transform Triggers
+
+These triggers are similar to Conflict Triggers, except they are executed
+for every row on the data stream against the specific table. The behaviour of
+return values and the exposed variables is similar, but transform triggers
+execute before a target row is identified, so there is no `TARGET` row.
+
+Multiple Transform Triggers can be specified on each table in BDR, if desired.
+Transform triggers execute in alphabetical order.
+
+A transform trigger can filter away rows, and it can do additional operations
+as needed. It can alter the values of any column, or set them to `NULL`. The
+return value decides what further action is taken:
+
+- If the trigger function returns a row, it will be applied to the target.
+- If the trigger function returns a `NULL` row, there is no further action to be
+ performed and as-yet unexecuted triggers will never execute.
+- The trigger function may perform other actions as it chooses.
+
+The trigger function has access to additional state information as well as
+rows involved in the conflict:
+
+- On `INSERT`, transform triggers would be able to access `SOURCE_NEW` row from source.
+- On `UPDATE`, transform triggers would be able to access `SOURCE_OLD` and `SOURCE_NEW` row from source.
+- On `DELETE`, transform triggers would be able to access `SOURCE_OLD` row from source.
+
+The function `bdr.trigger_get_row()` can be used to retrieve `SOURCE_OLD` or `SOURCE_NEW`
+rows; the `TARGET` row is not available, since this type of trigger executes before
+any such target row is identified.
+
+Transform Triggers look very similar to normal BEFORE row triggers, but have these
+important differences:
+
+- Transform trigger gets called for every incoming change.
+ BEFORE triggers will not be called at all for UPDATE and DELETE changes
+ if we don't find a matching row in a table.
+
+- Transform triggers are called before partition table routing occurs.
+
+- Transform triggers have access to the lookup key via SOURCE_OLD,
+ which is not available to normal SQL triggers.
+
+## Stream Triggers Variables
+
+Both Conflict Trigger and Transform Triggers have access to information about
+rows and metadata via the predefined variables provided by trigger API and
+additional information functions provided by BDR.
+
+In PL/pgSQL, the following predefined variables exist:
+
+### TG_NAME
+
+Data type name; variable that contains the name of the trigger actually fired.
+Note that the actual trigger name has a '\_bdrt' or '\_bdrc' suffix
+(depending on trigger type) compared to the name provided during trigger creation.
+
+### TG_WHEN
+
+Data type text; this will say `BEFORE` for both Conflict and Transform triggers.
+The stream trigger type can be obtained by calling the `bdr.trigger_get_type()`
+information function (see below).
+
+### TG_LEVEL
+
+Data type text; a string of `ROW`.
+
+### TG_OP
+
+Data type text; a string of `INSERT`, `UPDATE` or `DELETE`
+identifying the operation for which the trigger was fired.
+
+### TG_RELID
+
+Data type oid; the object ID of the table that caused the trigger invocation.
+
+### TG_TABLE_NAME
+
+Data type name; the name of the table that caused the trigger invocation.
+
+### TG_TABLE_SCHEMA
+
+Data type name; the name of the schema of the table that caused the trigger
+invocation. For partitioned tables, this is the name of the root table.
+
+### TG_NARGS
+
+Data type integer; the number of arguments given to the trigger function in
+the `bdr.create_conflict_trigger()` or `bdr.create_transform_trigger()`
+statement.
+
+### TG_ARGV\[]
+
+Data type array of text; the arguments from the `bdr.create_conflict_trigger()`
+or `bdr.create_transform_trigger()` statement. The index counts from 0.
+Invalid indexes (less than 0 or greater than or equal to `TG_NARGS`) result in
+a `NULL` value.
+
+## Information functions
+
+### bdr.trigger_get_row
+
+This function returns the contents of a trigger row specified by an identifier
+as a `RECORD`. This function returns NULL if called inappropriately, i.e.
+called with SOURCE_NEW when the operation type (TG_OP) is DELETE.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_row(row_id text)
+```
+
+#### Parameters
+
+- `row_id` - identifier of the row; can be any of SOURCE_NEW, SOURCE_OLD and
+ TARGET, depending on the trigger type and operation (see documentation of
+ individual trigger types).
+
+### bdr.trigger_get_committs
+
+This function returns the commit timestamp of a trigger row specified by an
+identifier. If this is not available (because the row is frozen, or the row does
+not exist), it will return NULL. It always returns NULL for the row identifier SOURCE_OLD.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_committs(row_id text)
+```
+
+#### Parameters
+
+- `row_id` - identifier of the row; can be any of SOURCE_NEW, SOURCE_OLD and
+ TARGET, depending on trigger type and operation (see documentation of
+ individual trigger types).
+
+### bdr.trigger_get_xid
+
+This function returns the local transaction id of a TARGET row specified by an
+identifier. If this is not available (because the row is frozen, or the row does
+not exist), it will return NULL. It always returns NULL for the SOURCE_OLD and
+SOURCE_NEW row identifiers.
+
+This is only available for conflict triggers.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_xid(row_id text)
+```
+
+#### Parameters
+
+- `row_id` - identifier of the row; can be any of SOURCE_NEW, SOURCE_OLD and
+ TARGET, depending on trigger type and operation (see documentation of
+ individual trigger types).
+
+### bdr.trigger_get_type
+
+This function returns the current trigger type, which can be either `CONFLICT`
+or `TRANSFORM`. Returns null if called outside a Stream Trigger.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_type()
+```
+
+### bdr.trigger_get_conflict_type
+
+This function returns the current conflict type if called inside a conflict
+trigger, or `NULL` otherwise.
+
+See [Conflict Types]\(conflicts.md#List of Conflict Types)
+for possible return values of this function.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_conflict_type()
+```
+
+### bdr.trigger_get_origin_node_id
+
+This function returns the node id corresponding to the origin for the trigger
+row_id passed in as argument. If the origin is not valid (which means the row
+has originated locally), it returns the node id of the source or target node,
+depending on the trigger row argument. It always returns NULL for the row
+identifier SOURCE_OLD. This can be used to define conflict triggers that always
+favour a trusted source node. See the example given below.
+
+#### Synopsis
+
+```sql
+bdr.trigger_get_origin_node_id(row_id text)
+```
+
+#### Parameters
+
+- `row_id` - identifier of the row; can be any of SOURCE_NEW, SOURCE_OLD and
+ TARGET, depending on trigger type and operation (see documentation of
+ individual trigger types).
+
+### bdr.ri_fkey_on_del_trigger
+
+When called as a BEFORE trigger, this function will use FOREIGN KEY information
+to avoid FK anomalies.
+
+#### Synopsis
+
+```sql
+bdr.ri_fkey_on_del_trigger()
+```
+
+## Row Contents
+
+The SOURCE_NEW, SOURCE_OLD and TARGET contents depend on the operation, REPLICA
+IDENTITY setting of a table, and the contents of the target table.
+
+The TARGET row is only available in conflict triggers. The TARGET row only
+contains data if a row was found when applying UPDATE or DELETE in the target
+table; if the row is not found, the TARGET will be NULL.
+
+## Triggers Notes
+
+Execution order for triggers:
+
+- Transform triggers - execute once for each incoming row on the target
+- Normal triggers - execute once per row
+- Conflict triggers - execute once per row where a conflict exists
+
+## Stream Triggers Manipulation Interfaces
+
+Stream Triggers are managed using SQL interfaces provided as part of the
+bdr-enterprise extension.
+
+Stream Triggers can only be created on tables with `REPLICA IDENTITY FULL`
+or tables without any `TOAST`able columns.
+
+### bdr.create_conflict_trigger
+
+This function creates a new conflict trigger.
+
+#### Synopsis
+
+```sql
+bdr.create_conflict_trigger(trigger_name text,
+ events text[],
+ relation regclass,
+ function regprocedure,
+ args text[] DEFAULT '{}')
+```
+
+#### Parameters
+
+- `trigger_name` - name of the new trigger
+- `events` - array of events on which to fire this trigger; valid values are
+ '`INSERT`', '`UPDATE`' and '`DELETE`'
+- `relation` - for which relation to fire this trigger
+- `function` - which function to execute
+- `args` - optional; specifies the array of parameters the trigger function will
+ receive upon execution (contents of `TG_ARGV` variable)
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This
+means that the replication is affected by the
+[ddl filters](repsets#ddl-replication-filtering) configuration.
+
+The function will take a global DML lock on the relation on which the trigger
+is being created.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
+
+Similarly to normal PostgreSQL triggers, the `bdr.create_conflict_trigger`
+function requires `TRIGGER` privilege on the `relation` and `EXECUTE`
+privilege on the function. This applies with a
+`bdr.backwards_compatibility` of 30619 or above. Additional
+security rules apply in BDR to all triggers including conflict
+triggers; see the [security chapter on triggers](security#triggers).
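+
+For illustration, registering the `update_if_newer_trig_func` function from
+the examples at the end of this chapter as a conflict trigger for UPDATEs on a
+hypothetical `public.mytable` could look like this:
+
+```sql
+SELECT bdr.create_conflict_trigger('update_if_newer_trigger',
+    ARRAY['UPDATE'],
+    'public.mytable',
+    'update_if_newer_trig_func()');
+```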
+
+### bdr.create_transform_trigger
+
+This function creates a new transform trigger.
+
+#### Synopsis
+
+```sql
+bdr.create_transform_trigger(trigger_name text,
+ events text[],
+ relation regclass,
+ function regprocedure,
+ args text[] DEFAULT '{}')
+```
+
+#### Parameters
+
+- `trigger_name` - name of the new trigger
+- `events` - array of events on which to fire this trigger, valid values are
+ '`INSERT`', '`UPDATE`' and '`DELETE`'
+- `relation` - for which relation to fire this trigger
+- `function` - which function to execute
+- `args` - optional, specify array of parameters the trigger function will
+ receive upon execution (contents of `TG_ARGV` variable)
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This
+means that the replication is affected by the
+[ddl filters](repsets#ddl-replication-filtering) configuration.
+
+The function will take a global DML lock on the relation on which the trigger
+is being created.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
+
+Similarly to normal PostgreSQL triggers, the `bdr.create_transform_trigger`
+function requires the `TRIGGER` privilege on the `relation` and `EXECUTE`
+privilege on the function. Additional security rules apply in BDR to all
+triggers including transform triggers; see the
+[security chapter on triggers](security#triggers).
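+
+Similarly, a hedged sketch that registers the `log_change` function from the
+examples below as a transform trigger on a hypothetical `public.mytable`:
+
+```sql
+SELECT bdr.create_transform_trigger('log_change_trigger',
+    ARRAY['INSERT', 'UPDATE', 'DELETE'],
+    'public.mytable',
+    'log_change()');
+```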
+
+### bdr.drop_trigger
+
+This function removes an existing stream trigger (both conflict and transform).
+
+#### Synopsis
+
+```sql
+bdr.drop_trigger(trigger_name text,
+ relation regclass,
+ ifexists boolean DEFAULT false)
+```
+
+#### Parameters
+
+- `trigger_name` - name of an existing trigger
+- `relation` - which relation is the trigger defined for
+- `ifexists` - when set to `true`, this command will ignore missing
+  triggers
+
+#### Notes
+
+This function uses the same replication mechanism as `DDL` statements. This
+means that the replication is affected by the
+[ddl filters](repsets#ddl-replication-filtering) configuration.
+
+The function will take a global DML lock on the relation on which the trigger
+is being dropped.
+
+This function is transactional - the effects can be rolled back with the
+`ROLLBACK` of the transaction, and the changes are visible to the current
+transaction.
+
+The `bdr.drop_trigger` function can be only executed by the owner of
+the `relation`.
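+
+For example, dropping the hypothetical conflict trigger from the sketch above:
+
+```sql
+SELECT bdr.drop_trigger('update_if_newer_trigger', 'public.mytable');
+```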
+
+## Stream Triggers Examples
+
+A conflict trigger which provides similar behaviour to the `update_if_newer`
+conflict resolver:
+
+```sql
+CREATE OR REPLACE FUNCTION update_if_newer_trig_func()
+RETURNS TRIGGER
+LANGUAGE plpgsql
+AS $$
+DECLARE
+    SOURCE_NEW record;
+    TARGET record;
+BEGIN
+    SOURCE_NEW := bdr.trigger_get_row('SOURCE_NEW');
+    TARGET := bdr.trigger_get_row('TARGET');
+
+    -- keep the local row if it has the newer commit timestamp,
+    -- otherwise apply the incoming row
+    IF (bdr.trigger_get_committs('TARGET') >
+        bdr.trigger_get_committs('SOURCE_NEW')) THEN
+        RETURN TARGET;
+    ELSE
+        RETURN SOURCE_NEW;
+    END IF;
+END;
+$$;
+```
+
+A conflict trigger which applies a delta change on a counter column and uses
+SOURCE_NEW for all other columns:
+
+```sql
+CREATE OR REPLACE FUNCTION delta_count_trg_func()
+RETURNS TRIGGER
+LANGUAGE plpgsql
+AS $$
+DECLARE
+ DELTA bigint;
+ SOURCE_OLD record;
+ SOURCE_NEW record;
+ TARGET record;
+BEGIN
+ SOURCE_OLD := bdr.trigger_get_row('SOURCE_OLD');
+ SOURCE_NEW := bdr.trigger_get_row('SOURCE_NEW');
+ TARGET := bdr.trigger_get_row('TARGET');
+
+ DELTA := SOURCE_NEW.counter - SOURCE_OLD.counter;
+ SOURCE_NEW.counter = TARGET.counter + DELTA;
+
+ RETURN SOURCE_NEW;
+END;
+$$;
+```
+
+A transform trigger which logs all changes to a log table instead of applying them:
+
+```sql
+CREATE OR REPLACE FUNCTION log_change()
+RETURNS TRIGGER
+LANGUAGE plpgsql
+AS $$
+DECLARE
+ SOURCE_NEW record;
+ SOURCE_OLD record;
+ COMMITTS timestamptz;
+BEGIN
+ SOURCE_NEW := bdr.trigger_get_row('SOURCE_NEW');
+ SOURCE_OLD := bdr.trigger_get_row('SOURCE_OLD');
+ COMMITTS := bdr.trigger_get_committs('SOURCE_NEW');
+
+ IF (TG_OP = 'INSERT') THEN
+ INSERT INTO log SELECT 'I', COMMITTS, row_to_json(SOURCE_NEW);
+ ELSIF (TG_OP = 'UPDATE') THEN
+ INSERT INTO log SELECT 'U', COMMITTS, row_to_json(SOURCE_NEW);
+ ELSIF (TG_OP = 'DELETE') THEN
+ INSERT INTO log SELECT 'D', COMMITTS, row_to_json(SOURCE_OLD);
+ END IF;
+
+ RETURN NULL; -- do not apply the change
+END;
+$$;
+```
+
+The example below shows a conflict trigger that implements Trusted Source
+conflict detection, also known as trusted site, preferred node or Always Wins
+resolution. This uses the bdr.trigger_get_origin_node_id() function to provide
+a solution that works with 3 or more nodes.
+
+```sql
+CREATE OR REPLACE FUNCTION test_conflict_trigger()
+RETURNS TRIGGER
+LANGUAGE plpgsql
+AS $$
+DECLARE
+ SOURCE record;
+ TARGET record;
+
+ TRUSTED_NODE bigint;
+ SOURCE_NODE bigint;
+ TARGET_NODE bigint;
+BEGIN
+ TARGET := bdr.trigger_get_row('TARGET');
+    IF (TG_OP = 'DELETE') THEN
+ SOURCE := bdr.trigger_get_row('SOURCE_OLD');
+ ELSE
+ SOURCE := bdr.trigger_get_row('SOURCE_NEW');
+ END IF;
+
+ TRUSTED_NODE := current_setting('customer.trusted_node_id');
+
+ SOURCE_NODE := bdr.trigger_get_origin_node_id('SOURCE_NEW');
+ TARGET_NODE := bdr.trigger_get_origin_node_id('TARGET');
+
+ IF (TRUSTED_NODE = SOURCE_NODE) THEN
+ RETURN SOURCE;
+ ELSIF (TRUSTED_NODE = TARGET_NODE) THEN
+ RETURN TARGET;
+ ELSE
+ RETURN NULL; -- do not apply the change
+ END IF;
+END;
+$$;
+```
diff --git a/product_docs/docs/bdr/3.7/tssnapshots.mdx b/product_docs/docs/bdr/3.7/tssnapshots.mdx
index 67d6a867ba6..bf8853be4da 100644
--- a/product_docs/docs/bdr/3.7/tssnapshots.mdx
+++ b/product_docs/docs/bdr/3.7/tssnapshots.mdx
@@ -1,8 +1,66 @@
---
-navTitle: Timestamp-Based Snapshots
-title: Timestamp-based Snapshots
+title: Timestamp-Based Snapshots
originalFilePath: tssnapshots.md
---
-
+The Timestamp-Based Snapshots feature of 2ndQPostgres allows reading data in
+a consistent manner via a user-specified timestamp rather than the usual
+MVCC snapshot. This can be used to access data on different BDR nodes
+at a common point-in-time; for example, as a way to compare data on
+multiple nodes for data quality checking. At this time, this feature does
+not work with write transactions.
+
+!!! Note
+ This feature is currently only available on EDB Postgres Extended.
+
+The use of timestamp-based snapshots is enabled via the `snapshot_timestamp`
+parameter; this accepts either a timestamp value or
+the special value 'current', which represents the current timestamp (now). If
+`snapshot_timestamp` is set, queries use that timestamp to determine the
+visibility of rows, rather than the usual MVCC semantics.
+
+For example, the following query returns the state of the `customers` table at
+2018-12-08 02:28:30 GMT:
+
+```sql
+SET snapshot_timestamp = '2018-12-08 02:28:30 GMT';
+SELECT count(*) FROM customers;
+```
+
+In plain 2ndQPostgres, this works only with future timestamps or the special
+'current' value mentioned above, so it cannot be used for historical queries
+(though that is on the longer-term roadmap).
+
+BDR works with and improves on that feature in a multi-node environment. First,
+BDR makes sure that all connections to other nodes have replicated any
+outstanding data that was added to the database before the specified
+timestamp, so that the timestamp-based snapshot is consistent across the whole
+multi-master group. Second, BDR adds an additional parameter called
+`bdr.timestamp_snapshot_keep`. This specifies a window of time during which
+queries can be executed against the recent history on that node.
+
+You can specify any interval, but be aware that VACUUM (including autovacuum)
+will not clean dead rows that are newer than up to twice the specified
+interval. This also means that transaction ids will not be freed for the same
+amount of time. As a result, using this can leave more bloat in user tables.
+Initially, we recommend 10 seconds as a typical setting, though you may wish
+to change that as needed.
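+
+As a minimal sketch of combining the two parameters in a single session (the
+values are illustrative, and depending on its context
+`bdr.timestamp_snapshot_keep` may instead need to be set in `postgresql.conf`):
+
+```sql
+SET bdr.timestamp_snapshot_keep = '10s';
+
+-- query a recent, consistent state across the BDR group
+SET snapshot_timestamp = 'current';
+SELECT count(*) FROM customers;
+```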
+
+Note that once a query has been accepted for execution, it may run for longer
+than `bdr.timestamp_snapshot_keep` without problems, just as any query normally can.
+
+Also note that information about how far back snapshots have been kept does not
+survive a server restart, so the oldest usable timestamp for a timestamp-based
+snapshot is the time of the last restart of the PostgreSQL instance.
+
+One can combine the use of `bdr.timestamp_snapshot_keep` with the
+`postgres_fdw` extension to get a consistent read across multiple nodes in a
+BDR group. This can be used to run parallel queries across nodes, when used in
+conjunction with foreign tables.
+
+There are no limits on the number of nodes in a multi-node query when using this
+feature.
+
+Use of timestamp-based snapshots does not increase inter-node traffic or
+bandwidth. Only the timestamp value is passed in addition to query data.
diff --git a/product_docs/docs/bdr/3.7/twophase.mdx b/product_docs/docs/bdr/3.7/twophase.mdx
index 8a6f5ad03d7..bd0ed9d5d38 100644
--- a/product_docs/docs/bdr/3.7/twophase.mdx
+++ b/product_docs/docs/bdr/3.7/twophase.mdx
@@ -5,4 +5,62 @@ originalFilePath: twophase.md
---
-
+An application may opt to use two-phase commit explicitly with BDR. See
+[Distributed Transaction Processing: The XA Specification](http://pubs.opengroup.org/onlinepubs/009680699/toc.pdf).
+
+The X/Open Distributed Transaction Processing (DTP) model envisages three
+software components:
+
+- An application program (AP) that defines transaction boundaries and specifies
+ actions that constitute a transaction.
+- Resource managers (RMs, such as databases or file access systems) that provide
+ access to shared resources.
+- A separate component called a transaction manager (TM) that assigns identifiers
+ to transactions, monitors their progress, and takes responsibility for
+ transaction completion and for failure recovery.
+
+BDR supports explicit external 2PC using the PREPARE TRANSACTION and
+COMMIT PREPARED/ROLLBACK PREPARED commands. Externally, a BDR cluster
+appears to be a single Resource Manager to the Transaction Manager for a
+single session.
+
+When `bdr.commit_scope` is `local`, the transaction is prepared only
+on the local node. Once committed, changes will be replicated, and
+BDR then applies post-commit conflict resolution.
+
+Using `bdr.commit_scope` set to `local` may seem nonsensical with
+explicit two-phase commit, but the option is offered to allow the user
+to control the trade-off between transaction latency and robustness.
+
+Explicit two-phase commit does not work in combination with either CAMO
+or the global commit scope. Future releases may enable this combination.
+
+## Usage
+
+Two-phase commits with a local commit scope work exactly as in standard
+PostgreSQL. Use the local commit scope and disable CAMO:
+
+```sql
+BEGIN;
+
+SET LOCAL bdr.enable_camo = 'off';
+SET LOCAL bdr.commit_scope = 'local';
+
+-- ... other commands possible ...
+```
+
+To start the first phase of the commit, the client must assign a
+global transaction id, which can be any unique string identifying the
+transaction:
+
+```sql
+PREPARE TRANSACTION 'some-global-id';
+```
+
+After a successful first phase, all nodes have applied the changes and
+are prepared for committing the transaction. The client must then invoke
+the second phase from the same node:
+
+```sql
+COMMIT PREPARED 'some-global-id';
+```
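+
+Putting the steps together, a complete explicit two-phase transaction might
+look like the following sketch (the table, column and values are illustrative):
+
+```sql
+BEGIN;
+
+SET LOCAL bdr.enable_camo = 'off';
+SET LOCAL bdr.commit_scope = 'local';
+
+-- application work
+UPDATE accounts SET balance = balance - 100 WHERE id = 42;
+
+-- first phase: prepare under a client-chosen global transaction id
+PREPARE TRANSACTION 'some-global-id';
+
+-- second phase, invoked from the same node: commit the prepared transaction ...
+COMMIT PREPARED 'some-global-id';
+
+-- ... or abort it instead:
+-- ROLLBACK PREPARED 'some-global-id';
+```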
diff --git a/product_docs/docs/bdr/3.7/upgrades.mdx b/product_docs/docs/bdr/3.7/upgrades.mdx
index ac140a27cb6..56e752249be 100644
--- a/product_docs/docs/bdr/3.7/upgrades.mdx
+++ b/product_docs/docs/bdr/3.7/upgrades.mdx
@@ -1,7 +1,339 @@
---
-title: Upgrades
+navTitle: Upgrades
+title: Application Schema Upgrades
originalFilePath: upgrades.md
---
-
+In this chapter we discuss upgrading software on a BDR cluster and how
+to minimize downtime for applications during the upgrade.
+
+## Overview
+
+A BDR cluster has two sets of software: the underlying PostgreSQL software
+(or a flavor of it) and the pglogical/BDR software. We discuss upgrading
+either or both of these to their supported major releases.
+
+To upgrade a BDR cluster, the following steps need to be performed on
+each node:
+
+- plan the upgrade
+- prepare for the upgrade
+- upgrade the server software
+- restart Postgres
+- check and validate the upgrade
+
+## Upgrade Planning
+
+While the BDR 3.6 release supports the PostgreSQL 10 and 11 major versions, BDR
+3.7 supports PostgreSQL 11, 12 and 13. Please refer to [this](index)
+page for the full list of compatible software. Since BDR 3.7 supports newer
+PostgreSQL releases, it's also possible to upgrade to a newer PostgreSQL
+release while upgrading from BDR 3.6 to BDR 3.7, with minimal or no
+application downtime.
+
+There are broadly two ways to upgrade the BDR version:
+
+- Upgrading one node at a time to the newer BDR version.
+- Joining a new node running a newer version of the BDR software and
+  then optionally dropping one of the old nodes.
+
+If you are only interested in upgrading the BDR software, either method
+can be used. But if you also want to upgrade the PostgreSQL version,
+then the second method must be used.
+
+### Rolling Server Software Upgrades
+
+A rolling upgrade is the process in which the [Server
+Software Upgrade](#server-software-upgrade) described below is performed on
+each node in the BDR group, one after another, while keeping replication working.
+
+An upgrade to 3.7 is only supported from 3.6, using a specific minimum
+maintenance release (e.g. 3.6.25). Please consult the Release Notes
+for the actual required minimum version. So if a node
+is running an older 3.6 release, it must first be upgraded to
+that minimum release and can only then be upgraded to 3.7.
+
+Just as with a single-node database, it's possible to stop all nodes,
+perform the upgrade on all nodes and only then restart the entire
+cluster. This strategy of upgrading all nodes at the same time avoids
+running with mixed BDR versions and therefore is the simplest, but
+obviously incurs some downtime.
+
+During the upgrade process, the application can be switched over to a node
+which is currently not being upgraded to provide continuous availability of
+the BDR group for applications.
+
+While the cluster is going through a rolling upgrade, replication happens
+between mixed versions of BDR3. For example, nodeA will have BDR 3.6.25, while
+nodeB and nodeC will have 3.7.8. In this state, the replication and group
+management will use the protocol and features from the oldest version (3.6.25
+in case of this example), so any new features provided by the newer version
+which require changes in the protocol will be disabled. Once all nodes are
+upgraded to the same version, the new features are automatically enabled.
+
+A BDR cluster is designed to be easily upgradeable. Most BDR releases
+support rolling upgrades, which means running part of the cluster on one
+release level and the remaining part of the cluster on a second, compatible,
+release level.
+
+A rolling upgrade starts with a cluster with all nodes at a prior release,
+then proceeds by upgrading one node at a time to the newer release, until
+all nodes are at the newer release. Should problems occur, do not attempt
+to downgrade without contacting Technical Support to discuss and provide
+options.
+
+An upgrade process may take an extended period of time when the user decides
+caution is required to reduce business risk, though this should not take any
+longer than 30 days without discussion and explicit agreement from Technical
+Support to extend the period of coexistence of two release levels.
+
+In case of problems during upgrade, do not initiate a second upgrade to a
+newer/different release level. Two upgrades should never occur concurrently
+in normal usage. Nodes should never be upgraded to a third release without
+specific and explicit instructions from Technical Support. A case where
+that might occur is if an upgrade failed for some reason and a Hot Fix was
+required to continue the current cluster upgrade process to successful
+conclusion. BDR has been designed and tested with more than 2 release
+levels, but this cannot be relied upon for production usage except in
+specific cases.
+
+### Rolling Upgrade Using Node Join
+
+The other method of upgrading the BDR software, with or without upgrading
+the underlying PostgreSQL major version, is to join a new node
+to the cluster and later drop one of the existing nodes running
+the older version of the software. Even with this method, some features
+that are available only in the newer version of the software may remain
+unavailable until all nodes are finally upgraded to the newer version.
+
+A new node running this release of BDR 3.7.8 can join a 3.6 cluster,
+where each node in the cluster is running the latest 3.6.x version of
+BDR. The joining node may run any of the supported PostgreSQL versions
+11-13, but you must not mix the Standard and Enterprise editions.
+If the older cluster is running the Standard Edition, then it's recommended
+that the new joining node also run the Standard Edition. Similarly,
+if the old cluster is running the Enterprise Edition, the new joining node
+should also run the Enterprise Edition.
+
+Care must be taken not to use features that are available only in
+the newer PostgreSQL versions 12-13 until all nodes are upgraded to the
+same, newer release of PostgreSQL. This is especially true for any
+new DDL syntax that may have been added to a newer release of PostgreSQL.
+
+Note that `bdr_init_physical` makes a byte-by-byte copy of the source node,
+so it cannot be used while upgrading from one major PostgreSQL version
+to another. In fact, `bdr_init_physical` currently requires that even the
+BDR version of the source and the joining node be exactly the same, so
+it cannot be used for rolling upgrades via the node join method. In
+all such cases, a logical join must be used.
+
+### Upgrading a CAMO-Enabled cluster
+
+CAMO protection requires at least one of the nodes of a CAMO pair to
+be operational. For upgrades, we recommend ensuring that no
+CAMO-protected transactions are running concurrently with the upgrade, or
+using a rolling upgrade strategy, giving the nodes enough time to
+reconcile between the upgrades and the corresponding node downtime
+due to the upgrade.
+
+## Upgrade Preparation
+
+BDR 3.7 contains several changes that may affect compatibility with
+previous releases. These may affect the Postgres configuration and
+deployment scripts, as well as applications using BDR. We recommend
+reviewing these changes and, where necessary, adjusting in advance of the upgrade.
+
+### Node Management
+
+The `bdr.create_node_group()` function has seen a number of changes:
+
+- It is now possible to create sub-groups, resulting in a tree-of-groups
+ structure of the whole BDR cluster. Monitoring views were updated
+ accordingly.
+- The deprecated parameters `insert_to_update`, `update_to_insert`,
+  `ignore_redundant_updates`, `check_full_tuple` and `apply_delay` were
+  removed.
+  Use `bdr.alter_node_set_conflict_resolver()` instead of `insert_to_update`
+  and `update_to_insert`. The `check_full_tuple` parameter is no longer needed,
+  as it is handled automatically based on the table conflict detection configuration.
+
+### Conflicts
+
+The configuration of conflict resolution and logging is now copied from the
+join source node to the newly joining node, rather than using defaults on the
+new node.
+
+The default conflict resolution for some of the conflict types was changed.
+See [Conflicts](conflicts#default-conflict-resolvers) for the new defaults.
+
+The conflict logging interfaces have changed from `bdr.alter_node_add_log_config`
+and `bdr.alter_node_remove_log_config` to `bdr.alter_node_set_log_config`.
+
+The default conflict logging table is now named `bdr.conflict_history` and the
+old `bdr.apply_log` no longer exists. The new table is partitioned using the
+new Autopartition feature of BDR 3.7.
+
+All conflicts are now logged by default to both log file and the conflict
+table.
+
+Deprecated functions `bdr.row_version_tracking_enable()` and
+`bdr.row_version_tracking_disable()` were removed. Use
+`bdr.alter_table_conflict_detection()` instead.
+
+Some of the configuration for conflict handling is no longer stored in the
+`pglogical` schema. Any diagnostic queries that were using the `pglogical`
+tables directly will have to switch to the appropriate tables in the `bdr` schema.
+Queries using `bdr.node_group`, `bdr.local_node_summary` or
+`bdr.node_local_info` will need to use the new columns `sub_repsets` and
+`pub_repsets` instead of `replication_sets`.
+
+### Removed Or Renamed Settings (GUCs)
+
+The setting `report_transaction_id` has been removed and is no longer
+known to Postgres. It had already been deprecated in the 3.6.x branch,
+and the underlying functionality is instead enabled automatically when
+needed. So it's safe to remove `report_transaction_id` from your
+configuration or reset it via `ALTER SYSTEM` even on 3.6.22 (and
+newer). Otherwise, Postgres refuses to start after the upgrade and
+reports an "unrecognized configuration parameter" error.
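+
+For example, the setting could be cleared from a superuser session before the
+upgrade (a minimal sketch; per the above, this is safe on 3.6.22 and newer):
+
+```sql
+-- Clear the setting from postgresql.auto.conf, if it was set via ALTER SYSTEM,
+-- and reload the configuration. If it is set in postgresql.conf, remove it there.
+ALTER SYSTEM RESET report_transaction_id;
+SELECT pg_reload_conf();
+```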
+
+The GUC to enable CAMO has moved from Postgres to BDR and has been renamed
+from `pg2q.enable_camo` to `bdr.enable_camo`.
+
+## Server Software Upgrade
+
+The upgrade of BDR software on individual nodes happens in-place. There is no need for
+backup and restore when upgrading the BDR extension.
+
+The first step in the upgrade is to install the new version of the BDR packages, which
+will install both the new binary and the extension SQL script. This step depends
+on the operating system used.
+
+### Restart Postgres
+
+Upgrading the binary and extension scripts by itself does not upgrade BDR
+in the running instance of PostgreSQL. To do that, the PostgreSQL instance
+needs to be restarted so that the new BDR binary can be loaded (the BDR binary
+is loaded at the start of the PostgreSQL server). After that, the node is
+upgraded. The extension SQL upgrade scripts are executed automatically as
+needed.
+
+!!! Warning
+ It's important to never run the `ALTER EXTENSION ... UPDATE` command before the
+ PostgreSQL instance is restarted, as that will only upgrade the SQL-visible
+ extension but keep the old binary, which can cause unpredictable behaviour or
+ even crashes. The `ALTER EXTENSION ... UPDATE` command should never be needed;
+ BDR3 maintains the SQL-visible extension automatically as needed.
+
+### Upgrade Check and Validation
+
+After this procedure, your BDR node is upgraded. You can verify the current
+version of the BDR3 binary like this:
+
+```sql
+SELECT bdr.bdr_version();
+```
+
+The upgrade of BDR3 will usually also upgrade the version of pglogical 3
+installed in the system. The current version of pglogical can be checked using:
+
+```sql
+SELECT pglogical.pglogical_version();
+```
+
+Always check the [monitoring](monitoring) after upgrade
+of a node to confirm that the upgraded node is working as expected.
+
+## Database Encoding
+
+We recommend using `UTF-8` encoding in all replicated databases.
+BDR does not support replication between databases with different
+encoding. There is currently no supported path to upgrade/alter encoding.
+
+Similar to the upgrade of BDR itself, there are two approaches to
+upgrading the application schema. The simpler option is to stop all
+affected applications, perform the schema upgrade and restart the
+applications once they have been upgraded to use the new schema variant.
+Again, this imposes some downtime.
+
+To eliminate this downtime, BDR offers ways to perform a rolling
+application schema upgrade as documented in the following section.
+
+## Rolling Application Schema Upgrades
+
+By default, DDL will automatically be sent to all nodes. This can be
+controlled manually, as described in [DDL Replication](ddl), which
+could be used to create differences between database schemas across nodes.
+BDR is designed to allow replication to continue even while minor
+differences exist between nodes. These features are designed to allow
+application schema migration without downtime, or to allow logical
+standby nodes for reporting or testing.
+
+!!! Warning
+ Application Schema Upgrades are managed by the user, not by BDR.
+ Careful scripting will be required to make this work correctly
+ on production clusters. Extensive testing is advised.
+
+Details of this are covered in
+[Replicating between nodes with differences](appusage).
+
+When one node runs DDL that adds a new table, nodes that have not
+yet received the latest DDL will need to cope with the extra table.
+In view of this, the appropriate setting for rolling schema upgrades
+is to configure all nodes to apply the `skip` resolver in case of a
+`target_table_missing` conflict. This must be performed before any
+node has additional tables added, and is intended to be a permanent
+setting.
+
+This is done with the following query, which must be **executed
+separately on each node**, after replacing `node1` with the actual
+node name:
+
+```sql
+SELECT bdr.alter_node_set_conflict_resolver('node1',
+ 'target_table_missing', 'skip');
+```
+
+When one node runs DDL that adds a column to a table, nodes that have not
+yet received the latest DDL will need to cope with the extra columns.
+In view of this, the appropriate setting for rolling schema
+upgrades is to configure all nodes to apply the `ignore` resolver in
+case of a `target_column_missing` conflict. This must be performed
+before one node has additional columns added and is intended to be a
+permanent setting.
+
+This is done with the following query, which must be **executed
+separately on each node**, after replacing `node1` with the actual
+node name:
+
+```sql
+SELECT bdr.alter_node_set_conflict_resolver('node1',
+ 'target_column_missing', 'ignore');
+```
+
+When one node runs DDL that removes a column from a table, nodes that
+have not yet received the latest DDL will need to cope with the missing column.
+This situation will cause a `source_column_missing` conflict, which uses
+the `use_default_value` resolver. Thus, columns that neither
+accept NULLs nor have a DEFAULT value require a two-step process (see the
+sketch after this list):
+
+1. Remove the NOT NULL constraint, or add a DEFAULT value for the column,
+   on all nodes.
+2. Remove the column.
+
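+As a minimal sketch of the two steps (the table and column names are
+illustrative; step 2 should only be run once step 1 has been applied on all
+nodes):
+
+```sql
+-- Step 1: ensure the remaining nodes can fill the column via the
+-- use_default_value resolver.
+ALTER TABLE my_table ALTER COLUMN legacy_col DROP NOT NULL;
+-- or, alternatively:
+-- ALTER TABLE my_table ALTER COLUMN legacy_col SET DEFAULT 0;
+
+-- Step 2: remove the column.
+ALTER TABLE my_table DROP COLUMN legacy_col;
+```
+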
+Constraints can be removed in a rolling manner.
+There is currently no supported way to add table
+constraints in a rolling manner, one node at a time.
+
+When one node runs DDL that changes the type of an existing column, the
+operation may or may not rewrite the underlying table data, depending on
+whether the current type is binary coercible to the target type. If it is,
+the change is only a metadata update of the underlying column type.
+Rewriting a table is normally restricted.
+However, in controlled DBA environments, it is possible to change
+the type of a column to an automatically castable one by adopting
+a rolling upgrade of that column's type in a non-replicated
+environment on all the nodes, one by one. More details are provided in the
+[ALTER TABLE](ddl) section.
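+
+As a minimal sketch, `varchar` is binary coercible to `text`, so a change such
+as the following is typically only a metadata update and does not rewrite the
+table data (the table and column names are illustrative):
+
+```sql
+ALTER TABLE my_table ALTER COLUMN description TYPE text;
+```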
diff --git a/product_docs/docs/edbcloud/beta/administering_cluster/01_portal_access.mdx b/product_docs/docs/edbcloud/beta/administering_cluster/01_portal_access.mdx
index e7caed105b1..a45fa4bfc59 100644
--- a/product_docs/docs/edbcloud/beta/administering_cluster/01_portal_access.mdx
+++ b/product_docs/docs/edbcloud/beta/administering_cluster/01_portal_access.mdx
@@ -1,5 +1,5 @@
---
-title: "Managing Portal Access"
+title: "Managing portal access"
---
EDB Cloud uses Azure Active Directory (AD) to authenticate users and role based access controls to grant users access to different parts of the application.
@@ -32,7 +32,7 @@ The available *objects* are: backups, billing, clusters, events, permissions, ro
!!! Note
Not every object supports all the actions. A typical example is *versions* object is always *read* only.
-### Permissions by Role
+### Permissions by role
The following are the default permission by role:
@@ -52,14 +52,14 @@ The following are the default permission by role:
| | delete | | | | | | | | |
-### Editing Roles
+### Editing roles
To edit roles:
1. Navigate to **Admin > Roles**.
3. Select the edit icon for the role in the list.
-#### Changing Role Name
+#### Changing role name
To change the name or description of the role:
1. Select the **Settings** tab.
@@ -67,7 +67,7 @@ To change the name or description of the role:
2. Edit **Name** or **Description**.
3. Press **Save**.
-#### Changing Role Permissions
+#### Changing role permissions
To change permissions associated with the role:
1. Select the **Permissions** tab.
@@ -83,7 +83,7 @@ To change permissions associated with the role:
When you configured your Azure subscription, you also enabled EDB Cloud to authenticate users from your organization using Azure AD. Before users become visible in the EDB Cloud **Users** screen they need to sign in using Azure AD after receiving special emails sent by your organization. New users signed in to EDB Cloud have a minimum set of permissions until you assign them a role.
-### Assigning Roles to Users
+### Assigning roles to users
To assign appropriate roles to users:
1. Navigate to **Admin > Users**.
@@ -95,7 +95,7 @@ To assign appropriate roles to users:
!!! Note
For a user's role assignment to take effect, the user needs to log out from EDB Cloud and log in again.
-### Viewing Users
+### Viewing users
To view all users from your organization that have logged in at least once:
@@ -103,7 +103,7 @@ To view all users from your organization that have logged in at least once:
2. View the list of users sorted by most recent log in.
-## Example Scenario
+## Example scenario
1. The EDB Cloud organization is created, and Tom logs in and is granted the owner role.
diff --git a/product_docs/docs/edbcloud/beta/administering_cluster/03_account_activity.mdx b/product_docs/docs/edbcloud/beta/administering_cluster/03_account_activity.mdx
index 1a8ab8825de..c0246fd4681 100644
--- a/product_docs/docs/edbcloud/beta/administering_cluster/03_account_activity.mdx
+++ b/product_docs/docs/edbcloud/beta/administering_cluster/03_account_activity.mdx
@@ -1,5 +1,5 @@
---
-title: "Reviewing Account Activity"
+title: "Reviewing account activity"
---
The activity log collects EDB Cloud events based on user activity within the portal. It can be used to audit activities performed by users from your organizations or research activities that may have affected your account.
@@ -22,9 +22,9 @@ Events are related to the following resource types:
!!! Note
Database events are **not** logging activity on the Postgres server. They are logging the use of the portal to create or modify database clusters.
-## Viewing and Searching the Activity Log
+## Viewing and searching the activity log
-To view events, navigate to the [Activity Log](https://portal.edbcloud.com/activityLog) page on the [EDB Cloud](https://portal.edbcloud.com) portal. To search events, use the filters at the top of the page.
+To view events, navigate to the [**Activity Log**](https://portal.edbcloud.com/activityLog) page on the [EDB Cloud](https://portal.edbcloud.com) portal. To search events, use the filters at the top of the page.
The following fields are in the activity log:
@@ -33,5 +33,5 @@ The following fields are in the activity log:
| **Activity Name** | Name of an event in the format _Action Resource-Type, Resource-name_ |
| **User** | User responsible for the event |
| **Date** | Date when the action was performed |
-| **Resource** | Resource Type of the resource |
+| **Resource** | Resource type of the resource |
diff --git a/product_docs/docs/edbcloud/beta/administering_cluster/index.mdx b/product_docs/docs/edbcloud/beta/administering_cluster/index.mdx
index 90719fd39d1..e5088568d9b 100644
--- a/product_docs/docs/edbcloud/beta/administering_cluster/index.mdx
+++ b/product_docs/docs/edbcloud/beta/administering_cluster/index.mdx
@@ -1,5 +1,5 @@
---
-title: "Administering Your Account"
+title: "Administering your account"
---
-In this section, account owners can find information on administrative activities for the EDB Clouud account including portal and database user access management as well as account activity reviews.
+In this section, account owners can find information on administrative activities for the EDB Cloud account including portal and database user access management as well as account activity reviews.
diff --git a/product_docs/docs/edbcloud/beta/getting_started/01_check_resource_limits.mdx b/product_docs/docs/edbcloud/beta/getting_started/01_check_resource_limits.mdx
index 6e62dea6741..1a874eacf62 100644
--- a/product_docs/docs/edbcloud/beta/getting_started/01_check_resource_limits.mdx
+++ b/product_docs/docs/edbcloud/beta/getting_started/01_check_resource_limits.mdx
@@ -1,5 +1,5 @@
---
-title: Raising Azure Resource Limits
+title: Raising Azure resource limits
---
By default, Azure sets a very low limit on the number of virtual machines and cores per region and on the number of Public IP Addresses per region that are available in a given subscription.
@@ -10,7 +10,7 @@ The default Public IP address limits for Public IP Addresses Basic and Public IP
VIDEO
-## Virtual Machine Quota Requirements
+## Virtual machine quota requirements
In each region, EDB Cloud uses six ESv3 and six DSv2 virtual machine cores to manage your EDB Cloud infrastructure.
Your Postgres clusters deployed in the region use separate ESv3 virtual machine cores.
The number of cores depends on the Instance Type and High Availability options of the clusters you provision. You can calculate the number of ESv3 cores required for your cluster based on the following:
@@ -21,7 +21,7 @@ The number of cores depends on the Instance Type and High Availability options o
As an example, if you provision the largest virtual machine E64Sv3 with high availability enabled, it requires (3 * 64)=192 ESv3 cores per region. EDB Cloud infrastructure requires an additional six ESv3 and six DSv2 virtual machine cores per region.
-## Checking Current Utilization
+## Checking current utilization
To check if you have adequate Azure resources to provision new clusters:
@@ -33,7 +33,7 @@ To check if you have adequate Azure resources to provision new clusters:
6. Search for Public IP to view networks limits.
-## Increasing Network Quota
+## Increasing network quota
You can increase the number of public IP addresses for your account either by using Azure's portal if you have appropriate privileges or by submitting a support request. See:
@@ -42,7 +42,7 @@ You can increase the number of public IP addresses for your account either by us
- [Request networking quota increase at subscription level using Usages + quotas](https://docs.microsoft.com/en-us/azure/azure-portal/supportability/networking-quota-requests#request-networking-quota-increase-at-subscription-level-using-usages--quotas)
-## Increasing Virtual Machine Quota
+## Increasing virtual machine quota
You can increase the number of ESv3 Series virtual machines per region for your account either by using Azure's portal if you have appropriate privileges or by submitting a support request. See:
diff --git a/product_docs/docs/edbcloud/beta/getting_started/02_connect_cloud_account.mdx b/product_docs/docs/edbcloud/beta/getting_started/02_connect_cloud_account.mdx
index 4c20f49c0fa..3b893b9d097 100644
--- a/product_docs/docs/edbcloud/beta/getting_started/02_connect_cloud_account.mdx
+++ b/product_docs/docs/edbcloud/beta/getting_started/02_connect_cloud_account.mdx
@@ -1,16 +1,16 @@
---
-title: "Connecting Your Cloud Account"
+title: "Connecting your EDB Cloud account"
---
This topic describes how to set up your EDB Cloud account on Azure Marketplace. Your Azure subscription for EDB Cloud is where you create and manage Postgres clusters.
-## Before You Connect Your Cloud Account
+## Before you connect your cloud account
1. Ensure you have an active Microsoft Azure subscription. If you need to create one, see [Create an additional Azure subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription).
1. Within Azure Active Directory, ensure your role is owner and your user type is member (not guest) for the subscription you are using.
-1. Create an Azure Active Directory Application client to delegate Identity and Access Management functions to Azure Active Directory (AD). You can create the Azure Active Directory Application using the Azure Portal, but a simpler and less error-prone approach is to use the `create-spn` script (see [Create Azure Active Directory Application Using `create-spn`](#create-azure-active-directory-application-using-create-spn)). The script approach requires the Azure API.
+1. Create an Azure Active Directory Application client to delegate Identity and Access Management functions to Azure Active Directory (AD). You can create the Azure Active Directory Application using the Azure Portal, but a simpler and less error-prone approach is to use the `create-spn` script (see [Create Azure Active Directory Application using `create-spn`](#create-azure-active-directory-application-using-create-spn)). The script approach requires the Azure API.
!!! Note
Some steps of the subscription process require approval of an Azure AD global administrator.
@@ -18,7 +18,7 @@ This topic describes how to set up your EDB Cloud account on Azure Marketplace.
-### Create Azure Active Directory Application Using Azure Portal
+### Create Azure Active Directory Application using the Azure portal
!!! Note
Create your Azure AD Application in the same tenant as the subscription you want it associated with.
@@ -31,7 +31,7 @@ Take note of the **Application (client) ID**, you need it to configure your EDB
1. Choose _application secret_ as an authentication option for the application. See [Create a new Azure AD application secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret) for instructions. Take note of the Azure AD App Secret, you need it to configure your cloud account.
1. Assign the owner role to the application. See [Assign a role to the application](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#assign-a-role-to-the-application) for instructions. Enter the **Display name** of the Azure AD application in the **Select** field of the **Add role assignment** panel. See [Open the Add role assignment pane](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current#step-2-open-the-add-role-assignment-pane) for instructions.
-### Create Azure Active Directory Application Using `create-spn`
+### Create Azure Active Directory Application using `create-spn`
To simplify the process of creating an Azure AD Application, EDB provides the [`create-spn`](https://github.com/EnterpriseDB/cloud-utilities/blob/main/azure/create-spn.sh) script for Azure API users. The script automates the manual steps described in the previous section. You can download it [here](https://github.com/EnterpriseDB/cloud-utilities/tree/main/azure).
Before using the script ensure that the following utilities are installed on your machine:
@@ -44,7 +44,7 @@ The syntax of the command is:
```
Flag and option details:
-| Flag/Option Shortcut | Flag/Option Long Name | Description |
+| Flag/option shortcut | Flag/option long name | Description |
| -------------------- | --------------------- | ----------- |
| -d *NAME* | --display-name *NAME* | Name of Azure AD Application |
| -s *SUBSCRIPTION_ID* | --subscription *SUBSCRIPTION_ID* | Azure Subscription ID used by EDB Cloud |
@@ -77,7 +77,7 @@ WARNING: 'name' property in the output is deprecated and will be removed in the
}
```
-## Connect Your Cloud Account
+## Connect your cloud account
To connect your cloud account with your Azure subscription:
@@ -117,15 +117,15 @@ To connect your cloud account with your Azure subscription:
11. Select **Submit.**
-## What's Next
+## What's next
### Login
You can now log in to your EDB Cloud account using your Azure AD identity if you filled in the parameters correctly.
-### Invite Users
+### Invite users
-You can invite new users by sharing the link to the EDB Cloud portal and having them log in with their Microsoft Azure Active Directory account. New users are not assigned any roles by default. After they log in the first time, you see them in the User list and are able to assign them a role with permissions to EDB Cloud. See [Assigning Roles to Users](../administering_cluster/01_portal_access/#assigning-roles-to-users) for instructions.
+You can invite new users by sharing the link to the EDB Cloud portal and having them log in with their Microsoft Azure Active Directory account. New users are not assigned any roles by default. After they log in the first time, you see them in the User list and are able to assign them a role with permissions to EDB Cloud. See [Assigning roles to users](../administering_cluster/01_portal_access/#assigning-roles-to-users) for instructions.
!!! Note
Azure AD email domain will likely be different than the email domain regularly used by your organization.
diff --git a/product_docs/docs/edbcloud/beta/getting_started/03_create_cluster.mdx b/product_docs/docs/edbcloud/beta/getting_started/03_create_cluster.mdx
index 65ffbc32846..f6da75f8b49 100644
--- a/product_docs/docs/edbcloud/beta/getting_started/03_create_cluster.mdx
+++ b/product_docs/docs/edbcloud/beta/getting_started/03_create_cluster.mdx
@@ -1,8 +1,8 @@
---
-title: "Creating a Cluster"
+title: "Creating a cluster"
---
!!! Note
-Prior to creating your cluster, make sure you have adequate Azure resources or your request to create a cluster will fail. See [Raising Your Azure Resource Limits](01_check_resource_limits).
+Prior to creating your cluster, make sure you have adequate Azure resources or your request to create a cluster will fail. See [Raising your Azure resource limits](01_check_resource_limits).
!!!
To create a cluster:
@@ -22,13 +22,13 @@ To create a cluster:
!!! Note
When you elect not to configure settings on optional tabs, the default values are used.
-## Cluster Info
+## Cluster Info tab
1. Enter the name for your cluster in the **Cluster Name** field.
2. Enter a password for your cluster in the **Password** field. This will be the password for the user edb_admin.
3. Select **Next: Operational Settings**.
-## Operational Settings
+## Operational Settings tab
1. In the **Database Type** section,
1. Select the type of Postgres you want to use in the **Postgres Type** field:
- [*PostgreSQL*](../../../supported-open-source/postgresql/) is an open-source object-relational database management system.
@@ -48,7 +48,7 @@ To create a cluster:
Private networking allows only IP addresses within your private network to connect to your cluster.
7. To optionally make updates to your database configuration parameters, select **Next: DB Configuration**.
-## DB Configuration
+## DB Configuration tab
In the **Parameters** section, you can update the value of the database configuration parameters, as needed.
To update the parameter values, see [Modifying Your Database Configuration Parameters](../using_cluster/03_modifying_your_cluster/05_db_configuration_parameters)
@@ -61,10 +61,10 @@ To update the parameter values, see [Modifying Your Database Configuration Param
Clusters are configured across availability zones in regions with availability zones. When high availability is disabled, only one instance is provisioned.
See [Supported Architectures](../overview/02_high_availibility) for more information.
-## What’s Next
+## What’s next
Now that you’ve created your cluster, here are some additional resources for cluster use and management:
-* [Using your Cluster]( ../using_cluster/)
-* [Managing Postgres Access](../../administering_cluster/02_postgres_access/)
+* [Using your cluster]( ../using_cluster/)
+* [Managing Postgres access](../../administering_cluster/02_postgres_access/)
diff --git a/product_docs/docs/edbcloud/beta/getting_started/index.mdx b/product_docs/docs/edbcloud/beta/getting_started/index.mdx
index 8e708fab683..e5010697056 100644
--- a/product_docs/docs/edbcloud/beta/getting_started/index.mdx
+++ b/product_docs/docs/edbcloud/beta/getting_started/index.mdx
@@ -1,5 +1,5 @@
---
-title: "Getting Started"
+title: "Getting started"
indexCards: simple
---
diff --git a/product_docs/docs/edbcloud/beta/overview/02_high_availibility.mdx b/product_docs/docs/edbcloud/beta/overview/02_high_availibility.mdx
index be0be096111..7eb081f6a7d 100644
--- a/product_docs/docs/edbcloud/beta/overview/02_high_availibility.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/02_high_availibility.mdx
@@ -1,10 +1,10 @@
---
-title: "Supported Architectures"
+title: "Supported architectures"
---
EDB Cloud enables deploying a cluster with or without high availability. The option is controlled with the **High Availablity** slide button on the [Create Cluster](https://portal.edbcloud.com/create-cluster) page in the [EDB Cloud](https://portal.edbcloud.com) portal.
-## High Availability - Enabled
+## High availability - enabled
The high availability option is provided to minimize downtime in cases of failures. High Availability clusters are configured automatically with - one _primary_ and two _replicas_ - with replicas staying up-to-date through physical streaming replication. In cloud regions with availability zones, clusters are provisioned across multiple availability zones to provide fault tolerance in the face of a datacenter failure
@@ -18,7 +18,7 @@ Incoming client connections are always routed to the current primary. In case of
By default, replication is synchronous to one replica and asynchronous to the other. That is, one replica must confirm that a transaction record was written to disk before the client receives acknowledgment of a successful commit. In PostgreSQL terms, `synchronous_commit` is set to `on` and `synchronous_standby_names` is set to `ANY 1 (replica-1, replica-2)`. This behavior can be modified on a per-transaction, per-session, per-user, or per-database basis with appropriate `SET` or `ALTER` commands.
-## High Availability - Not Enabled
+## High availability - not enabled
For non-production use cases where high availability is not a primary concern, a cluster deployment with high availability not enabled provides one primary with no standby servers for failover or read-only workloads.
diff --git a/product_docs/docs/edbcloud/beta/overview/03_security.mdx b/product_docs/docs/edbcloud/beta/overview/03_security.mdx
index a42c04cfcf9..00a191c3f87 100644
--- a/product_docs/docs/edbcloud/beta/overview/03_security.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/03_security.mdx
@@ -3,12 +3,12 @@ title: "Security"
---
EDB Cloud runs in your own cloud account, isolates your data from other users, and gives you control over our access to it. The key security features are:
-- **Data Isolation:** Clusters are installed and managed in your cloud environment. Complete segregation of your data is ensured: your data never leaves your cloud account, and compromise of another EDB Cloud customer's systems does not put your data at risk.
+- **Data isolation:** Clusters are installed and managed in your cloud environment. Complete segregation of your data is ensured: your data never leaves your cloud account, and compromise of another EDB Cloud customer's systems does not put your data at risk.
-- **Granular Access Control:** You can use Single Sign On (SSO) and define your own sets of roles and Role Based Access Control (RBAC) policies to manage your individual cloud environments. See [Managing Portal Access](../administering_cluster/01_user_access) for more information.
-- **Data Encryption:** All data in EDB Cloud is encrypted in motion and at rest. Network traffic is encrypted using Transport Layer Security (TLS) v1.2 or greater, where applicable. Data at rest is encrypted using AES with 256 bit keys. Data encryption keys are envelope encrypted and the wrapped data encryption keys are securely stored in an Azure Key Vault instance in your account. Encryption keys never leave your environment.
-- **Portal Audit Logging:** Activities in the portal, such as those related to user roles, organization updates, and cluster creation and deletion are tracked automatically and viewed in the activity log.
-- **Database Logging and Auditing:** Functionality to track and analyze database activities is enabled automatically. For PostgreSQL, the PostgreSQL Audit Extension (pgAudit) is enabled automatically for you when deploying a Postgres cluster. For EDB Postgres Advanced Server, the EDB Audit extension (edbAudit) is enabled automatically for you.
+- **Granular access control:** You can use Single Sign On (SSO) and define your own sets of roles and Role Based Access Control (RBAC) policies to manage your individual cloud environments. See [Managing portal access](../administering_cluster/01_user_access) for more information.
+- **Data encryption:** All data in EDB Cloud is encrypted in motion and at rest. Network traffic is encrypted using Transport Layer Security (TLS) v1.2 or greater, where applicable. Data at rest is encrypted using AES with 256 bit keys. Data encryption keys are envelope encrypted and the wrapped data encryption keys are securely stored in an Azure Key Vault instance in your account. Encryption keys never leave your environment.
+- **Portal audit logging:** Activities in the portal, such as those related to user roles, organization updates, and cluster creation and deletion are tracked automatically and viewed in the activity log.
+- **Database logging and auditing:** Functionality to track and analyze database activities is enabled automatically. For PostgreSQL, the PostgreSQL Audit Extension (pgAudit) is enabled automatically for you when deploying a Postgres cluster. For EDB Postgres Advanced Server, the EDB Audit extension (edbAudit) is enabled automatically for you.
- **pgAudit:** The classes of statements being logged for pgAudit are set globally on a cluster with `pgaudit.log = 'write,ddl'`. The following statements made on tables will be logged by default when the cluster type is PostgreSQL: `INSERT`, `UPDATE`, `DELETE`, `TRUNCATE`, AND `COPY`. All `DDL` will be logged.
-- **Database Cluster Permissions** The edb_admin account created during the _Create Cluster_ process includes the `CREATEDB` and `CREATEROLE` database roles. EDB recommends using the edb_admin account to create a new application user and new application database for further isolation. See [Managing Postgres Access](../administering_cluster/02_postgres_access) for more information.
+- **Database cluster permissions:** The edb_admin account created during the *create cluster* process includes the `CREATEDB` and `CREATEROLE` database roles. EDB recommends using the edb_admin account to create a new application user and new application database for further isolation. See [Managing Postgres access](../administering_cluster/02_postgres_access) for more information.
diff --git a/product_docs/docs/edbcloud/beta/overview/04_responsibility_model.mdx b/product_docs/docs/edbcloud/beta/overview/04_responsibility_model.mdx
index 9a8621769cd..1ec47fe7c9e 100644
--- a/product_docs/docs/edbcloud/beta/overview/04_responsibility_model.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/04_responsibility_model.mdx
@@ -1,18 +1,18 @@
---
-title: "Responsibility Model"
+title: "Responsibility model"
---
Security and confidentiality is a shared responsibility between you and EDB. EDB provides a secure platform that enables you to create and maintain secure database clusters deployed on EDB Cloud. You have numerous responsibilities around the security of your clusters and data held within them.
-The following Responsibility Model describes the distribution of specific responsibilities between you and EDB.
+The following responsibility model describes the distribution of specific responsibilities between you and EDB.
-## High Availability
+## High availability
- EDB is responsible for deploying clusters with one primary and two replicas. In cloud regions with availability zones, clusters will be deployed across multiple availability zones.
- You are responsible for choosing if you want to enable high availability.
-## Database Performance
+## Database performance
- EDB is responsible for managing and monitoring the underlying infrastructure resources.
- You are responsible for data modeling, query performance, and scaling the cluster to meet your performance needs.
@@ -20,7 +20,7 @@ The following Responsibility Model describes the distribution of specific respon
- EDB is responsible for managing and monitoring the underlying infrastructure.
- You are responsible for choosing the appropriate resources for your workload, including instance type, storage, and connections. You are also responsible for managing your Azure resource limits to ensure the underlying infrastructure can be scaled.
-## Backups and Restores
+## Backups and restores
- EDB is responsible for taking automatic backups and storing them in cross-regional Azure Blob Storage.
- You are responsible for periodically restoring and verifying the restores to ensure that archives are completing frequently and successfully to meet your needs.
@@ -28,6 +28,6 @@ The following Responsibility Model describes the distribution of specific respon
- EDB is responsible for data encryption at rest and in transit. EDB is also responsible for encrypting backups.
- You are responsible for column level encryption to protect sensitive database attributes from unauthorized access by authorized users and applications of the database.
-## Credential Management
+## Credential management
- EDB is responsible for making credentials available to customers.
- You are responsible for managing and securing your passwords, both for EDB Cloud and your database passwords.
diff --git a/product_docs/docs/edbcloud/beta/overview/05_database_version_policy.mdx b/product_docs/docs/edbcloud/beta/overview/05_database_version_policy.mdx
index 4257b3f9748..4b24abf71d2 100644
--- a/product_docs/docs/edbcloud/beta/overview/05_database_version_policy.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/05_database_version_policy.mdx
@@ -1,23 +1,23 @@
---
-title: "Database Version Policy"
+title: "Database version policy"
---
-## Supported Postgres Types and Versions
+## Supported Postgres types and versions
-| **Postgres Type** | **Major Versions** |
+| **Postgres type** | **Major versions** |
| ----- | ------------------------- |
| PostgreSQL | 11 - 13 |
| EDB Postgres Advanced Server | 11 - 13 |
-## Major Version Support
+## Major version support
PostgreSQL and EDB Postgres Advanced Server major versions are supported from the date they are made available until the version is retired by EDB (generally 5 years). See [Platform Compatibility ](https://www.enterprisedb.com/product-compatibility#epas) for more details.
-## Minor Version Support
+## Minor version support
EDB performs periodic maintenance to ensure stability and security. EDB automatically performs minor version upgrades and patch updates as part of periodic maintenance. Customers are notified within the EDB Cloud portal prior to maintenance occurring. Minor versions are not user configurable.
diff --git a/product_docs/docs/edbcloud/beta/overview/06_support.mdx b/product_docs/docs/edbcloud/beta/overview/06_support.mdx
index 71de34c237e..b9b49307879 100644
--- a/product_docs/docs/edbcloud/beta/overview/06_support.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/06_support.mdx
@@ -1,12 +1,12 @@
---
-title: "Support Options"
+title: "Support options"
---
If you experience problems with EDB Cloud, you have several options to engage with EDB's Support team to get help. If you have an EDB Cloud account, you can go directly to the Support portal or the EDB Cloud portal to open a support case, or you can leave Support a message using the Support Case widget.
If you can’t log in to your account, send us an email to [cloudsupport@enterprisedb.com](mailto:cloudsupport@enterprisedb.com).
-## Creating a Support Case From the Support Portal (recommended)
+## Creating a support case from the Support portal (recommended)
1. Initiate a support case using any one of these options:
@@ -21,7 +21,7 @@ If you can’t log in to your account, send us an email to [cloudsupport@enterpr
1. (Optional) Attach files to provide more details about the issue you're experiencing.
1. Select **Submit**.
-## Creating a Support Case From the Support Widget
+## Creating a support case from the **Support** widget
1. Log in to EDB Cloud and select **Support** on the bottom of the left navigation pane.
@@ -34,7 +34,7 @@ If you can’t log in to your account, send us an email to [cloudsupport@enterpr
6. (Optional) Attach files to provide more details about the issue you're experiencing.
7. Select **Submit**.
-## Case Severity Level
+## Case severity level
| Level | Description |
| -------- | ----------- |
diff --git a/product_docs/docs/edbcloud/beta/overview/index.mdx b/product_docs/docs/edbcloud/beta/overview/index.mdx
index e18a5d5c1aa..313409ac6a5 100644
--- a/product_docs/docs/edbcloud/beta/overview/index.mdx
+++ b/product_docs/docs/edbcloud/beta/overview/index.mdx
@@ -1,5 +1,5 @@
---
-title: "Overview of Service"
+title: "Overview of service"
---
diff --git a/product_docs/docs/edbcloud/beta/pricing_and_billing/index.mdx b/product_docs/docs/edbcloud/beta/pricing_and_billing/index.mdx
index a79e94d3b83..b0a09961412 100644
--- a/product_docs/docs/edbcloud/beta/pricing_and_billing/index.mdx
+++ b/product_docs/docs/edbcloud/beta/pricing_and_billing/index.mdx
@@ -1,5 +1,5 @@
---
-title: "Pricing and Billing "
+title: "Pricing and billing "
---
This section covers the pricing breakdown for EDB Cloud as well as how to view invoices and infrastructure usage through Microsoft Azure.
@@ -7,7 +7,7 @@ This section covers the pricing breakdown for EDB Cloud as well as how to view i
## Pricing
Pricing is based on the number of Virtual Central Processing Units (vCPUs) provisioned for the database software offering. Consumption of vCPUs is metered hourly. A deployment is comprised of either one instance or one primary and two replica instances of either PostgreSQL or EDB Postgres Advanced Server. When high availability is enabled, the number of vCPU per instance should be multiplied by three to calculate the full price for all resources used. See the full cost breakdown below:
-| Database Type | Hourly Price | Monthly Price* |
+| Database type | Hourly price | Monthly price* |
| --------------------- | -------------- | -------------- |
| PostgreSQL | $0.1655 / vCPU | $120.82 / vCPU |
| EDB Postgres Advanced Server | $0.2397 / vCPU | $174.98 / vCPU |
@@ -17,5 +17,5 @@ Pricing is based on the number of Virtual Central Processing Units (vCPUs) provi
## Billing
All billing is handled directly by Microsoft Azure. Invoices and usage can be viewed on the Azure Portal billing page. [Learn more](https://docs.microsoft.com/en-us/azure/cost-management-billing/)
-## Cloud Infrastructure Costs
+## Cloud infrastructure costs
EDB does not bill you for cloud infrastructure such as compute, storage, data transfer, monitoring, and logging. EDB Cloud clusters run in your Microsoft Azure account. Azure bills you directly for the cloud infrastructure provisioned according to the terms of your account agreement. Invoices and usage can be viewed on the Azure Portal billing page. [Learn more](https://docs.microsoft.com/en-us/azure/cost-management-billing/)
diff --git a/product_docs/docs/edbcloud/beta/reference/index.mdx b/product_docs/docs/edbcloud/beta/reference/index.mdx
index 638a2cb9fee..e0639d92680 100644
--- a/product_docs/docs/edbcloud/beta/reference/index.mdx
+++ b/product_docs/docs/edbcloud/beta/reference/index.mdx
@@ -14,10 +14,10 @@ To access the API, you need a token. The high-level steps to obtain a token are:
5. [Exchange for the raw token for the EDB Cloud token](#exchange-the-edbcloud-token-using-curl).
-EDB provides an optional script to simplify getting your device code and getting and refreshing your tokens. See [Using the `get-token` Script](#using-the-get-token-script) for details.
+EDB provides an optional script to simplify getting your device code and getting and refreshing your tokens. See [Using the `get-token` script](#using-the-get-token-script) for details.
-## Query the Authentication Endpoint
+## Query the authentication endpoint
This call returns the information that either:
@@ -49,7 +49,7 @@ AUDIENCE="https://portal.edbcloud.com/api"
```
The following example calls use these environment variables.
-## Request the Device Code Using `curl`
+## Request the device code using `curl`
!!!note
The `get-token` script executes this step. You don't need to make this call if you are using the script.
!!!
@@ -90,7 +90,7 @@ Store the device code in an environment variable for future use. For example:
DEVICE_CODE=KEOY2_5YjuVsRuIrrR-aq5gs
```
-## Authorize as a User
+## Authorize as a user
To authorize as a user:
@@ -103,9 +103,9 @@ To authorize as a user:
4. Log in with your Azure AD credentials.
-## Request the Raw Token Using `curl`
+## Request the raw token using `curl`
!!!note
- The `get-token` script executes this step. You don't need to make this call if you are using the script. See [Request Your Token Using `get-token`](#request-your-token-using-get-token).
+ The `get-token` script executes this step. You don't need to make this call if you are using the script. See [Request your token using `get-token`](#request-your-token-using-get-token).
!!!
The `curl --request POST` call requests a token. For example:
@@ -120,7 +120,7 @@ curl --request POST \
If successful, the call returns:
- `access_token` - use to exchange for the token to access EDB Cloud API.
-- `refresh_token` - use to obtain a new access token or ID token after the previous one has expired. (See [Refresh Tokens](https://auth0.com/docs/tokens/refresh-tokens) for more information.) Refresh tokens expire after 30 days.
+- `refresh_token` - use to obtain a new access token or ID token after the previous one has expired. (See [Refresh tokens](https://auth0.com/docs/tokens/refresh-tokens) for more information.) Refresh tokens expire after 30 days.
- `expires_in` - means the token expires after 24 hours since its creation.
@@ -159,7 +159,7 @@ If not successful, you receive one of the following errors:
The `get-token` script executes this step. You don't need to make this call if you are using the script.
!!!
-Use the raw token you obtained in the previous step [Request the Raw Token Using `curl`](#request-the-raw-token-using-curl) to get the EDB Cloud token:
+Use the raw token you obtained in the previous step [Request the raw token using `curl`](#request-the-raw-token-using-curl) to get the EDB Cloud token:
```
curl -s --request POST \
@@ -221,13 +221,13 @@ Example response:
```
-## Refresh your Token
+## Refresh your token
You use the refresh token to get a new raw access token. Usually you need a new access token only after the previous one expires or when gaining access to a new resource for the first time. You shouldn't call the endpoint to get a new access token every time you call an API. There are rate limits that throttle the amount of requests to the endpoint that can be executed using the same token from the same IP.
-### Refresh your Token Using `curl`
+### Refresh your token using `curl`
!!!note
-The `get-token` script has an option to execute this step. See [Refresh the Token Using `get-token`](#refresh-the-token-using-get-token).
+The `get-token` script has an option to execute this step. See [Refresh the token using `get-token`](#refresh-the-token-using-get-token).
!!!
If you are not using the `get-token` script to refresh your token, make a POST request to the `/oauth/token` endpoint in the Authentication API, using `grant_type=refresh_token`. For example:
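A minimal sketch of such a request (placeholders stand in for the authentication domain and client ID, and `REFRESH_TOKEN` is assumed to hold the refresh token returned earlier):

```
curl --request POST \
  --url "https://<your authentication domain>/oauth/token" \
  --data "grant_type=refresh_token" \
  --data "client_id=<your client ID>" \
  --data "refresh_token=$REFRESH_TOKEN"
```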
@@ -269,7 +269,7 @@ The token you obtain from this step is the raw access token, you need to exchang
-## Using the `get-token` Script
+## Using the `get-token` script
To simplify the process of getting tokens, EDB provides the `get-token` script. You can download it [here](https://github.com/EnterpriseDB/cloud-utilities/tree/main/api).
@@ -278,7 +278,7 @@ To use the script, install the [jq command-line JSON processor](https://stedolan
Before running the script, [query the authentication endpoint](#query-the-authentication-endpoint).
-### get-token Usage
+### get-token usage
```
Get Tokens for EDB Cloud API
@@ -296,7 +296,7 @@ Usage:
Reference: https://www.enterprisedb.com/docs/edbcloud/latest/reference/
```
-### Request Your Token Using `get-token`
+### Request your token using `get-token`
To use the `get-token` script to get your tokens, use the script without the `--refresh` option. For example:
```
@@ -315,7 +315,7 @@ xxxxxxxxxx
##### Expires In Seconds ##########
86400
```
-### Refresh the Token Using `get-token`
+### Refresh the token using `get-token`
To use the `get-token` script to refresh your token, run the script with the `--refresh` option. For example:
```
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/01_postgres_access.mdx b/product_docs/docs/edbcloud/beta/using_cluster/01_postgres_access.mdx
index f5717a3ba05..85c414e34df 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/01_postgres_access.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/01_postgres_access.mdx
@@ -1,5 +1,5 @@
---
-title: "Managing Postgres Access"
+title: "Managing Postgres access"
---
The `edb_admin` database role and `edb_admin` database created during the _Create Cluster_ process should not be used by your application. Instead, create a new database role and a new database, which provides a high level of isolation in Postgres. If multiple applications are using the same cluster, each database can also contain multiple schemas, essentially a namespace in the database. If strict isolation is needed, use a dedicated cluster or dedicated database. If that strict isolation level is not required, a single database can be deployed with multiple schemas. Please refer to the [Privileges](https://www.postgresql.org/docs/current/ddl-priv.html) topic in the PostgreSQL documentation to further customize ownership and roles to your requirements.
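As a hedged sketch of that separation, a dedicated role and database for an application can be created in a single `psql` invocation (the hostname is the placeholder used throughout these examples, and `app1` is an assumed application name):

```
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require" \
  -c "CREATE USER app1 WITH PASSWORD 'app1_password';" \
  -c "GRANT app1 TO edb_admin;" \
  -c "CREATE DATABASE app1 WITH OWNER app1;"
```

Granting `app1` to `edb_admin` first is what lets the non-superuser admin role create a database owned by the new role.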
@@ -9,7 +9,7 @@ To create a new role and database, first connect using `psql`:
```
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require"
```
-## Notes on the edb_admin Role
+## Notes on the edb_admin role
The `edb_admin` role does not have superuser privileges by default. Contact [Support](../overview/06_support) to request superuser privileges for `edb_admin`.
@@ -20,7 +20,7 @@ Don't use the `edb_admin` user or the `edb_admin` database in your applications.
EDB Cloud stores all database-level authentication securely and directly in PostgreSQL. The `edb_admin` user password is SCRAM-SHA-256 hashed prior to storage. This hash, even if compromised, cannot be replayed by an attacker to gain access to the system.
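If you want to confirm the hashing method from a client, the server's `password_encryption` setting reports how new passwords are hashed (a sketch; assumes the default EDB Cloud configuration and the placeholder hostname used elsewhere in these examples):

```
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require" \
  -c "SHOW password_encryption;"
```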
-## One Database with One Application
+## One database with one application
For one database hosting a single application, replacing `app1` with your preferred username:
@@ -41,7 +41,7 @@ For one database hosting a single application, replacing app1 with your preferre
Using this example, the username and database in your connection string would be `app1`.
-## One Database with Multiple Schemas
+## One database with multiple schemas
If a single database is used to host multiple schemas, create a database owner and then roles and schemas for each application. The example in the following steps shows creating two database roles and two schemas. The default `search_path` for database roles in EDB Cloud is `"$user",public`. If the role name and schema name match, objects in that schema match first, and no `search_path` changes or fully qualified object names are needed. The [PostgreSQL documentation](https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH) covers the schema search path in detail.
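A hedged sketch of that layout, with assumed names (`app1`, `app2`, and a shared `myapp` database) standing in for your own; matching each role name to its schema means the default `search_path` resolves objects without further changes:

```
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require" <<'SQL'
CREATE USER app1 WITH PASSWORD 'app1_password';
CREATE USER app2 WITH PASSWORD 'app2_password';
-- edb_admin is not a superuser, so grant it membership in each new role
-- before creating objects owned by those roles
GRANT app1 TO edb_admin;
GRANT app2 TO edb_admin;
CREATE DATABASE myapp;
\connect myapp
CREATE SCHEMA app1 AUTHORIZATION app1;
CREATE SCHEMA app2 AUTHORIZATION app2;
SQL
```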
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/02_connect_to_cluster.mdx b/product_docs/docs/edbcloud/beta/using_cluster/02_connect_to_cluster.mdx
index 32bc705bf8f..32ea1a3fb9d 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/02_connect_to_cluster.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/02_connect_to_cluster.mdx
@@ -1,8 +1,8 @@
---
-title: "Connecting to Your Cluster"
+title: "Connecting to your cluster"
---
-You can connect to your cluster using [`psql`](http://postgresguide.com/utilities/psql.html), the terminal-based client for Postgres, or another client. See [Recommended Settings for SSL Mode](#recommended-settings-for-ssl-mode) for EDB's recommendations for secure connections.
+You can connect to your cluster using [`psql`](http://postgresguide.com/utilities/psql.html), the terminal-based client for Postgres, or another client. See [Recommended settings for SSL mode](#recommended-settings-for-ssl-mode) for EDB's recommendations for secure connections.
## Using `psql`
To connect to your cluster using `psql`:
@@ -14,7 +14,7 @@ To connect to your cluster using `psql`:
4. On the **Overview** tab, select the copy icon to the right of the **Quick Connect** field to copy the `psql` connection command for your cluster to your clipboard. `psql` prompts for the edb_admin user password you selected at cluster creation time.
5. Paste the command in your terminal.
-## Using Another Client
+## Using another client
To connect to your cluster using a client other than `psql`:
1. Sign in to the [EDB Cloud](https://portal.edbcloud.com) portal.
@@ -23,7 +23,7 @@ To connect to your cluster using a client other than `psql`:
2. Select the name of your cluster.
3. Select the **Connect** tab. You can review and copy all the connection information you need from this screen except for the edb_admin user password. Consult your client driver's documentation for the connection string format it uses.
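The fields on the **Connect** tab generally assemble into a libpq-style URI of the same shape as the Quick Connect example, which many drivers accept directly (a sketch with the placeholder hostname; the variable name is only illustrative):

```
DATABASE_URL="postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require"
```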
-## Recommended Settings for SSL Mode
+## Recommended settings for SSL mode
Different clients can have different default TLS/SSL modes (sslmode). For example, `psql` defaults to `prefer`, which means the client will attempt to establish a TLS connection but fall back to non-TLS if the server does not support it. In the `psql` example provided by EDB in the **Quick Connect** field, `sslmode` is explicitly set to `require`, which means the client will attempt a TLS connection and fail if the connection to the server can't be encrypted.
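If your client doesn't let you set `sslmode` in the connection string itself, libpq-based clients also honor the `PGSSLMODE` environment variable, so you can pin the mode before connecting (a sketch using the placeholder hostname from the Quick Connect example):

```
export PGSSLMODE=require
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin"
```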
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/05_db_configuration_parameters.mdx b/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/05_db_configuration_parameters.mdx
index f9d9457de4f..b280604d1ec 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/05_db_configuration_parameters.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/05_db_configuration_parameters.mdx
@@ -1,5 +1,5 @@
---
-title: Modifying Database Configuration Parameters
+title: Modifying database configuration parameters
---
The database parameters listed on the DB Configuration tab are also referred to as Grand Unified Configuration (GUC) variables. See [What Is a GUC Variable?](https://www.enterprisedb.com/blog/what-guc-variable) for more information.
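Parameter changes are made on the DB Configuration tab, but you can inspect a cluster's current values from any client. For example (a sketch using the placeholder connection string from earlier topics; the parameter names are only illustrative):

```
psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.edbcloud.io:5432/edb_admin?sslmode=require" \
  -c "SHOW shared_buffers;" \
  -c "SELECT name, setting, unit FROM pg_settings WHERE name = 'max_connections';"
```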
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/index.mdx b/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/index.mdx
index f90c03b0f72..29752e31f71 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/index.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/03_modifying_your_cluster/index.mdx
@@ -1,5 +1,5 @@
---
-title: Modifying Your Cluster
+title: Modifying your cluster
# Added this redirect because we removed the Modify and Scale topic
redirects:
- ../03_modify_and_scale_cluster
@@ -14,10 +14,10 @@ redirects:
| Settings | Tab |
| ------- | ----------- |
- | Cluster name and password | [Cluster Info](../../getting_started/03_create_cluster/#cluster_info) |
- | Instance type (vCPUs and memory)* | [Operational Settings](../../getting_started/03_create_cluster/#operational_settings) |
- | Networking type (public or private) | [Operational Settings](../../getting_started/03_create_cluster/#operational_settings) |
- | Database configuration parameters | [Database Configuration Parameters](05_db_configuration_parameters)
+ | Cluster name and password | [Cluster info](../../getting_started/03_create_cluster/#cluster_info) |
+ | Instance type (vCPUs and memory)* | [Operational settings](../../getting_started/03_create_cluster/#operational_settings) |
+ | Networking type (public or private) | [Operational settings](../../getting_started/03_create_cluster/#operational_settings) |
+ | Database configuration parameters | [Database configuration parameters](05_db_configuration_parameters) |
| High availability (on or off) | [Availability](../../getting_started/03_create_cluster/#availability) |
*Changing the instance type could incur higher cloud infrastructure charges.
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/04_backup_and_restore.mdx b/product_docs/docs/edbcloud/beta/using_cluster/04_backup_and_restore.mdx
index 5dc96faff17..079498cd5be 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/04_backup_and_restore.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/04_backup_and_restore.mdx
@@ -1,5 +1,5 @@
---
-title: "Backing Up and Restoring"
+title: "Backing up and restoring"
---
### Backups
@@ -16,7 +16,7 @@ Cluster restores are not performed "in-place" on an existing cluster. Instead, a
You can restore backups into a new cluster in the same region.
-#### Performing a Cluster Restore
+#### Performing a cluster restore
1. On the [**Clusters**](https://portal.edbcloud.com/clusters) page in the [EDB Cloud](https://portal.edbcloud.com) portal, select the cluster you want to restore.
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/05_monitoring_and_logging.mdx b/product_docs/docs/edbcloud/beta/using_cluster/05_monitoring_and_logging.mdx
index 006f41a0562..bd46bbe05f1 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/05_monitoring_and_logging.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/05_monitoring_and_logging.mdx
@@ -1,18 +1,18 @@
---
-title: "Monitoring and Logging"
+title: "Monitoring and logging"
---
EDB Cloud sends all metrics and logs from PostgreSQL clusters to Azure. This topic describes what metrics and logs are sent and how to view them.
-### Azure Log Analytics
+### Azure Log Analytics
When EDB Cloud deploys workloads on Azure, the logs from the PostgreSQL clusters are forwarded to the Azure Log Analytics workspace.
To query EDB Cloud logs, you must use [Azure Log Analytics](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/log-analytics-overview) and the [Kusto Query Language](https://azure-training.com/azure-data-science/the-kusto-query-language/).
-### Querying PostgreSQL Cluster Logs
+### Querying PostgreSQL cluster logs
All logs from your PostgreSQL clusters are stored in the _Customer Log Analytics workspace_. To find your _Customer Log Analytics workspace_:
@@ -31,7 +31,7 @@ All logs from your PostgreSQL clusters are stored in the _Customer Log Analytics
The following tables are available in the _Customer Log Analytics workspace_.
-| Table Name | Description | Logger |
+| Table name | Description | Logger |
| ---------- | ----------- | ------ |
| PostgresLogs_CL | Logs from the customer clusters' databases (all Postgres-related logs) | `logger = postgres` |
| PostgresAuditLogs_CL | Audit logs from the customer clusters' databases | `logger = pgaudit or edb_audit` |
@@ -54,7 +54,7 @@ PostgresAuditLogs_CL
| sort by record_log_time_s desc
```
-### Using Shared Dashboards to View PostgreSQL Cluster Logs
+### Using shared dashboards to view PostgreSQL cluster logs
To view logs from your PostgreSQL clusters using Shared Dashboard:
@@ -66,6 +66,6 @@ To view logs from your PostgreSQL clusters using Shared Dashboard:
3. Select the resource of type _Shared Dashboard_ with the suffix -customer.
-4. Select the link _Go to dashboard_ located at the top of the page.
+4. Select the **Go to dashboard** link located at the top of the page.
diff --git a/product_docs/docs/edbcloud/beta/using_cluster/index.mdx b/product_docs/docs/edbcloud/beta/using_cluster/index.mdx
index ae379c26dd1..2454948ba59 100644
--- a/product_docs/docs/edbcloud/beta/using_cluster/index.mdx
+++ b/product_docs/docs/edbcloud/beta/using_cluster/index.mdx
@@ -1,5 +1,5 @@
---
-title: "Using Your Cluster"
+title: "Using your cluster"
---
In this section, account owners and contributors can learn how to connect, edit, scale, and monitor clusters through the EDB Cloud portal. This section also provides information on how EDB Cloud handles backup and restore.
diff --git a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
index 319f44af33f..8f5eb6fdc33 100644
--- a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
+++ b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
@@ -1,18 +1,45 @@
-export const process = (filename, content) => {
- const newContent = content.split("\n").map(rewriteLink).join("\n");
+import toVFile from "to-vfile";
+import remarkParse from "remark-parse";
+import mdx from "remark-mdx";
+import unified from "unified";
+import remarkFrontmatter from "remark-frontmatter";
+import remarkStringify from "remark-stringify";
+import admonitions from "remark-admonitions";
+import visit from "unist-util-visit";
+import isAbsoluteUrl from "is-absolute-url";
+
+export const process = async (filename, content) => {
+
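+  // Parse the MDX source (including frontmatter and "!!!" admonitions), rewrite links to sample YAML files, then re-serialize.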
+ const processor = unified()
+ .use(remarkParse)
+ .use(remarkStringify, { emphasis: "*", bullet: "-", fences: true })
+ .use(admonitions, {
+ tag: "!!!", icons: "none", infima: true, customTypes: {
+ seealso: "note", hint: "tip", interactive: "interactive",
+ }
+ })
+ .use(remarkFrontmatter)
+ .use(mdx)
+ .use(linkRewriter);
+
+ const output = await processor.process(toVFile({ path: filename, contents: content }));
return {
newFilename: filename,
- newContent,
+ newContent: output.contents.toString(),
};
};
-const rewriteLink = (line) => {
- const regex = /\[.+\]\((.+)\.yaml\)/;
- const match = line.match(regex);
- if (match === null) {
- return line;
- }
+function linkRewriter() {
+ return (tree) => {
- return line.replace(match[1], match[1].replace("samples/", "../samples/"));
-};
+ // link rewriter:
+ // - only links to .yaml files in samples dir
+ // - make relative to parent (because gatsby URL paths are always directories)
+ visit(tree, "link", (node) => {
+ if (isAbsoluteUrl(node.url) || node.url[0] === '/') return;
+ if (!node.url.includes(".yaml")) return;
+ node.url = node.url.replace(/^(?:\.\/)?samples\//, "../samples/");
+ });
+ };
+}
diff --git a/scripts/source/bdr.js b/scripts/source/bdr.js
index ccf198338f4..116f1629ec0 100644
--- a/scripts/source/bdr.js
+++ b/scripts/source/bdr.js
@@ -5,9 +5,9 @@
const path = require("path");
const fs = require("fs/promises");
const { read, write } = require("to-vfile");
-const remarkParse = require("@mdx-js/mdx/node_modules/remark-parse");
+const remarkParse = require("remark-parse");
const mdx = require("remark-mdx");
-const unified = require("@mdx-js/mdx/node_modules/unified");
+const unified = require("unified");
const remarkFrontmatter = require("remark-frontmatter");
const remarkStringify = require("remark-stringify");
const admonitions = require("remark-admonitions");
@@ -26,7 +26,11 @@ const imgPath = path.resolve("temp_bdr/docs/img/");
const processor = unified()
.use(remarkParse)
.use(remarkStringify, { emphasis: "*", bullet: "-", fences: true })
- .use(admonitions, { tag: "!!!", icons: "none", infima: true })
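+  // Map doc-source admonition keywords to the renderer's built-in types (seealso -> note, hint -> tip).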
+ .use(admonitions, {
+ tag: "!!!", icons: "none", infima: true, customTypes: {
+ seealso: "note", hint: "tip", interactive: "interactive",
+ }
+ })
.use(remarkFrontmatter)
.use(mdx)
.use(bdrTransformer);