From 7bdf43ea713e934a6e9e70cf532cda61386205b6 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Mon, 28 Aug 2023 16:53:53 +0000
Subject: [PATCH 01/39] test import for 1.0
---
.../1/api_reference.md.in | 32 ++
.../1/api_reference.mdx | 129 +++---
.../1/architecture.mdx | 194 +++++++-
.../1/backup.mdx | 144 ++++++
.../1/before_you_start.mdx | 114 +++++
.../1/certificates.mdx | 30 ++
.../1/connectivity.mdx | 72 ++-
.../1/images/always_on_1x3_updated.png | 3 +
.../1/images/apps-in-k8s.png | 3 +
.../1/images/apps-outside-k8s.png | 3 +
.../1/images/edb-repo-portal.png | 3 +
.../1/images/k8s-architecture-3-az.png | 3 +
.../1/images/k8s-architecture-multi.png | 3 +
.../1/images/openshift/all-namespaces.png | 4 +-
.../1/images/openshift/find-pgd-openshift.png | 4 +-
.../1/index.mdx | 16 +-
.../1/installation_upgrade.mdx | 154 ++-----
.../1/openshift.mdx | 426 ++++++++++++++++++
.../1/private_registries.mdx | 103 +++++
.../1/quickstart.mdx | 2 +-
.../1/recovery.mdx | 172 +++++++
.../1/rel_notes/0_6_rel_notes.mdx | 9 -
.../1/rel_notes/index.mdx | 17 -
.../1/release_notes.mdx | 30 ++
.../1/samples.mdx | 6 +-
.../1/security.mdx | 250 ++++++++++
.../1/ssl_connections.mdx | 17 +
.../1/use_cases.mdx | 44 ++
.../1/using_pgd.mdx | 171 +++++++
29 files changed, 1931 insertions(+), 227 deletions(-)
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/always_on_1x3_updated.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-in-k8s.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-outside-k8s.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/edb-repo-portal.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-3-az.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-multi.png
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
delete mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/0_6_rel_notes.mdx
delete mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
new file mode 100644
index 00000000000..5e70e3ff2ae
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
@@ -0,0 +1,32 @@
+# API Reference
+
+EDB Postgres Distributed for Kubernetes extends the Kubernetes API by defining
+the custom resources described below.
+
+All the resources are defined in the `pgd.k8s.enterprisedb.io/v1beta1`
+API.
+
+Below you will find a description of the defined resources:
+
+
+
+{{ range $ -}}
+- [{{ .Name -}}](#{{ .Name -}})
+{{ end }}
+
+{{ range $ -}}
+{{ .Anchor }}
+
+## {{ .Name }}
+
+{{ .Doc -}}
+{{ if .Items }}
+
+{{ .TableFieldName }} | {{ .TableFieldDoc }} | {{ .TableFieldRawType }}
+{{ .TableFieldNameDashSize }} | {{ .TableFieldDocDashSize }} | {{ .TableFieldRawTypeDashSize }}
+{{ end }}
+{{- range .Items -}}
+`{{ .Name }}` | {{ .Doc }}{{ if .Mandatory }} - *mandatory* {{ end }} | {{ .RawType }}
+{{ end }}
+{{ end -}}
+
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
index b1a3bf38c77..75bba2b67c3 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
@@ -19,6 +19,7 @@ Below you will find a description of the defined resources:
- [CertManagerTemplate](#CertManagerTemplate)
- [ClientCertConfiguration](#ClientCertConfiguration)
- [ClientPreProvisionedCertificates](#ClientPreProvisionedCertificates)
+- [CnpBaseConfiguration](#CnpBaseConfiguration)
- [CnpConfiguration](#CnpConfiguration)
- [ConnectivityConfiguration](#ConnectivityConfiguration)
- [ConnectivityStatus](#ConnectivityStatus)
@@ -64,10 +65,10 @@ Below you will find a description of the defined resources:
Backup configures the backup of cnp-pgd nodes
-| Name | Description | Type |
-| --------------- | -------------------------------------------------------------------------------------------------------- | ------------------------- |
-| `configuration` | The CNP configuration to be used for backup. ServerName value is reserved by the operator. - *mandatory* | cnpv1.BackupConfiguration |
-| `cron ` | The scheduled backup for the data - *mandatory* | cnpv1.ScheduledBackupSpec |
+| Name | Description | Type |
+| --------------- | ------------------------------------------------------------------------------------------ | ------------------------- |
+| `configuration` | The CNP configuration to be used for backup. ServerName value is reserved by the operator. | cnpv1.BackupConfiguration |
+| `cron ` | The scheduled backup for the data | cnpv1.ScheduledBackupSpec |
@@ -128,30 +129,41 @@ ClientPreProvisionedCertificates instruct how to fetch the pre-generated client
| ------------------ | --------------------------------------------------------------------------- | --------------------------------------------------------- |
| `streamingReplica` | StreamingReplica the pre-generated certificate for 'streaming_replica' user | [\*PreProvisionedCertificate](#PreProvisionedCertificate) |
+
+
+## CnpBaseConfiguration
+
+CnpBaseConfiguration contains the configuration parameters that can be applied to both CNP Witness and Data nodes
+
+| Name | Description | Type |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
+| `startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 |
+| `stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 |
+| `storage ` | Configuration of the storage of the instances - *mandatory* | cnpv1.StorageConfiguration |
+| `walStorage ` | Configuration of the WAL storage for the instances | \*cnpv1.StorageConfiguration |
+| `clusterMaxStartDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 300) | int32 |
+| `affinity ` | Affinity/Anti-affinity rules for Pods | cnpv1.AffinityConfiguration |
+| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core) |
+| `postgresql ` | Configuration of the PostgreSQL server | cnpv1.PostgresConfiguration |
+| `monitoring ` | The configuration of the monitoring infrastructure of this cluster | \*cnpv1.MonitoringConfiguration |
+| `logLevel ` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | string |
+| `serviceAccountTemplate` | The service account template to be passed to CNP | \*cnpv1.ServiceAccountTemplate |
+| `otel ` | OpenTelemetry Configuration | [OTELConfiguration](#OTELConfiguration) |
+| `postInitSQL ` | List of SQL queries to be executed as a superuser immediately after a node has been created - to be used with extreme care (by default empty) | \[]string |
+| `postInitTemplateSQL ` | List of SQL queries to be executed as a superuser in the `template1` after a node has been created - to be used with extreme care (by default empty) | \[]string |
+| `seccompProfile ` | The SeccompProfile applied to every Pod and Container. Defaults to: `RuntimeDefault` | \*corev1.SeccompProfile |
+| `metadata ` | Metadata applied exclusively to the generated Cluster resources. Useful for applying AppArmor profiles. | [InheritedMetadata](#InheritedMetadata) |
+
## CnpConfiguration
-CnpConfiguration contains all the configurations that will be injected into the resulting clusters composing the PGD group
-
-| Name | Description | Type |
-| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 |
-| `stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 |
-| `storage ` | Configuration of the storage of the instances - *mandatory* | cnpv1.StorageConfiguration |
-| `walStorage ` | Configuration of the WAL storage for the instances | \*cnpv1.StorageConfiguration |
-| `clusterMaxStartDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 300) | int32 |
-| `affinity ` | Affinity/Anti-affinity rules for Pods | cnpv1.AffinityConfiguration |
-| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core) |
-| `postgresql ` | Configuration of the PostgreSQL server | cnpv1.PostgresConfiguration |
-| `monitoring ` | The configuration of the monitoring infrastructure of this cluster | \*cnpv1.MonitoringConfiguration |
-| `superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | \*cnpv1.LocalObjectReference |
-| `enableSuperuserAccess ` | When this option is enabled the CNP operator will create or use the secret defined in the SuperuserSecret to allow superuser (postgres) access to the database. Disabled by default. | \*bool |
-| `logLevel ` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | string |
-| `serviceAccountTemplate` | The service account template to be passed to CNP | \*cnpv1.ServiceAccountTemplate |
-| `otel ` | OpenTelemetry Configuration | [OTELConfiguration](#OTELConfiguration) |
-| `postInitSQL ` | List of SQL queries to be executed as a superuser immediately after a node has been created - to be used with extreme care (by default empty) | \[]string |
-| `postInitTemplateSQL ` | List of SQL queries to be executed as a superuser in the `template1` after a node has been created - to be used with extreme care (by default empty) | \[]string |
+CnpConfiguration contains the configurations of the data nodes that will be injected into the resulting clusters composing the PGD group
+
+| Name | Description | Type |
+| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
+| `enableSuperuserAccess` | When this option is enabled, the CNP operator will create or use the secret defined in the SuperuserSecret to allow superuser (postgres) access to the database. Disabled by default. | \*bool |
+| `superuserSecret ` | The secret containing the superuser password. A new secret will be created with a randomly generated password if not defined. This field is only allowed in the CNP Instances configuration. A Witness Node will always use the same SuperuserSecret as the other instances. | \*cnpv1.LocalObjectReference |
@@ -328,10 +340,11 @@ PGDGroupCleanupList contains a list of PGDGroupCleanup
PGDGroupCleanupSpec defines the desired state of PGDGroupCleanup
-| Name | Description | Type |
-| ---------- | ------------- | ------ |
-| `executor` | - *mandatory* | string |
-| `target ` | - *mandatory* | string |
+| Name | Description | Type |
+| ---------- | --------------------------------------------------------------------------------------------- | ------ |
+| `executor` | - *mandatory* | string |
+| `target ` | - *mandatory* | string |
+| `force ` | Force will force the removal of the PGDGroup even if the target PGDGroup nodes are not parted | bool |
@@ -364,18 +377,19 @@ PGDGroupSpec defines the desired state of PGDGroup
| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
| `imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string |
| `imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: | corev1.PullPolicy |
-| `imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [\[\]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#localobjectreference-v1-core) |
+| `imagePullSecrets ` | The list of pull secrets to be used to pull operator and or the operand images | [\[\]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#localobjectreference-v1-core) |
| `inheritedMetadata ` | Metadata that will be inherited by all objects related to the pgdGroup | [\*InheritedMetadata](#InheritedMetadata) |
| `instances ` | Number of instances required in the cluster - *mandatory* | int32 |
| `proxyInstances ` | Number of proxy instances required in the cluster | int32 |
| `witnessInstances ` | Number of witness instances required in the cluster | int32 |
-| `backup ` | The configuration to be used for backups | [\*Backup](#Backup) |
-| `restore ` | The configuration to restore this PGD group from | [\*Restore](#Restore) |
-| `cnp ` | Instances configuration. - *mandatory* | [CnpConfiguration](#CnpConfiguration) |
+| `backup ` | The configuration to be used for backups in the CNP instances. | [\*Backup](#Backup) |
+| `restore ` | The configuration to restore this PGD group from an Object Store service | [\*Restore](#Restore) |
+| `cnp ` | Instances configuration that will be injected into the CNP clusters that compose the PGD Group - *mandatory* | [CnpConfiguration](#CnpConfiguration) |
+| `witness ` | WitnessInstances configuration that will be injected into the WitnessInstances CNP clusters If not defined, it will default to the Instances configuration | [\*CnpBaseConfiguration](#CnpBaseConfiguration) |
| `pgd ` | Pgd contains instructions to bootstrap this cluster - *mandatory* | [PgdConfiguration](#PgdConfiguration) |
| `pgdProxy ` | PGDProxy contains instructions to configure PGD Proxy | [PGDProxyConfiguration](#PGDProxyConfiguration) |
-| `connectivity ` | Configures the connectivity of the PGDGroup - *mandatory* | [ConnectivityConfiguration](#ConnectivityConfiguration) |
-| `failingFinalizerTimeLimitSeconds` | The amount of seconds for the finalizer to start correctly, measured from the deletion timestamp | int32 |
+| `connectivity ` | Configures the connectivity of the PGDGroup, like services and certificates that will be used. - *mandatory* | [ConnectivityConfiguration](#ConnectivityConfiguration) |
+| `failingFinalizerTimeLimitSeconds` | The amount of seconds that the operator will wait in case of a failing finalizer. A finalizer is considered failing when the operator cannot reach any nodes of the PGDGroup | int32 |
@@ -383,18 +397,20 @@ PGDGroupSpec defines the desired state of PGDGroup
PGDGroupStatus defines the observed state of PGDGroup
-| Name | Description | Type |
-| --------------------- | ------------------------------------------------------------------ | ----------------------------------------- |
-| `latestGeneratedNode` | ID of the latest generated node (used to avoid node name clashing) | int32 |
-| `phase ` | The initialization phase of this cluster | resources.OperatorPhase |
-| `phaseDetails ` | The details of the current phase | string |
-| `nodes ` | The list of summaries for the nodes in the group | [\[\]NodeSummary](#NodeSummary) |
-| `backup ` | The node that is taking backups of this PGDGroup | [BackupStatus](#BackupStatus) |
-| `restore ` | The status of the restore process | [RestoreStatus](#RestoreStatus) |
-| `PGD ` | Last known status of PGD | [PGDStatus](#PGDStatus) |
-| `CNP ` | Last known status of CNP | [CNPStatus](#CNPStatus) |
-| `PGDProxy ` | Last known status of PGDProxy | [PGDProxyStatus](#PGDProxyStatus) |
-| `connectivity ` | Last known status of Connectivity | [ConnectivityStatus](#ConnectivityStatus) |
+| Name | Description | Type |
+| ------------------------ | ------------------------------------------------------------------------------ | ----------------------------------------- |
+| `latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 |
+| `phase ` | The initialization phase of this cluster | resources.OperatorPhase |
+| `phaseDetails ` | The details of the current phase | string |
+| `phaseTroubleshootHints` | PhaseTroubleshootHints general troubleshooting indications for the given phase | string |
+| `phaseType ` | PhaseType describes the phase category. | resources.PhaseType |
+| `nodes ` | The list of summaries for the nodes in the group | [\[\]NodeSummary](#NodeSummary) |
+| `backup ` | The node that is taking backups of this PGDGroup | [BackupStatus](#BackupStatus) |
+| `restore ` | The status of the restore process | [RestoreStatus](#RestoreStatus) |
+| `PGD ` | Last known status of PGD | [PGDStatus](#PGDStatus) |
+| `CNP ` | Last known status of CNP | [CNPStatus](#CNPStatus) |
+| `PGDProxy ` | Last known status of PGDProxy | [PGDProxyStatus](#PGDProxyStatus) |
+| `connectivity ` | Last known status of Connectivity | [ConnectivityStatus](#ConnectivityStatus) |
@@ -537,8 +553,6 @@ PgdConfiguration is the configuration of the PGD group structure
| `ownerCredentialsSecret` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | \*cnpv1.LocalObjectReference |
| `proxySettings ` | Configuration for the proxy | [PGDProxySettings](#PGDProxySettings) |
| `nodeGroupSettings ` | Configuration for the PGD Group | [\*PGDNodeGroupSettings](#PGDNodeGroupSettings) |
-| `postInitSQL ` | List of SQL queries to be executed as a superuser immediately after a node has been created - to be used with extreme care (by default empty) | \[]string |
-| `postInitTemplateSQL ` | List of SQL queries to be executed as a superuser in the `template1` after a node has been created - to be used with extreme care (by default empty) | \[]string |
| `globalRouting ` | GlobalRouting is true when global routing is enabled, and in this case the proxies will be created in the parent group | bool |
| `mutations ` | List of SQL mutations to apply to the node group | SQLMutations |
@@ -603,11 +617,11 @@ RootDNSConfiguration describes how the FQDN for the resources should be generate
SQLMutation is a series of SQL statements to apply atomically
-| Name | Description | Type |
-| ----------- | -------------------------------------------------------------------------------------------------------------- | --------------- |
-| `isApplied` | List of boolean-returning SQL queries. If any of them returns false the mutation will be applied - *mandatory* | \[]string |
-| `exec ` | List of SQL queries to be executed to apply this mutation - *mandatory* | \[]string |
-| `type ` | The type of SQLMutation - *mandatory* | SQLMutationType |
+| Name | Description | Type |
+| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `isApplied` | List of boolean-returning SQL queries. If any of them returns false the mutation will be applied - *mandatory* | \[]string |
+| `exec ` | List of SQL queries to be executed to apply this mutation - *mandatory* | \[]string |
+| `type ` | Type determines when the SQLMutation occurs. 'always': reconcile the mutation at each reconciliation cycle 'beforeSubgroupRaft': are executed only before the subgroupRaft is enabled If not specified, the Type defaults to 'always'. - *mandatory* | SQLMutationType |
@@ -626,10 +640,11 @@ ServerCertConfiguration contains the information to generate the certificates fo
ServiceTemplate is a structure that allows the user to set a template for the Service generation.
-| Name | Description | Type |
-| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- |
-| `metadata` | Standard object's metadata. More info: | [Metadata](#Metadata) |
-| `spec ` | Specification of the desired behavior of the service. More info: | corev1.ServiceSpec |
+| Name | Description | Type |
+| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `metadata ` | Standard object's metadata. More info: | [Metadata](#Metadata) |
+| `spec ` | Specification of the desired behavior of the service. More info: | corev1.ServiceSpec |
+| `updateStrategy` | UpdateStrategy indicates how to update the services generated by this template. | \*ServiceUpdateStrategy |
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 41e5f6ebde5..bbd7e477926 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -4,30 +4,190 @@ originalFilePath: 'src/architecture.md'
---
This section covers the main architectural aspects you need to consider
-when deploying EDB Postgres Distributed for Kubernetes (PG4K-PGD).
+when deploying EDB Postgres Distributed in Kubernetes (PG4K-PGD).
-PG4K-PGD can be installed within a [single Kubernetes cluster](#single-kubernetes-cluster)
-or across [multiple Kubernetes clusters](#multiple-kubernetes-clusters) - normally
-in different regions.
+PG4K-PGD is a
+[Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
+designed to deploy and manage EDB Postgres Distributed clusters
+running in private, public, hybrid, or multi-cloud environments.
+
+## Relationship with EDB Postgres Distributed
+
+[EDB Postgres Distributed (PGD)](https://www.enterprisedb.com/docs/pgd/latest/)
+is a multi-master implementation of Postgres designed for high performance and
+availability.
+PGD generally requires deployment using
+[*Trusted Postgres Architect* (TPA)](https://www.enterprisedb.com/docs/pgd/latest/tpa/),
+a tool that uses [Ansible](https://www.ansible.com) for provisioning and
+deployment of PGD clusters.
+
+PG4K-PGD offers a different way of deploying PGD clusters, leveraging containers
+and Kubernetes, with the added advantages that the resulting architecture is
+self-healing and robust, managed through declarative configuration, and that it
+takes advantage of the vast and growing Kubernetes ecosystem.
## Relationship with EDB Postgres for Kubernetes
-A PGD cluster is made up by one or more PGD groups, each having one or more PGD
-nodes. A PGD node in Kubernetes can be assimilated to a Postgres cluster
-without any physical replicas.
-PG4K-PGD internally manages each PGD node using the `Cluster` resource as
-defined by EDB Postgres for Kubernetes (PG4K), specifically a `Cluster` with
-`.spec.instances = 1`.
+A PGD cluster consists of one or more *PGD Groups*, each having one or more *PGD
+Nodes*. A PGD node is a Postgres database. PG4K-PGD internally
+manages each PGD node using the `Cluster` resource as defined by EDB Postgres
+for Kubernetes (PG4K), specifically a `Cluster` with a single instance (i.e. no
+replicas).
+
+The single PostgreSQL instance created by each `Cluster` can be configured
+declaratively via the
+[`.spec.cnp` section](api_reference.md#CnpConfiguration)
+of the PGD Group spec.
+
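+For illustration only, here is a minimal sketch of what that section can look
+like. The group name and the values shown are placeholders, and the mandatory
+`pgd` and `connectivity` stanzas are omitted for brevity:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: my-group
+spec:
+  instances: 3
+  # ... pgd, connectivity, and other settings omitted ...
+  cnp:
+    storage:
+      size: 1Gi
+    logLevel: info
+```
+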
+In PG4K-PGD, as in PG4K, the underlying database implementation is responsible
+for data replication. However, it is important to note that *failover* and
+*switchover* work differently, entailing Raft election and the nomination of new
+write leaders. PG4K only handles the deployment and healing of data nodes.
+
+## Managing PGD using PG4K-PGD
+
+The PG4K-PGD operator can manage the complete lifecycle of PGD clusters. As
+such, in addition to PGD Nodes (represented as single-instance `Clusters`), it
+needs to manage other objects associated with PGD.
+
+PGD relies on the Raft algorithm for distributed consensus to manage node
+metadata, specifically agreement on a *write leader*. Consensus among data
+nodes is also required for operations such as generating new global sequences
+or performing distributed DDL.
+
+These requirements mean that a PGD cluster needs additional actors beyond the database nodes.
+
+PG4K-PGD manages the following (see the sketch after this list):
+
+- Data nodes: as mentioned previously, a node is a database, and is managed
+ via PG4K, creating a `Cluster` with a single instance.
+- [Witness nodes](https://www.enterprisedb.com/docs/pgd/latest/nodes/#witness-nodes)
+ are basic database instances that do not participate in data
+ replication; their function is to guarantee that consensus is possible in
+ groups with an even number of data nodes, or after network partitions. Witness
+ nodes are also managed using a single-instance `Cluster` resource.
+- [PGD Proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/):
+ act as Postgres proxies with knowledge of the write leader. PGD proxies need
+ information from Raft to route writes to the current write leader.
+
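+As a hedged illustration of how these roles map to a PGDGroup spec (the counts
+below are examples only; a witness is typically paired with an even number of
+data nodes):
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: my-group
+spec:
+  instances: 2         # data nodes, each managed as a single-instance PG4K Cluster
+  witnessInstances: 1  # witness node, participates in consensus only
+  proxyInstances: 2    # PGD Proxy instances routing writes to the write leader
+  # ... cnp, pgd, and connectivity configuration omitted ...
+```
+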
+### Proxies and routing
+
+PGD groups assume full mesh connectivity of PGD nodes. Each node must be able to
+connect to every other node, using the appropriate connection string (a
+`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
+will take care of replicating data after it's committed to one node.
+
+For performance, it is often advisable to send write operations mostly to a
+single node, the *write leader*. Raft is used to identify which node is the
+write leader, and to hold metadata about the PGD nodes. PGD Proxies are used to
+transparently route writes to write leaders, and to quickly pivot to the new
+write leader in case of switchover or failover.
+
+It is possible to configure *Raft subgroups*, each of which can maintain a
+separate write leader. In PG4K-PGD, a PGD Group containing a PGD Proxy
+automatically comprises a Raft subgroup.
+
+There are two kinds of routing available with PGD Proxies:
+
+- Global routing uses the top-level Raft group, and maintains one global write
+ leader.
+- Local routing uses subgroups to maintain separate write leaders. Local
+ routing is often used to achieve geographical separation of writes.
+
+In PG4K-PGD, local routing is used by default, and a configuration option is
+available to select global routing.
+
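+As a sketch, global routing can be selected through the documented
+`globalRouting` field of the `pgd` section of the PGDGroup spec (fragment only,
+other settings omitted):
+
+```yaml
+spec:
+  pgd:
+    # ... bootstrap settings omitted ...
+    globalRouting: true  # one global write leader via the top-level Raft group
+```
+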
+You can find more information in the
+[PGD documentation of routing with Raft](https://www.enterprisedb.com/docs/pgd/latest/routing/raft/).
+
+### PGD Architectures and High Availability
+
+EDB proposes several recommended architectures to make good use of PGD's
+distributed multi-master capabilities and to offer high availability.
+
+The Always On architectures are built from either one group in a single location
+or two groups in two separate locations.
+Please refer to the
+[PGD architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
+for further information.
+
+## Deploying PGD on Kubernetes
+
+PG4K-PGD leverages Kubernetes to deploy and manage PGD clusters. As such, some
+adaptations are necessary to translate PGD into the Kubernetes ecosystem.
+
+### Images and operands
+
+PGD can be configured to run one of three Postgres distributions. Please refer
+to the
+[PGD documentation](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+to understand the features of each distribution.
+
+To function in Kubernetes, containers are provided for each Postgres
+distribution. These are the *operands*.
+In addition, the operator images are kept in those same repositories.
+
+Please refer to [the document on registries](private_registries.md)
+for details on accessing the images.
+
+### Kubernetes architecture
+
+We reproduce some of the points of the
+[PG4K document on Kubernetes architecture](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/architecture/),
+to which we refer you for further depth.
+
+Kubernetes natively supports spanning separate physical locations (also known
+as data centers, failure zones, or, more frequently, **availability zones**)
+connected to each other via redundant, low-latency, private network
+connectivity.
+
+Since Kubernetes is a distributed system, the recommended minimum number of
+availability zones for a **Kubernetes cluster** is three (3), in order to make
+the control plane resilient to the failure of a single zone. This means that
+each data center is active at any time and can run workloads simultaneously.
+
+PG4K-PGD can be installed within a
+[single Kubernetes cluster](#single-kubernetes-cluster)
+or across
+[multiple Kubernetes clusters](#multiple-kubernetes-clusters).
+
+### Single Kubernetes cluster
+
+A multi-availability-zone Kubernetes architecture is typical of Kubernetes
+services managed by Cloud Providers. Such an architecture enables the PG4K-PGD
+and the PG4K operators to schedule workloads and nodes across availability
+zones, considering all zones active:
+
+![Kubernetes cluster spanning over 3 independent data centers](./images/k8s-architecture-3-az.png)
+
+PGD clusters can be deployed in a single Kubernetes cluster and take advantage
+of Kubernetes availability zones to enable High Availability architectures,
+including the Always On recommended architectures.
+
+The *Always On Single Location* architecture shown in the
+[PGD Architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/):
+![Always On Single Region](./images/always_on_1x3_updated.png)
+
+can be realized on a single Kubernetes cluster with three availability zones.
+
+The PG4K-PGD operator can control the *scheduling* of pods (i.e. which pods go
+to which data center) using affinity, tolerations and node selectors, as is the
+case with PG4K. Individual scheduling controls are available for proxies as well
+as nodes.
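+
+As an illustrative sketch (not a complete manifest), zone-aware scheduling of
+the data nodes can be expressed through the `affinity` field of the `cnp`
+section; the specific keys shown below come from the PG4K
+`AffinityConfiguration` and should be verified against the API reference:
+
+```yaml
+spec:
+  cnp:
+    affinity:
+      enablePodAntiAffinity: true              # spread data nodes apart
+      topologyKey: topology.kubernetes.io/zone # one data node per zone, when possible
+```
+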
-The PostgreSQL instances created by the `Cluster` can be configured in the
-[`.spec.cnp` section](api_reference.md#CnpConfiguration).
+Please refer to the
+[Kubernetes documentation on scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/),
+as well as the [PG4K documents](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/scheduling/)
+for further information.
-## Single Kubernetes cluster
+### Multiple Kubernetes clusters
-EDB Postgres Distributed can be executed inside a single Kubernetes cluster.
+PGD clusters can also be deployed in multiple Kubernetes clusters that can
+reliably communicate with each other.
-## Multiple Kubernetes clusters
+![Multiple Kubernetes clusters](./images/k8s-architecture-multi.png)
-EDB Postgres Distributed for Kubernetes can also be deployed in different
-Kubernetes clusters that can reliably communicate with each other.
+[Always On multi-location PGD architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
+can be realized on multiple Kubernetes clusters that meet the connectivity
+requirements.
More information can be found in the ["Connectivity"](connectivity.md) section.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
new file mode 100644
index 00000000000..be4b57ebf4b
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -0,0 +1,144 @@
+---
+title: 'Backup on object stores'
+originalFilePath: 'src/backup.md'
+---
+
+EDB Postgres Distributed for Kubernetes (PG4K-PGD) supports *online/hot backup* of
+PGD clusters through physical backup and WAL archiving on an object store.
+This means that the database is always up (no downtime required) and that
+Point In Time Recovery is available.
+
+## Common object stores
+
+Multiple object stores are supported, such as `AWS S3`, `Microsoft Azure Blob Storage`,
+`Google Cloud Storage`, `MinIO Gateway`, or any S3-compatible provider.
+Given that PG4K-PGD configures the connection with object stores by relying on
+EDB Postgres for Kubernetes (PG4K), please refer to the [PG4K Cloud provider support](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
+documentation for additional depth.
+
+!!! Important
+ In the PG4K documentation you'll find the Cloud Provider configuration section
+ available at `spec.backup.barmanObjectStore`. Note that in PG4K-PGD examples, the object store section is found at a
+ different path: `spec.backup.configuration.barmanObjectStore`.
+
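+As an illustrative sketch (the bucket path, secret name, and key names below
+are placeholders), an S3 object store could be configured as follows, following
+the `barmanObjectStore` format described in the PG4K documentation linked
+above:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+[...]
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        destinationPath: "s3://my-bucket/backups/"
+        s3Credentials:
+          accessKeyId:
+            name: aws-creds
+            key: ACCESS_KEY_ID
+          secretAccessKey:
+            name: aws-creds
+            key: ACCESS_SECRET_KEY
+```
+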
+## WAL archive
+
+WAL archiving is the process that sends `WAL files` to the object storage, and it is required
+in order to perform *online/hot backups* and Point In Time Recovery (PITR).
+In PG4K-PGD, each PGD Node will be set up to archive WAL files in the object store independently.
+
+The WAL archive is defined in the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza,
+and is enabled as soon as a destination path and cloud credentials are set.
+You can choose to compress WAL files before they are uploaded, and/or encrypt them.
+Parallel WAL archiving can also be enabled.
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+[...]
+spec:
+ backup:
+ configuration:
+ barmanObjectStore:
+ [...]
+ wal:
+ compression: gzip
+ encryption: AES256
+ maxParallel: 8
+```
+
+For further information, refer to the [PG4K WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
+
+## Scheduled backups
+
+Scheduled backups are the recommended way to configure your backup strategy in PG4K-PGD.
+When the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator will select one of the
+PGD data nodes as the elected "Backup Node", for which it will automatically create a `Scheduled Backup` resource.
+
+The `.spec.backup.cron.schedule` field allows you to define a cron schedule specification, expressed
+in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+[...]
+spec:
+ backup:
+ cron:
+ schedule: "0 0 0 * * *"
+ backupOwnerReference: self
+ suspend: false
+ immediate: true
+```
+
+Scheduled Backups can be suspended if necessary by setting `.spec.backup.cron.suspend` to true. This will
+prevent any new backup from being scheduled while the option is set to true.
+
+If you want to execute a backup as soon as the ScheduledBackup resource is created,
+you can set `.spec.backup.cron.immediate` to true.
+
+`.spec.backup.cron.backupOwnerReference` indicates which ownerReference should be used
+in the created backup resources. The choices are:
+
+- *none:* no owner reference for created backup objects
+- *self:* sets the Scheduled backup object as owner of the backup
+- *cluster:* sets the cluster as owner of the backup
+
+!!! Note
+ The `PG4K` ScheduledBackup object contains an additional option named `cluster` to specify the
+ Cluster to be backed up. This option is currently not supported by `PG4K-PGD`, and will be
+ ignored if specified.
+
+If the elected "Backup Node" is deleted, the operator will transparently elect a new "Backup Node"
+and reconcile the Scheduled Backup resource accordingly.
+
+## Retention policies
+
+PG4K-PGD can manage the automated deletion of backup files from the backup
+object store, using **retention policies** based on the recovery window.
+This process will also take care of removing unused WAL files and WALs associated with backups
+that are scheduled for deletion.
+
+You can define your backups with a retention policy of 30 days as follows:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+[...]
+spec:
+ backup:
+ configuration:
+ retentionPolicy: "30d"
+```
+
+For further information, refer to the [PG4K Retention policies](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) documentation.
+
+!!! Important
+    Currently, the retention policy will only be applied to the elected "Backup Node"
+    backups and WAL files. Given that every other PGD node also archives its own WALs
+ independently, it is your responsibility to manage the lifecycle of those WAL files,
+ for example by leveraging the object storage data retention policy.
+ Also, in case you have an object storage data retention policy set up on every PGD Node
+ directory, make sure it's not overlapping or interfering with the retention policy managed
+ by the operator.
+
+## Compression algorithms
+
+Backups and WAL files are uncompressed by default. However, multiple compression algorithms are
+supported. For more information, refer to the [PG4K Compression algorithms](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
+
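+As a sketch of what enabling compression might look like, assuming the same
+`data` and `wal` compression fields documented for PG4K's `barmanObjectStore`:
+
+```yaml
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        data:
+          compression: gzip  # compress base backups
+        wal:
+          compression: gzip  # compress archived WAL files
+```
+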
+## Tagging of backup objects
+
+It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files and history files.
+For more information, refer to the [PG4K document on Tagging of backup objects](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
+
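+A hedged example, assuming the `tags` and `historyTags` maps documented for
+PG4K's `barmanObjectStore` (the key-value pairs below are placeholders):
+
+```yaml
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        tags:
+          backupRetentionPolicy: "expire"
+        historyTags:
+          backupRetentionPolicy: "keep"
+```
+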
+## On-demand backups of a PGD Node
+
+A PGD Node is represented as a single-instance PG4K `Cluster` object.
+As such, in case of need, it's possible to request an on-demand backup
+of a specific PGD Node by creating a PG4K `Backup` resource.
+In order to do that, you can directly refer to the [PG4K On-demand backups](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) documentation.
+
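+For example, here is a hedged sketch of a PG4K `Backup` resource targeting one
+node of the group. The cluster name below is hypothetical; use the hint that
+follows to list the actual `Cluster` names of your PGDGroup:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: on-demand-backup
+  namespace: my-namespace
+spec:
+  cluster:
+    name: my-pgd-group-1  # one of the PG4K Clusters that make up the PGDGroup
+```
+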
+!!! Hint
+ You can retrieve the list of PG4K Clusters that make up your PGDGroup
+ by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
new file mode 100644
index 00000000000..cdb6cb725ab
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -0,0 +1,114 @@
+---
+title: 'Before You Start'
+originalFilePath: 'src/before_you_start.md'
+---
+
+Before we get started, it is essential to go over some terminology that is
+specific to Kubernetes and PGD.
+
+## Kubernetes terminology
+
+[Node](https://kubernetes.io/docs/concepts/architecture/nodes/)
+: A *node* is a worker machine in Kubernetes, either virtual or physical, where
+ all services necessary to run pods are managed by the control plane node(s).
+
+[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/)
+: A *pod* is the smallest computing unit that can be deployed in a Kubernetes
+ cluster and is composed of one or more containers that share network and
+ storage.
+
+[Service](https://kubernetes.io/docs/concepts/services-networking/service/)
+: A *service* is an abstraction that exposes as a network service an
+ application that runs on a group of pods and standardizes important features
+ such as service discovery across applications, load balancing, failover, and so
+ on.
+
+[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
+: A *secret* is an object that is designed to store small amounts of sensitive
+ data such as passwords, access keys, or tokens, and use them in pods.
+
+[Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+: A *storage class* allows an administrator to define the classes of storage in
+ a cluster, including provisioner (such as AWS EBS), reclaim policies, mount
+ options, volume expansion, and so on.
+
+[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+: A *persistent volume* (PV) is a resource in a Kubernetes cluster that
+ represents storage that has been either manually provisioned by an
+ administrator or dynamically provisioned by a *storage class* controller. A PV
+ is associated with a pod using a *persistent volume claim* and its lifecycle is
+ independent of any pod that uses it. Normally, a PV is a network volume,
+ especially in the public cloud. A [*local persistent volume*
+ (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a
+ persistent volume that exists only on the particular node where the pod that
+ uses it is running.
+
+[Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+: A *persistent volume claim* (PVC) represents a request for storage, which
+ might include size, access mode, or a particular storage class. Similar to how
+ a pod consumes node resources, a PVC consumes the resources of a PV.
+
+[Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)
+: A *namespace* is a logical and isolated subset of a Kubernetes cluster and
+ can be seen as a *virtual cluster* within the wider physical cluster.
+ Namespaces allow administrators to create separated environments based on
+ projects, departments, teams, and so on.
+
+[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+: *Role Based Access Control* (RBAC), also known as *role-based security*, is a
+ method used in computer systems security to restrict access to the network and
+ resources of a system to authorized users only. Kubernetes has a native API to
+ control roles at the namespace and cluster level and associate them with
+ specific resources and individuals.
+
+[CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
+: A *custom resource definition* (CRD) is an extension of the Kubernetes API
+ and allows developers to create new data types and objects, *called custom
+ resources*.
+
+[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
+: An *operator* is a Kubernetes software extension that automates those steps
+ that are normally performed by a human operator when managing one or more
+ applications or given services. An operator assists Kubernetes in making sure
+ that the resource's defined state always matches the observed one.
+
+[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/)
+: `kubectl` is the command-line tool used to manage a Kubernetes cluster.
+
+EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. Please refer to the
+["Supported releases"](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) page for details.
+
+## PGD terminology
+
+Please refer to the
+[PGD terminology page for further information](https://www.enterprisedb.com/docs/pgd/latest/terminology/).
+
+[Node](https://www.enterprisedb.com/docs/pgd/latest/terminology/#node)
+: A PGD database instance.
+
+[Failover](https://www.enterprisedb.com/docs/pgd/latest/terminology/#failover)
+: The automated process that recognizes a failure in a highly available database cluster and takes action to connect the application to another active database.
+
+[Switchover](https://www.enterprisedb.com/docs/pgd/latest/terminology/#switchover)
+: A planned change in connection between the application and the active database node in a cluster, typically done for maintenance.
+
+[Write leader](https://www.enterprisedb.com/docs/pgd/latest/terminology/#write-leader)
+: In always-on architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of proxy nodes.
+
+## Cloud terminology
+
+Region
+: A *region* in the Cloud is an isolated and independent geographic area
+ organized in *availability zones*. Zones within a region have very little
+ round-trip network latency.
+
+Zone
+: An *availability zone* in the Cloud (also known as *zone*) is an area in a
+ region where resources can be deployed. Usually, an availability zone
+ corresponds to a data center or an isolated building of the same data center.
+
+## What to do next
+
+Now that you have familiarized yourself with the terminology, you can decide to
+[test EDB Postgres Distributed for Kubernetes (PG4K-PGD) on your laptop using a local cluster](quickstart.md) before
+deploying the operator in your selected cloud environment.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
new file mode 100644
index 00000000000..cc9e2463f39
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -0,0 +1,30 @@
+---
+title: 'Certificates'
+originalFilePath: 'src/certificates.md'
+---
+
+EDB Postgres Distributed for Kubernetes has been designed to natively support TLS certificates.
+In order to set up a PGD cluster, each PGD node requires:
+
+- a server Certification Authority (CA) certificate
+- a server TLS certificate signed by the server Certification Authority
+- a client Certification Authority (CA) certificate
+- a streaming replication client certificate generated by the client Certification Authority
+
+!!! Note
+ You can find all the secrets used by each PGD Node and the expiry dates in
+ the Cluster (PGD Node) Status.
+
+EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates, and
+primarily operates in two modes:
+
+1. **operator managed**: certificates are internally
+ managed by the operator in a fully automated way, and signed using a CA created
+ by EDB Postgres Distributed for Kubernetes
+2. **user provided**: certificates are
+   generated outside the operator and imported into the cluster definition as
+   secrets. EDB Postgres Distributed for Kubernetes integrates with cert-manager (see
+ examples below)
+
+You can find further information in the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/certificates/).
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index ef5676337b2..fd50828c2ba 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -10,7 +10,7 @@ PGD Cluster, covering the following topics:
- [domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-!!! Note
+!!! Note
Although the above topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
@@ -26,6 +26,8 @@ Every PGDGroup manages several of them, namely:
- a *proxy service*, to enable applications to reach the write leader of the
group, transparently using PGD proxy
+For an example using these services, see [Connecting to a PGD cluster from an application](#connecting-to-a-pgd-cluster-from-an-application).
+
![Basic architecture of an EDB Postgres Distributed for Kubernetes PGD group](./images/pg4k-pgd-basic-architecture.png)
Each service is generated from a customizable template in the `.spec.connectivity`
@@ -158,4 +160,70 @@ Alternatively, you can specify a secret containing the pre-provisioned
client certificate for the streaming replication user through the
`.spec.connectivity.tls.clientCert.preProvisioned.streamingReplica.secretRef` option.
The certificate lifecycle in this case is managed entirely by a third party,
-either manually or automated, by simply updating the content of the secret.
\ No newline at end of file
+either manually or automated, by simply updating the content of the secret.
+
+## Connecting to a PGD cluster from an application
+
+Connecting to a PGD Group from an application running inside the same Kubernetes cluster
+or from outside the cluster is a simple procedure. In both cases, you will connect to
+the proxy service of the PGD Group as the `app` user. The proxy service is a LoadBalancer
+service that will route the connection to the write leader of the PGD Group.
+
+### Connecting from inside the cluster
+
+When connecting from inside the cluster, you can use the proxy service name to connect
+to the PGD Group. The proxy service name is composed of the PGD Group name and the (optional)
+host suffix defined in the `.spec.connectivity.dns` section of the PGDGroup custom resource.
+
+For example, if the PGD Group name is `my-group` and the host suffix is `.my-domain.com`,
+the proxy service name will be `my-group.my-domain.com`.
+
+Before connecting you will need to get the password for the app user from the app user
+secret. The naming format of the secret is `my-group-app` for a PGD Group named `my-group`.
+
+You can get the username and password from the secret with the following commands:
+
+```sh
+kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode
+kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode
+```
+
+With this you now have all the pieces for a connection string to the PGD Group:
+
+```text
+postgresql://app-user:app-password@my-group.my-domain.com:5432/database
+```
+
+or for a `psql` invocation:
+
+```sh
+psql -U app-user -h my-group.my-domain.com
+```
+
+where `app-user` and `app-password` are the values you got from the secret,
+and `database` is the name of the database you want to connect
+to (the default is `app` for the app user).
+
+### Connecting from outside the Kubernetes cluster
+
+When connecting from outside the Kubernetes cluster, in the general case,
+the [*Ingress*](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [*Load Balancer*](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) will be necessary.
+Check your cloud provider or local installation for more information about
+their behavior in your environment.
+
+Ingresses and Load Balancers require a Pod selector to forward connections to
+the PGD proxies. When configuring them, we suggest using the following labels
+(see the example Service after this list):
+
+- `k8s.pgd.enterprisedb.io/group`: set to the PGD group name
+- `k8s.pgd.enterprisedb.io/workloadType`: set to `pgd-proxy`
+
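+As an illustrative sketch (names and namespace are placeholders, and your cloud
+provider may require additional annotations), a `LoadBalancer` Service selecting
+the PGD proxies could look like this:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-group-proxy-lb
+  namespace: my-namespace
+spec:
+  type: LoadBalancer
+  selector:
+    k8s.pgd.enterprisedb.io/group: my-group
+    k8s.pgd.enterprisedb.io/workloadType: pgd-proxy
+  ports:
+    - name: postgres
+      port: 5432
+      targetPort: 5432  # assumes PGD Proxy listens on the default Postgres port
+```
+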
+If using Kind or other solutions for local development, the easiest way to
+access the PGD Group from outside is to use port forwarding
+to the proxy service. You can use the following command to forward port 5432 on your
+local machine to the proxy service:
+
+```sh
+kubectl port-forward svc/my-group.my-domain.com 5432:5432
+```
+
+where `my-group.my-domain.com` is the proxy service name from the previous example.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/always_on_1x3_updated.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/always_on_1x3_updated.png
new file mode 100644
index 00000000000..05c7072cd87
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/always_on_1x3_updated.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce8008b5cfb11f454d792c48c00f824eda8efe03dcd2e3fb9a32723abc6ab26a
+size 12341
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-in-k8s.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-in-k8s.png
new file mode 100644
index 00000000000..832dcb3c59b
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-in-k8s.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afe49c1bcdb498302c3cf0af1bd058b43ca98a0a4de15c25e354912443d58eb0
+size 45106
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-outside-k8s.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-outside-k8s.png
new file mode 100644
index 00000000000..4259c49ec5c
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/apps-outside-k8s.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e687abe20e25f9589a094860769d2272ade598ecd643035712caa6a9b620e42
+size 54998
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/edb-repo-portal.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/edb-repo-portal.png
new file mode 100644
index 00000000000..d258736bf77
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/edb-repo-portal.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:465c8d9f3f12d1cf07069432d634d0ada269bdf764e89fe1a37b0a8b8e0b78b8
+size 732338
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-3-az.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-3-az.png
new file mode 100644
index 00000000000..bbc0f09f6be
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-3-az.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b5abe82c6febf14dc1c2c09fe5c40f129e70053fefe654983e64bac0ab301a4
+size 119593
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-multi.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-multi.png
new file mode 100644
index 00000000000..51a22831b4e
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/k8s-architecture-multi.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7abed062c67cca40349271f22d28595c4e18ddbd6a3da6b62570e8e19590edb2
+size 137762
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/all-namespaces.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/all-namespaces.png
index d296a0f2cfe..7370d1efc1b 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/all-namespaces.png
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/all-namespaces.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f5dc49a56dbbf2cd2bbedec2c90f017de72961c2a42dc9231c08d0e1deee1320
-size 77073
+oid sha256:28b8d97d2ab41d3b3f6d84587d5bf805fde6ec41d7939a61852b18b0b3636cf3
+size 108291
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/find-pgd-openshift.png b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/find-pgd-openshift.png
index b108f18d3e7..85e1460c333 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/find-pgd-openshift.png
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/images/openshift/find-pgd-openshift.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0a97a6cecc165c2da26ee681b130635d0078ffdf33c4f085a0ef72b3e8609ce2
-size 79551
+oid sha256:1266ea593a84cab6430ef89b4da1c40074ca506037877bc82cdd07b8b03d4dcc
+size 109083
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index 19f0359cb13..d0d657c65ab 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -3,19 +3,27 @@ title: EDB Postgres Distributed for Kubernetes
originalFilePath: src/index.md
indexCards: none
navigation:
- - rel_notes
- - '!release_notes*'
+ - release_notes
+ - before_you_start
- quickstart
- installation_upgrade
- architecture
+ - certificates
+ - use_cases
+ - using_pgd
+ - ssl_connections
+ - security
+ - backup
+ - recovery
- connectivity
+ - private_registries
+ - openshift
- samples
- api_reference
- '!api_reference.md.in'
directoryDefaults:
iconName: logos/KubernetesMono
- hideVersion: true
- displayBanner: Preview release v0.6
+
---
**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
index f1ad55cf525..32db913e2ce 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
@@ -5,25 +5,31 @@ originalFilePath: 'src/installation_upgrade.md'
## Kubernetes
-### Installation using Helm
-
EDB Postgres Distributed for Kubernetes can be installed using the provided
[Helm chart](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
If you don't have [Helm](https://helm.sh) installed yet, please follow the
-[instructions](https://helm.sh/docs/intro/quickstart/) to install it in your
-system.
+[official instructions](https://helm.sh/docs/intro/quickstart/) to install it
+in your system.
Assuming you have Helm installed, the first step is to add the repository:
```console
-helm repo add edb https://enterprisedb.github.io/edb-postgres-for-kubernetes-charts/
+helm repo add edb \
+ https://enterprisedb.github.io/edb-postgres-for-kubernetes-charts/
```
-You will need credentials to enable `helm` to retrieve the various
-operator and operand images that are stored in private repositories.
-Make sure to replace your username and password
-in the command below:
+!!! Important
+ You'll need access to the private EDB repository where both the operator
+ and operand images are stored. Access requires a valid
+ [EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
+ Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
+
+Given that the container images for both the operator and the selected operand
+are in EDB's private registry, you need your credentials to enable `helm` to
+retrieve them.
+
+Make sure to replace your repo and token in the command below:
```console
helm upgrade --dependency-update \
@@ -31,16 +37,33 @@ helm upgrade --dependency-update \
--namespace pgd-operator-system \
--create-namespace \
edb/edb-postgres-distributed-for-kubernetes \
- --set image.imageCredentials.username=${REPO} \
- --set image.imageCredentials.password=${TOKEN}
+ --set image.imageCredentials.username=@@REPOSITORY@@ \
+ --set image.imageCredentials.password=@@TOKEN@@
```
-Set `REPO` to either `k8s_enterprise_pgd` or `k8s_standard_pgd` depending on the EDB software subscription purchased and Postgres distribution to be installed. Use `k8s_enterprise_pgd` if you are a trialist or preview user.
+In particular:
+
+- set `@@REPOSITORY@@` to the name of the repository, as explained in ["Which repository to
+ choose?"](private_registries.md#which-repository-to-choose)
+- set `@@TOKEN@@` to the repository token for your EDB account, as explained in
+ ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
+
+Remember to create a cert issuer before you start deploying PGD clusters.
+The Helm chart prompts you to do this, but in case you missed it, you can run,
+for example:
-Set `TOKEN` to the repository token for your EDB account. You can obtain this by going to the [Repos page](https://www.enterprisedb.com/repos-downloads) on the EDB website, signing in (if necessary) and then and displaying the EDB Repos 2.0 token using the Reveal Token button or copying it using the Copy button.
+```sh
+kubectl apply -f \
+ https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```
+
+!!! Info
+ For further details on the Helm chart, please refer to the
+ [Helm chart repo documentation](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
-For further details on the Helm chart, please refer to the
-[Helm chart repo](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
+With the operators and a self-signed cert issuer deployed, you can start
+creating PGD clusters. Please refer to the
+["Quickstart"](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
+If you are trying to install EDB Postgres Distributed for Kubernetes on Red Hat OpenShift,
+please refer to the ["Red Hat OpenShift section"](openshift.md) which contains
+information on the certified operator maintained by EDB.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
new file mode 100644
index 00000000000..c0bfdfb239b
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
@@ -0,0 +1,426 @@
+---
+title: 'Red Hat OpenShift'
+originalFilePath: 'src/openshift.md'
+---
+
+EDB Postgres Distributed for Kubernetes is a certified operator that can be
+installed on OpenShift via the web interface.
+
+## Ensuring access to EDB private registry
+
+!!! Important
+ You'll need access to the private EDB repository where both the operator
+ and operand images are stored. Access requires a valid
+ [EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
+ Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
+
+The OpenShift installation uses pull secrets to access the
+operand and operator images, which are held in a private repository.
+
+Once you have credentials to the private repo, you will need to create
+two pull secrets in the `openshift-operators` namespace, named:
+
+- `pgd-operator-pull-secret`, for the EDB Postgres Distributed for Kubernetes operator images
+- `postgresql-operator-pull-secret`, for the EDB Postgres for Kubernetes operator images
+
+You can create each secret via the `oc create` command, as follows:
+
+```sh
+oc create secret docker-registry pgd-operator-pull-secret \
+ -n openshift-operators --docker-server=docker.enterprisedb.com \
+ --docker-username="@@REPOSITORY@@" \
+ --docker-password="@@TOKEN@@"
+
+oc create secret docker-registry postgresql-operator-pull-secret \
+ -n openshift-operators --docker-server=docker.enterprisedb.com \
+ --docker-username="@@REPOSITORY@@" \
+ --docker-password="@@TOKEN@@"
+```
+
+where:
+
+- `@@REPOSITORY@@` is the name of the repository, as explained in ["Which repository to
+ choose?"](private_registries.md#which-repository-to-choose)
+- `@@TOKEN@@` is the repository token for your EDB account, as explained in
+ ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
+
+## Installing the operator
+
+The EDB Postgres Distributed for Kubernetes operator can be found in the Red
+Hat OperatorHub directly from your OpenShift dashboard.
+
+1. Navigate in the web console to the `Operators -> OperatorHub` page:
+
+ ![Menu OperatorHub](./images/openshift/operatorhub_1.png)
+
+2. Use the search box to restrict the listing, e.g. using `EDB` or `pgd`:
+
+ ![Install OperatorHub](./images/openshift/find-pgd-openshift.png)
+
+3. Read the information about the Operator and select `Install`
+
+4. The following `Operator installation` page expects you to choose:
+
+ - the installation mode ([cluster-wide](#cluster-wide-installation) is the
+ only mode at the moment)
+ - the update channel (at the moment `preview`)
+   - the approval strategy to apply when a new release of the operator,
+     certified by Red Hat, becomes available on the marketplace:
+ - `Automatic`: OLM automatically upgrades the running operator with the
+ new version
+ - `Manual`: OpenShift waits for human intervention, by requiring an
+ approval in the `Installed Operators` section
+
+### Cluster-wide installation
+
+With cluster-wide installation, you are asking OpenShift to install the
+Operator in the default `openshift-operators` namespace and to make it
+available to all the projects in the cluster.
+
+This is the default and normally recommended approach for installing EDB
+Postgres Distributed for Kubernetes.
+
+From the web console, select `All namespaces on the cluster (default)` as
+`Installation mode`:
+
+![Install all namespaces](./images/openshift/all-namespaces.png)
+
+On installation, the operator is visible in all namespaces. If there were
+problems during installation, check the logs of the pods in the
+`openshift-operators` project on the `Workloads → Pods` page,
+as you would with any other OpenShift operator.
+
+!!! Important "Beware"
+ By choosing the cluster-wide installation you cannot easily move to a
+ single project installation at a later time.
+
+## Creating a PGD cluster
+
+After the installation from OpenShift, you should find the operator deployment
+in the `openshift-operators` namespace. Note that the cert-manager operator is
+also installed, as is the EDB Postgres for Kubernetes operator
+(`postgresql-operator-controller-manager`).
+
+```sh
+$ oc get deployments -n openshift-operators
+NAME READY UP-TO-DATE AVAILABLE AGE
+cert-manager-operator 1/1 1 1 11m
+pgd-operator-controller-manager 1/1 1 1 11m
+postgresql-operator-controller-manager-1-20-0 1/1 1 1 23h
+…
+```
+
+Once the `pgd-operator-controller-manager` deployment is READY, you can
+start creating PGD clusters. The EDB Postgres Distributed for Kubernetes
+repository contains some useful sample files.
+
+Remember to deploy your PGD clusters on a dedicated namespace/project. The
+default namespace is reserved.
+
+First, create a new namespace, and deploy a
+[self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
+in it:
+
+```sh
+oc create ns my-namespace
+oc apply -n my-namespace -f \
+ https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```
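+
+Before moving on, you might want to check that cert-manager created the
+expected objects in the new namespace (the resource names assume the sample
+issuer manifest above):
+
+```sh
+# The self-signed issuers and the CA certificates should report Ready
+oc get issuers.cert-manager.io,certificates.cert-manager.io -n my-namespace
+```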
+
+### Using PGD in a single OpenShift cluster in a single region
+
+For multi-cluster and multi-region deployments, see [the following section](#using-pgd-in-multiple-openshift-clusters-in-multiple-regions).
+
+Now you can deploy a PGD cluster, for example a flexible 3-region cluster,
+which contains two data groups and a witness group. You can find the YAML
+manifest in the file [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml).
+
+```sh
+oc apply -f flexible_3regions.yaml -n my-namespace
+```
+
+You should start seeing your PGD groups come up:
+
+```sh
+$ oc get pgdgroups -n my-namespace
+NAME DATA INSTANCES WITNESS INSTANCES PHASE PHASE DETAILS AGE
+region-a 2 1 PGDGroup - Healthy 23m
+region-b 2 1 PGDGroup - Healthy 23m
+region-c 0 1 PGDGroup - Healthy 23m
+```
+
+### Using PGD in multiple OpenShift clusters in multiple regions
+
+To deploy PGD in multiple OpenShift clusters in multiple regions, you must first establish a way for the
+PGD Groups to communicate with each other. The recommended way of achieving this with multiple OpenShift clusters is to use
+[Submariner](https://submariner.io/getting-started/quickstart/openshift/). Configuring the connectivity is outside the
+scope of this document, but once you have established connectivity between the OpenShift clusters, you can deploy
+PGD Groups synced with one another.
+
+!!! Warning
+    This example assumes you are deploying three PGD Groups, one in each OpenShift
+    cluster, and that you have established connectivity between the OpenShift clusters using Submariner.
+
+As in the [single cluster example](#using-pgd-in-a-single-openshift-cluster-in-a-single-region), we will create
+two data PGD Groups and one witness group. In contrast to that example,
+each group will live in a different OpenShift cluster.
+
+In addition to basic connectivity between the OpenShift clusters, you will need to ensure that each OpenShift cluster
+contains a certificate authority that is trusted by the other OpenShift clusters. This is required for the PGD Groups
+to communicate with each other.
+
+The OpenShift clusters can all use
+the same certificate authority, or each cluster can have its own certificate
+authority. Either way, make sure that each OpenShift cluster's
+certificates trust the other OpenShift clusters' certificate authorities.
+
+For illustration, we will use a self-signed certificate setup with a single
+certificate authority for all certificates on all our OpenShift clusters.
+
+In this demo we will refer to the OpenShift clusters as `Openshift Cluster A`, `Openshift Cluster B`, and
+`Openshift Cluster C`. In OpenShift, installing the PG4K-PGD operator from OperatorHub also installs the
+*cert-manager* operator; creating and managing certificates with cert-manager is
+recommended. We create a namespace in `Openshift Cluster A`, and in it
+we also create the objects needed for a self-signed certificate. Assuming
+that the PGD operator and cert-manager are installed, we create a [self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
+in that namespace.
+
+```sh
+oc create ns pgd-group
+oc apply -n pgd-group -f \
+ https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```
+
+After a few moments, cert-manager should have created the Issuers and Certificates. Additionally, there should now be
+two secrets in the `pgd-group` namespace: `server-ca-key-pair` and `client-ca-key-pair`. These secrets contain
+the certificates and private keys for the server and client certificate authorities. We will need to copy these secrets
+to the other OpenShift clusters **before applying** the `issuer-selfsigned.yaml` manifest. We can use the
+`oc get secret` command to get the contents of the secrets.
+
+```sh
+oc get secret server-ca-key-pair -n pgd-group -o yaml > server-ca-key-pair.yaml
+oc get secret client-ca-key-pair -n pgd-group -o yaml > client-ca-key-pair.yaml
+```
+
+After removing the content specific to `Openshift Cluster A`
+from the above secrets (such as `uid`, `resourceVersion`, and timestamps; one
+way to do this is sketched below), we can switch our
+context to `Openshift Cluster B`; we create the namespace, create our
+secrets in it, and only then apply the `issuer-selfsigned.yaml` file.
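+
+One possible way to strip the cluster-specific metadata (a sketch that assumes
+the `yq` v4 CLI is available; you can also simply edit the files by hand) is:
+
+```sh
+# Remove fields that only make sense in the original cluster before copying
+yq -i 'del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.creationTimestamp) | del(.metadata.ownerReferences)' server-ca-key-pair.yaml
+yq -i 'del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.creationTimestamp) | del(.metadata.ownerReferences)' client-ca-key-pair.yaml
+```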
+
+```sh
+oc create ns pgd-group
+oc apply -n pgd-group -f server-ca-key-pair.yaml
+oc apply -n pgd-group -f client-ca-key-pair.yaml
+oc apply -n pgd-group -f \
+ https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```
+
+Finally, we can switch our context to `Openshift Cluster C`, and repeat
+the same process we followed for Cluster B.
+
+```sh
+oc create ns pgd-group
+oc apply -n pgd-group -f server-ca-key-pair.yaml
+oc apply -n pgd-group -f client-ca-key-pair.yaml
+oc apply -n pgd-group -f \
+ https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```
+
+Now, back on `Openshift Cluster A`, we can create our first PGD Group, called `region-a`. The YAML manifest for the PGD Group is as
+follows:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: region-a
+spec:
+ instances: 2
+ proxyInstances: 2
+ witnessInstances: 1
+ pgd:
+ parentGroup:
+ name: world
+ create: true
+ discovery:
+ - host: region-a-group.pgd-group.svc.clusterset.local
+ - host: region-b-group.pgd-group.svc.clusterset.local
+ - host: region-c-group.pgd-group.svc.clusterset.local
+ cnp:
+ storage:
+ size: 1Gi
+ connectivity:
+ dns:
+ domain: "pgd-group.svc.clusterset.local"
+ additional:
+ - domain: alternate.domain
+ - domain: my.domain
+ hostSuffix: -dc1
+ tls:
+ mode: verify-ca
+ clientCert:
+ caCertSecret: client-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: client-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+ serverCert:
+ caCertSecret: server-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: server-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+```
+
+!!! Important
+    Please note that the format of the hostnames in the `discovery` section differs from the single cluster
+    example. This is because we are using Submariner to connect the OpenShift clusters, and Submariner uses the
+    `<service>.<namespace>.svc.clusterset.local` domain to route traffic between the clusters. `region-a-group` is the
+    name of the service that will be created for the PGD Group named `region-a`.
+
+Let's apply the `region-a` PGD Group YAML:
+
+```sh
+oc apply -f region-a.yaml -n pgd-group
+```
+
+We can now switch our context to `Openshift Cluster B` and create our second PGD Group. The YAML for the PGD Group in Cluster B
+is as follows. Besides the `metadata.name`, it differs from `region-a` in that it doesn't create the parent group again and omits the additional DNS domains:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: region-b
+spec:
+ instances: 2
+ proxyInstances: 2
+ witnessInstances: 1
+ pgd:
+ parentGroup:
+ name: world
+ discovery:
+ - host: region-a-group.pgd-group.svc.clusterset.local
+ - host: region-b-group.pgd-group.svc.clusterset.local
+ - host: region-c-group.pgd-group.svc.clusterset.local
+ cnp:
+ storage:
+ size: 1Gi
+ connectivity:
+ dns:
+ domain: "pgd-group.svc.clusterset.local"
+ tls:
+ mode: verify-ca
+ clientCert:
+ caCertSecret: client-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: client-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+ serverCert:
+ caCertSecret: server-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: server-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+```
+
+Apply the `region-b` PGD Group YAML:
+
+```sh
+oc apply -f region-b.yaml -n pgd-group
+```
+
+And finally, we can switch our context to `Openshift Cluster C` and create our third PGD Group. The YAML for the PGD
+Group is as follows:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: region-c
+spec:
+ instances: 0
+ proxyInstances: 0
+ witnessInstances: 1
+ pgd:
+ parentGroup:
+ name: world
+ discovery:
+ - host: region-a-group.pgd-group.svc.clusterset.local
+ - host: region-b-group.pgd-group.svc.clusterset.local
+ - host: region-c-group.pgd-group.svc.clusterset.local
+ cnp:
+ storage:
+ size: 1Gi
+ connectivity:
+ dns:
+ domain: "pgd-group.svc.clusterset.local"
+ tls:
+ mode: verify-ca
+ clientCert:
+ caCertSecret: client-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: client-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+ serverCert:
+ caCertSecret: server-ca-key-pair
+ certManager:
+ spec:
+ issuerRef:
+ name: server-ca-issuer
+ kind: Issuer
+ group: cert-manager.io
+```
+
+Apply the `region-c` PGD Group YAML:
+
+```sh
+oc apply -f region-c.yaml -n pgd-group
+```
+
+Now we can switch our context back to `Openshift Cluster A` and check the status of our PGD Group there.
+
+```sh
+oc get pgdgroup region-a -n pgd-group
+```
+
+We should expect to find the PGD group in phase
+`PGD - Waiting for node discovery`.
+
+After creating the PGD Groups in each OpenShift cluster, which will in turn create the services for each node, you will
+need to expose the services to the other OpenShift clusters. This can be done in various ways.
+Since we are using Submariner, we will do it using the
+[`subctl`](https://submariner.io/operations/deployment/subctl/) command.
+We need to run the `subctl export service` command for each service in our
+`pgd-group` namespace that has a `-group` or `-node` suffix. We can accomplish this by running the following bash
+`for` loop on each cluster:
+
+```sh
+for service in $(oc get svc -n pgd-group --no-headers -o custom-columns="NAME:.metadata.name" | grep -E '(-group|-node)$'); do
+ subctl export service $service -n pgd-group
+done
+```
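+
+If you want to confirm that the services were exported, each of them should
+now have a corresponding `ServiceExport` object (a Submariner/Lighthouse
+resource) in the namespace, for example:
+
+```sh
+# Expect one ServiceExport per exported service in the pgd-group namespace
+oc get serviceexports -n pgd-group
+```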
+
+After a few minutes, the status should show that the PGD Group is healthy. Once each PGD Group is healthy, you can write
+to the `app` database in either of the two data groups, `region-a` or `region-b`, and the data will be replicated to the
+other data group.
+
+
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
new file mode 100644
index 00000000000..ad6d1de2e9a
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
@@ -0,0 +1,103 @@
+---
+title: 'EDB private image registries'
+originalFilePath: 'src/private_registries.md'
+---
+
+The images for the EDB Postgres Distributed for Kubernetes and EDB Postgres for
+Kubernetes operators, as well as various operands, are kept in private
+container image registries under `docker.enterprisedb.com`.
+
+!!! Important
+ Access to the private registries requires an account with EDB and is
+ reserved to EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
+    Credentials are provided through your EDB account.
+ For trials, please refer to the ["Trials"](#trials) section below.
+
+## Which repository to choose?
+
+EDB Postgres Distributed for Kubernetes is available as part of the "Extreme
+High Availability Add-On" on top of either the "EDB Enterprise Plan" or "EDB
+Standard Plan".
+
+Depending on your subscription plan, EDB Postgres Distributed for Kubernetes
+will be in one of the following repositories, as described in the table below:
+
+| Plan | Repository |
+| --------------------- | -------------------- |
+| EDB Standard Plan | `k8s_standard_pgd` |
+| EDB Enterprise Plan   | `k8s_enterprise_pgd` |
+
+Use the name of the repository as the *Username* when you log in to
+the EDB container registry, for example through `docker login` or a
+[`kubernetes.io/dockerconfigjson` pull secret](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types), as sketched below.
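+
+For illustration only, this is how you might create such a pull secret with
+`kubectl` (the secret name and namespace are arbitrary; replace the repository
+name and the token placeholder with your own values):
+
+```sh
+kubectl create secret docker-registry edb-pull-secret \
+  -n my-namespace \
+  --docker-server=docker.enterprisedb.com \
+  --docker-username=k8s_enterprise_pgd \
+  --docker-password=<your-repo-token>
+```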
+
+!!! Important
+ Each repository contains all the images you can access with your plan.
+ You don't need to connect to different repositories to access different
+ images, such as operator or operand images.
+
+## How to retrieve the token
+
+In the ["repos" page in EDB](https://www.enterprisedb.com/repos-downloads),
+you'll find an *EDB Repos 2.0* section where a `Repo Token` is shown obscured.
+
+![EDB Repo Portal](images/edb-repo-portal.png)
+
+Next to the "Repo Token" you'll find a button to copy the token, and an eye icon
+in case you want to look at the content of the token.
+
+The "Repo Token" shall be used as the *Password* when you try to login to EDB
+container registry.
+
+### Example with `docker login`
+
+You should be able to log in via Docker from your terminal. We suggest you
+copy the Repo Token using the `Copy Token` button. The `docker` command below
+will prompt you for a username and a password.
+
+As explained above, the username should be the repository you are trying to access,
+while the password is the token you just copied.
+
+```sh
+$ docker login docker.enterprisedb.com
+Username: k8s_enterprise_pgd
+Password:
+Login Succeeded
+```
+
+## Trials
+
+If you are a trialist or a preview user, use `k8s_enterprise_pgd` as the name
+of the repository and follow the instructions in
+["How to retrieve the token"](#how-to-retrieve-the-token) for the token.
+
+## Operand images
+
+EDB Postgres Distributed for Kubernetes is an operator that supports running
+Postgres Distributed (PGD) version 5 on three PostgreSQL distributions:
+
+- PostgreSQL
+- EDB Postgres Advanced
+- EDB Postgres Extended
+
+!!! Important
+ Please refer to ["Choosing a Postgres distribution"](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+ from the PGD documentation for details and a comparison of PGD on the
+ different supported PostgreSQL distributions.
+
+Due to the immutable application container adoption in EDB operators, the
+operator expects that the container images include all the binaries required
+to run the requested version of PGD on top of the required distribution and
+version of Postgres.
+
+These images follow the requirements and the conventions described in the
+["Container image requirements"](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/container_images/)
+page of the EDB Postgres for Kubernetes documentation, adding the `bdr5`
+extension.
+
+In the table below you can find the image name prefix for each Postgres distribution:
+
+| Postgres distribution | Versions | Image name | Repositories |
+| --------------------- | -------- | --------------------------- | ---------------------------------------- |
+| EDB Postgres Extended | 15, 14 | `edb-postgres-extended-pgd` | `k8s_standard_pgd`, `k8s_enterprise_pgd` |
+| EDB Postgres Advanced | 15, 14 | `edb-postgres-advanced-pgd` | `k8s_enterprise_pgd` |
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index c105066870d..b4cf8ae7ba7 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -53,7 +53,7 @@ As with any other deployment in Kubernetes, to deploy a PGD cluster you need to
apply a configuration file that defines your desired `PGDGroup` resources that
make up a PGD cluster.
-Some sample files are included (see [Examples of configuration](samples.md)). The
+Some sample files are included in the PG4K-PGD repository. The
[flexible_3regions.yaml](../samples/flexible_3regions.yaml) manifest
contains the definition of a PGD cluster with 2 Data Groups and a global
witness node spread across 3 regions. Each Data Group consists of 2 data nodes
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
new file mode 100644
index 00000000000..4c4e837fe78
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
@@ -0,0 +1,172 @@
+---
+title: 'Recovery'
+originalFilePath: 'src/recovery.md'
+---
+
+In EDB Postgres Distributed for Kubernetes (PG4K-PGD), recovery is available as a way
+to bootstrap a new PGD Group starting from an available physical backup of a PGD Node.
+The recovery cannot be performed "in-place" on an existing PGD Group.
+PG4K-PGD also supports Point In Time Recovery, which allows you to restore a PGDGroup up to
+any point in time, from the first available backup in your catalog to the last archived
+WAL (having a WAL archive is mandatory in this case).
+
+## Prerequisites
+
+Before recovering from a backup, take the following considerations into account:
+
+- Make sure that the PostgreSQL configuration (`.spec.cnp.postgresql.parameters`) of the
+ recovered cluster is compatible, from a physical replication standpoint, with the original one.
+
+- When recovering in a newly created namespace, remember to first set up a cert-manager CA Issuer before deploying the recovered PGDGroup, as sketched below.
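+
+For example, a minimal way to prepare a fresh namespace before applying the
+recovered PGDGroup might be (the namespace name is illustrative; the issuer
+manifest is the same sample one referenced in the installation instructions):
+
+```sh
+kubectl create ns pgdgroup-restore-ns
+kubectl apply -n pgdgroup-restore-ns -f \
+  https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
+```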
+
+For further information, refer to the [PG4K Recovery - Additional considerations](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#additional-considerations) documentation section.
+
+## Recovery from an object store
+
+You can recover from a PGD Node backup created by Barman Cloud and stored on supported object storage.
+
+For example, given a PGDGroup named `pgdgroup-example` with three instances and with backups available, your object storage
+should contain a directory for each node:
+
+`pgdgroup-example-1`, `pgdgroup-example-2`, `pgdgroup-example-3`
+
+The following example defines a full recovery from the object store.
+The operator will transparently select the latest backup among the defined `serverNames`, and
+replay up to the last available WAL.
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: pgdgroup-restore
+spec:
+ [...]
+ restore:
+ serverNames:
+ - pgdgroup-backup-1
+ - pgdgroup-backup-2
+ - pgdgroup-backup-3
+ barmanObjectStore:
+ destinationPath: ""
+ s3Credentials:
+ accessKeyId:
+ name: backup-storage-creds
+ key: ID
+ secretAccessKey:
+ name: backup-storage-creds
+ key: KEY
+ wal:
+ compression: gzip
+ encryption: AES256
+ maxParallel: 8
+```
+
+!!! Important
+ Make sure to correctly configure the WAL section according to the source cluster.
+ In the above example, since the `pgdgroup-example` PGDGroup uses `compression`
+ and `encryption`, make sure to set the proper parameters also in the PGDGroup
+ that's being created by the `restore`.
+
+!!! Note
+ In the above example we are taking advantage of the parallel WAL restore feature,
+ dedicating up to 8 jobs to concurrently fetch the required WAL files from the archive.
+ This feature can appreciably reduce the recovery time. Make sure that you plan ahead
+ for this scenario and tune the value of this parameter for your environment.
+    It will certainly make a difference when you need it.
+
+## Point in time recovery (PITR) from an object store
+
+Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL to stop replaying
+WALs at any given point in time, after having extracted a base backup.
+PostgreSQL uses this technique to achieve point-in-time recovery (PITR).
+The presence of a WAL archive is mandatory.
+
+The following example defines a time-based recovery target:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: pgdgroup-restore
+spec:
+ [...]
+ restore:
+ recoveryTarget:
+ targetTime: "2023-08-11 11:14:21.00000+02"
+ serverNames:
+ - pgdgroup-backup-1
+ - pgdgroup-backup-2
+ - pgdgroup-backup-3
+ barmanObjectStore:
+ destinationPath: ""
+ s3Credentials:
+ accessKeyId:
+ name: backup-storage-creds
+ key: ID
+ secretAccessKey:
+ name: backup-storage-creds
+ key: KEY
+ wal:
+ compression: gzip
+ encryption: AES256
+ maxParallel: 8
+```
+
+!!! Important
+    PITR requires you to specify a `targetTime` recovery target, using the options described
+ in the "Recovery targets" section below. When you use `targetTime` or `targetLSN`, the operator
+ automatically selects the closest backup that was completed before that target. Otherwise, it
+ selects the last available backup in chronological order between the specified `serverNames`.
+
+## Recovery from an object store specifying a `backupID`
+
+The `.spec.restore.recoveryTarget.backupID` option allows you to specify a base backup from
+which to initiate the recovery process. By default, this value is empty.
+If you assign a value to it (in the form of a Barman backup ID), the operator will use that backup as the base for the recovery.
+
+The following example recovers a new PGDGroup from a specific backupID of the
+`pgdgroup-backup-1` PGD Node:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+ name: pgdgroup-restore
+spec:
+ [...]
+ restore:
+ recoveryTarget:
+ backupID: 20230824T133000
+ serverNames:
+ - pgdgroup-backup-1
+ barmanObjectStore:
+ destinationPath: ""
+ s3Credentials:
+ accessKeyId:
+ name: backup-storage-creds
+ key: ID
+ secretAccessKey:
+ name: backup-storage-creds
+ key: KEY
+ wal:
+ compression: gzip
+ encryption: AES256
+ maxParallel: 8
+```
+
+!!! Important
+ When a `backupID` is specified, make sure to only define the related PGD Node
+ in the `serverNames` option, and avoid defining the other ones.
+
+!!! Note
+ Defining a specific `backupID` is especially needed when using one of the
+ following recovery targets: `targetName`, `targetXID`, and `targetImmediate`.
+ In such cases, it is important to specify `backupID`, unless you are OK with
+ the last available backup in the catalog.
+
+## Recovery targets
+
+Beyond PITR, there are other recovery target criteria you can use.
+For more information on all the available recovery targets, refer to the
+[PG4K recovery targets](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#point-in-time-recovery-pitr)
+documentation, at the end of that section.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/0_6_rel_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/0_6_rel_notes.mdx
deleted file mode 100644
index 4760fd1e2c1..00000000000
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/0_6_rel_notes.mdx
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: "EDB Postgres Distributed for Kubernetes 0.6 release notes"
-navTitle: "Preview version 0.6"
----
-
-| Type | Description |
-| ------- | -------------------------------------- |
-| Feature | This is the initial preview release. |
-
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
deleted file mode 100644
index cf3533088ad..00000000000
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: EDB Postgres for Kubernetes Release notes
-navTitle: "Release notes"
-redirects:
-- ../release_notes
-navigation:
-- 0_6_rel_notes
----
-
-The EDB Postgres Distributed for Kubernetes documentation describes the major version of EDB Postgres Distributed for Kubernetes, including minor releases and patches. The release notes provide information on what is new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature.
-
-| Version | Release date |
-| -------------------------- | ------------ |
-| [0.6.0](0_6_rel_notes) | 15 May 2023 |
-
-
-
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
new file mode 100644
index 00000000000..7ac7a558a95
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
@@ -0,0 +1,30 @@
+---
+title: 'Release notes'
+originalFilePath: 'src/release_notes.md'
+---
+
+History of user-visible changes for EDB Postgres Distributed for Kubernetes.
+
+## Version 1.0.0
+
+**Release date:** 29 August 2023
+
+This is the first major stable release of EDB Postgres Distributed for
+Kubernetes, a Kubernetes operator to deploy and manage
+EDB Postgres Distributed clusters.
+
+The operator implements the `PGDGroup` custom resource
+in the API group `pgd.k8s.enterprisedb.io`.
+This resource can be used to create and manage EDB Postgres Distributed clusters
+inside Kubernetes with capabilities including:
+
+- Deployment of EDB Postgres Distributed clusters with versions 5 and later
+- Additional self-healing capability on top of that of Postgres Distributed,
+ such as recovery and restart of failed nodes
+- Definition of the services to connect applications to the
+ write leader of each PGD group
+- Implementation of Raft subgroups
+- Support for Local Persistent Volumes with PVC templates
+- Reuse of Persistent Volumes storage in Pods
+- TLS connections and client certificate authentication
+- Continuous backup to an S3-compatible object store
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
index 32637e6e10f..df567876bab 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
@@ -10,9 +10,9 @@ originalFilePath: 'src/samples.md'
In this section, you can find some examples of configuration files to set up
your EDB Postgres Distributed Cluster in a Kubernetes environment.
-### Flexible 3 regions
-
-**[flexible_3regions.yaml](../samples/flexible_3regions.yaml)** a PGD cluster with 2 Data Groups and a global witness node spread across 3
+Flexible 3 regions
+: [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml):
+ a PGD cluster with 2 Data Groups and a global witness node spread across 3
regions, where each Data Group consists of 2 data nodes and a local witness
node.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
new file mode 100644
index 00000000000..36c31daec82
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
@@ -0,0 +1,250 @@
+---
+title: 'Security'
+originalFilePath: 'src/security.md'
+---
+
+This section contains information about security for EDB Postgres Distributed for Kubernetes,
+analyzed at three different layers: Code, Container, and Cluster.
+
+!!! Warning
+ The information contained in this page must not exonerate you from
+ performing regular InfoSec duties on your Kubernetes cluster. Please
+ familiarize yourself with the ["Overview of Cloud Native Security"](https://kubernetes.io/docs/concepts/security/overview/)
+ page from the Kubernetes documentation.
+
+!!! Seealso "About the 4C's Security Model"
+ Please refer to ["The 4C's Security Model in Kubernetes"](https://www.enterprisedb.com/blog/4cs-security-model-kubernetes)
+ blog article to get a better understanding and context of the approach EDB
+ has taken with security in EDB Postgres Distributed for Kubernetes.
+
+## Code
+
+Source code of EDB Postgres Distributed for Kubernetes is *systematically scanned* for static analysis purposes,
+including **security problems**, using a popular open-source linter for Go called
+[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline.
+GolangCI-Lint can run several *linters* on the same source code.
+
+One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`,
+a linter that scans the abstract syntax tree of the source against a set of rules aimed at
+discovering well-known vulnerabilities, threats, and weaknesses hidden in
+the code, such as hard-coded credentials, integer overflows, and SQL injection, to name a few.
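+
+For illustration only, a similar scan can be run locally against any Go source
+tree; this is not part of the product, it just mirrors the kind of check
+executed in the pipeline (assuming `gosec` and GolangCI-Lint are installed):
+
+```sh
+# Run gosec directly against all packages in the current module
+gosec ./...
+
+# Or run it as one of the linters orchestrated by GolangCI-Lint
+golangci-lint run --enable gosec
+```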
+
+!!! Important
+ A failure in the static code analysis phase of the CI/CD pipeline is a blocker
+ for the entire delivery of EDB Postgres Distributed for Kubernetes, meaning that each commit is validated
+ against all the linters defined by GolangCI-Lint.
+
+## Container
+
+Every container image that is part of EDB Postgres Distributed for Kubernetes is automatically built via CI/CD pipelines following every commit.
+Such images include not only the operator's, but also the operands', specifically every supported PostgreSQL version.
+Within the pipelines, images are scanned with:
+
+- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
+ of the container build process
+- [Clair](https://github.com/quay/clair): for vulnerabilities found in both the
+ underlying operating system and libraries and applications that they run
+
+!!! Important
+ All operand images are automatically rebuilt once a day by our pipelines in case
+ of security updates at the base image and package level, providing **patch level updates**
+ for the container images that EDB distributes.
+
+The following guidelines and frameworks have been taken into account for container-level security:
+
+- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
+ developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
+- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
+ developed by the Center for Internet Security (CIS)
+
+!!! Seealso "About the Container level security"
+ Please refer to ["Security and Containers in EDB Postgres Distributed for Kubernetes"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
+ blog article for more information about the approach that EDB has taken on
+ security at the container level in EDB Postgres Distributed for Kubernetes.
+
+## Cluster
+
+Security at the cluster level takes into account all Kubernetes components that
+form both the control plane and the nodes, as well as the applications that run in
+the cluster (PostgreSQL included).
+
+### Role Based Access Control (RBAC)
+
+The operator interacts with the Kubernetes API server with a dedicated service
+account called `pgd-operator-controller-manager`. In Kubernetes this is installed
+by default in the `pgd-operator-system` namespace, with a cluster role
+binding between this service account and the `pgd-operator-controller-manager`
+cluster role which defines the set of rules/resources/verbs granted to the operator.
+
+Red Hat OpenShift directly manages the operator RBAC entities via [Operator
+Lifecycle
+Manager](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html),
+allowing the user to grant permissions only where they are required,
+implementing the principle of least privilege.
+
+!!! Important
+ The above permissions are exclusively reserved for the operator's service
+ account to interact with the Kubernetes API server. They are not directly
+ accessible by the users of the operator that interact only with `PGDGroup`
+ and `PGDGroupCleanup` resources.
+
+Below we provide some examples and, most importantly, the reasons why
+EDB Postgres Distributed for Kubernetes requires full or partial management of standard Kubernetes
+namespaced resources.
+
+`jobs`
+: The operator needs to handle jobs to manage different `PGDGroup`'s phases.
+
+`poddisruptionbudgets`
+: The operator uses pod disruption budgets to make sure enough PGD Nodes
+ are kept active during maintenance operations.
+
+`pods`
+: The operator needs to manage PGD Nodes (as a `Cluster` resource).
+
+`secrets`
+: Unless you provide certificates and passwords to your data nodes,
+ the operator adopts the "convention over configuration" paradigm by
+ self-provisioning random generated passwords and TLS certificates, and by
+ storing them in secrets.
+
+`serviceaccounts`
+: The operator needs to create a service account to
+ enable the PGDGroup recovery job to retrieve the backup objects from
+ the object store where they reside.
+
+`services`
+: The operator needs to control network access to the PGD cluster
+ from applications, and properly manage
+ failover/switchover operations in an automated way.
+
+`statefulsets`
+: The operator needs to manage PGD Proxies.
+
+`validatingwebhookconfigurations` and `mutatingwebhookconfigurations`
+: The operator injects its self-signed webhook CA into both webhook
+ configurations, which are needed to validate and mutate all the resources it
+ manages. For more details, please see the
+ [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/).
+
+To see all the permissions required by the operator, you can run `kubectl
+describe clusterrole pgd-operator-manager-role`.
+
+PG4K-PGD internally manages the PGD nodes using the `Cluster` resource as defined by EDB Postgres
+for Kubernetes (PG4K). We refer you to the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
+for the list of permissions used by the PG4K operator service account.
+
+### Calls to the API server made by the instance manager
+
+The instance manager, which is the entry point of the operand container, needs
+to make some calls to the Kubernetes API server to ensure that the status of
+some resources is correctly updated and to access the config maps and secrets
+that are associated with that Postgres cluster. Such calls are performed through
+a dedicated `ServiceAccount` created by the operator that shares the same
+PostgreSQL `Cluster` resource name.
+
+!!! Important
+ The operand can only access a specific and limited subset of resources
+ through the API server. A service account is the
+ [recommended way to access the API server from within a Pod](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/).
+
+We refer you to the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
+for additional depth on the instance manager.
+
+### Pod Security Policies
+
+A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
+is the Kubernetes way to define security rules and specifications that a pod needs to meet
+to run in a cluster.
+For InfoSec reasons, every Kubernetes platform should implement them.
+
+EDB Postgres Distributed for Kubernetes does not require *privileged* mode for container execution.
+The PostgreSQL containers run as the `postgres` system user. No component whatsoever requires running as `root`.
+
+Likewise, volume access does not require *privileged* mode or `root` privileges either.
+Proper permissions must be assigned by the Kubernetes platform and/or administrators.
+The PostgreSQL containers run with a read-only root filesystem (that is, no writable layer).
+
+The operator explicitly sets the required security contexts.
+
+On Red Hat OpenShift, EDB Postgres Distributed for Kubernetes runs under the `restricted` security context constraint,
+the most restrictive one. The goal is to limit the execution of a pod to a namespace-allocated UID
+and SELinux context.
+
+!!! Seealso "Security Context Constraints in OpenShift"
+ For further information on Security Context Constraints (SCC) in
+ OpenShift, please refer to the
+ ["Managing SCC in OpenShift"](https://www.openshift.com/blog/managing-sccs-in-openshift)
+ article.
+
+!!! Warning "Security Context Constraints and namespaces"
+ As stated by [Openshift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html#role-based-access-to-ssc_configuring-internal-oauth)
+ SCCs are not applied in the default namespaces (`default`, `kube-system`,
+ `kube-public`, `openshift-node`, `openshift-infra`, `openshift`) and those
+ should not be used to run pods. CNP clusters deployed in those namespaces
+ will be unable to start due to missing SCCs.
+
+
+
+#### Exposed Ports
+
+EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager and operand
+levels, as listed in the table below:
+
+| System | Port number | Exposing | Name | Certificates | Authentication |
+| :--------------- | :---------- | :------------------ | :--------------- | :----------- | :------------- |
+| operator | 9443 | webhook server | `webhook-server` | TLS | Yes |
+| operator | 8080 | metrics | `metrics` | no TLS | No |
+| instance manager | 9187 | metrics | `metrics` | no TLS | No |
+| instance manager | 8000 | status | `status` | no TLS | No |
+| operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes |
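+
+As an illustration (the pod and namespace names are hypothetical), you could
+inspect the instance manager metrics endpoint listed above with a simple
+port-forward:
+
+```sh
+# In one terminal: forward the metrics port of a data node pod
+kubectl port-forward -n my-namespace pod/location-a-1-1 9187:9187
+
+# In another terminal: fetch the metrics (no TLS, no authentication)
+curl -s http://localhost:9187/metrics | head
+```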
+
+### PGD
+
+The current implementation of EDB Postgres Distributed for Kubernetes automatically creates
+passwords for the `postgres` superuser and the database owner.
+
+As far as password encryption is concerned, EDB Postgres Distributed for Kubernetes follows
+the default behavior of PostgreSQL: starting from PostgreSQL 14,
+`password_encryption` is by default set to `scram-sha-256`, while on earlier
+versions it is set to `md5`.
+
+!!! Important
+ Please refer to the ["Connection DSNs and SSL"](https://www.enterprisedb.com/docs/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
+ section in the PGD documentation for details.
+
+You can disable management of the `postgres` user password via secrets by setting
+`enableSuperuserAccess` to `false` in the `cnp` section of the spec.
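+
+For example, on an existing PGDGroup you might toggle it with a merge patch
+(the group name and namespace are illustrative; editing and re-applying your
+manifest works just as well):
+
+```sh
+kubectl patch pgdgroup region-a -n my-namespace \
+  --type merge \
+  -p '{"spec":{"cnp":{"enableSuperuserAccess":false}}}'
+```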
+
+!!! Note
+ The operator supports toggling the `enableSuperuserAccess` option. When you
+ disable it on a running cluster, the operator will ignore the content of the secret,
+ remove it (if previously generated by the operator) and set the password of the
+ `postgres` user to `NULL` (de facto disabling remote access through password authentication).
+
+### Storage
+
+EDB Postgres Distributed for Kubernetes delegates encryption at rest to the underlying storage class. For
+data protection in production environments, we highly recommend that you choose
+a storage class that supports encryption at rest.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
new file mode 100644
index 00000000000..ee42e6afa42
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
@@ -0,0 +1,17 @@
+---
+title: 'Client TLS/SSL Connections'
+originalFilePath: 'src/ssl_connections.md'
+---
+
+!!! Seealso "Certificates"
+ Please refer to the ["Certificates"](certificates.md)
+ page for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
+
+The EDB Postgres Distributed for Kubernetes operator has been designed to work with TLS/SSL for both encryption in transit and
+authentication, on server and client sides. PGD nodes are created as Cluster
+resources using the EDB Postgres for Kubernetes (PG4K) operator, and this
+includes the deployment of a Certification
+Authority (CA) to create and sign TLS client certificates.
+
+Please refer to the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
+for further information on issuers and certificates.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
new file mode 100644
index 00000000000..0ec06ddba2a
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
@@ -0,0 +1,44 @@
+---
+title: 'Use cases'
+originalFilePath: 'src/use_cases.md'
+---
+
+EDB Postgres Distributed for Kubernetes has been designed to work with applications
+that reside in the same Kubernetes cluster, for a full cloud native
+experience.
+
+However, it might happen that, while the database can be hosted
+inside a Kubernetes cluster, applications cannot be containerized
+at the same time and need to run in a *traditional environment* such
+as a VM.
+
+We reproduce here a summary of the basic considerations, and refer
+you to the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/use_cases/)
+for further depth.
+
+## Case 1: Applications inside Kubernetes
+
+In a typical situation, the application and the database run in the same
+namespace inside a Kubernetes cluster.
+
+![Application and Database inside Kubernetes](./images/apps-in-k8s.png)
+
+The application, normally stateless, is managed as a standard `Deployment`,
+with multiple replicas spread over different Kubernetes nodes, and internally
+exposed through a `ClusterIP` service.
+
+The service is exposed externally to the end user through an `Ingress` and the
+provider's load balancer facility, via HTTPS.
+
+## Case 2: Applications outside Kubernetes
+
+Another possible use case is to manage your Postgres Distributed database inside
+Kubernetes, while having your applications outside of it (for example in a
+virtualized environment). In this case, Postgres Distributed is represented by an IP
+address (or host name) and a TCP port, corresponding to the defined Ingress
+resource in Kubernetes.
+
+The application can still benefit from a TLS connection to Postgres Distributed.
+
+![Application outside Kubernetes](./images/apps-outside-k8s.png)
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
new file mode 100644
index 00000000000..0a5689af355
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
@@ -0,0 +1,171 @@
+---
+title: 'Managing EDB Postgres Distributed databases'
+originalFilePath: 'src/using_pgd.md'
+---
+
+As described in the [architecture document](architecture.md),
+EDB Postgres Distributed for Kubernetes is an operator created to deploy
+Postgres Distributed (PGD) databases.
+It provides an alternative to deployment with TPA, and by leveraging the
+Kubernetes ecosystem, it can offer self-healing and declarative control.
+The operator is also responsible for the backup and restore operations
+(see the [backup](backup.md) document).
+
+However, many operational aspects of PGD clusters are not
+managed by the operator.
+The pods created by EDB Postgres Distributed for Kubernetes come with
+[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed, and
+this is the tool that can be used, for example, to execute a switchover.
+
+## PGD CLI
+
+!!! Warning
+ The PGD CLI should not be used to create/delete resources. For example,
+ the `create-proxy`, `delete-proxy` commands should be avoided.
+ Provisioning of resources is under the control of the operator, and manual
+ creation/deletion is not supported.
+
+As an example, let's execute a switchover command.
+
+We recommend using the PGD CLI from the proxy pods. Let's find them.
+You can get a pod listing for your cluster:
+
+```shell
+kubectl get pods -n my-namespace
+
+NAME READY STATUS RESTARTS AGE
+location-a-1-1 1/1 Running 0 2h
+location-a-2-1 1/1 Running 0 2h
+location-a-3-1 1/1 Running 0 2h
+location-a-proxy-0 1/1 Running 0 2h
+location-a-proxy-1 1/1 Running 0 2h
+```
+
+The proxy nodes have `proxy` in the name. Let's choose one and get a command
+prompt in it:
+
+```shell
+kubectl exec -n my-namespace -ti location-a-proxy-0 -- bash
+```
+
+You should now have a bash session open with the proxy pod. The `pgd` command
+is available:
+
+```shell
+pgd
+
+Available Commands:
+ check-health Checks the health of the EDB Postgres Distributed cluster.
+ <- snipped ->
+ switchover Switches over to new write leader.
+ <- snipped ->
+```
+
+You can use the help output and the `show-*` commands to gather the information
+needed for the switchover:
+
+```shell
+pgd switchover --help
+
+ $ pgd switchover --group-name group_a --node-name bdr-a1
+ switchover is complete
+```
+
+```shell
+pgd show-groups
+
+Group Group ID Type Parent Group Location Raft Routing Write Leader
+----- -------- ---- ------------ -------- ---- ------- ------------
+world 3239291720 global true true location-a-2
+location-a 2135079751 data world true true location-a-1
+```
+
+```shell
+pgd show-nodes
+Node Node ID Group Type Current State Target State Status Seq ID
+---- ------- ----- ---- ------------- ------------ ------ ------
+location-a-1 3165289849 location-a data ACTIVE ACTIVE Up 1
+location-a-2 3266498453 location-a data ACTIVE ACTIVE Up 2
+location-a-3 1403922770 location-a data ACTIVE ACTIVE Up 3
+```
+
+## Accessing the database
+
+In the [use cases document](use_cases.md) you can find a discussion on using the
+database within the Kubernetes cluster vs. from outside, and in the
+[connectivity document](connectivity.md), you can find a discussion on services,
+which is relevant for accessing the database from applications.
+
+However you implement your system, your applications should connect through the
+proxy service in order to reap the benefits of Postgres Distributed and of
+the increased self-healing capabilities added by the EDB Postgres Distributed
+for Kubernetes operator.
+
+!!! Important
+ Note that, as per the EDB Postgres for Kubernetes defaults, data nodes are
+ created with a database called `app`, owned by a user named `app`, in
+ contrast to the `bdrdb` database you'll find in the EDB Postgres
+ Distributed documentation. This
+ is configurable by the user in the `cnp` section of the manifest.
+ See the [EDB Postgres for Kubernetes bootstrapping document](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/)
+ for reference.
+
+You may, however, want to access your PGD data nodes for administrative tasks
+using the `psql` CLI.
+
+As in the previous section on the PGD CLI, get a pod listing
+for your PGD cluster and `kubectl exec` into a data node:
+
+```shell
+kubectl exec -n my-namespace -ti location-a-1-1 -- psql
+```
+
+Once in the familiar territory of `psql`, remember that the default
+database is named `app` (see the note above).
+
+```terminal
+postgres=# \c app
+You are now connected to database "app" as user "postgres".
+app=# \x
+Expanded display is on.
+app=# select * from bdr.node_summary;
+-[ RECORD 1 ]---------------------------------------
+node_name | location-a-1
+node_group_name | location-a
+interface_connstr | host=location-a-1-node user=streaming_replica sslmode=verify-ca port=5432 sslkey=/controller/certificates/streaming_replica.key sslcert=/controller/certificates/streaming_replica.crt sslrootcert=/controller/certificates/server-ca.crt application_name=location-a-1 dbname=app
+peer_state_name | ACTIVE
+peer_target_state_name | ACTIVE
+
+<- snipped ->
+```
+
+For your applications, of course, you should use the non-privileged role (`app`
+by default).
+
+You will need the user credentials, which are stored in a Kubernetes secret:
+
+```shell
+kubectl get secrets
+
+NAME TYPE DATA AGE
+<- snipped ->
+location-a-app kubernetes.io/basic-auth 2 2h
+```
+
+This secret contains the username and password needed for the Postgres DSN,
+encoded in base64:
+
+```shell
+kubectl get secrets location-a-app -o yaml
+
+apiVersion: v1
+data:
+ password:
+ username:
+kind: Secret
+metadata:
+ creationTimestamp:
+ labels:
+
+<- snipped ->
+```
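+
+To use these values in a connection string or with `psql`, decode them. As an
+example, assuming the secret is named `location-a-app` as in the listing above:
+
+```shell
+kubectl get secret -n my-namespace location-a-app -o jsonpath='{.data.username}' | base64 --decode
+kubectl get secret -n my-namespace location-a-app -o jsonpath='{.data.password}' | base64 --decode
+```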
\ No newline at end of file
From a4a0ef11d307b92640c3c0dca58df94de9651f6a Mon Sep 17 00:00:00 2001
From: kelpoole <44814688+kelpoole@users.noreply.github.com>
Date: Thu, 7 Sep 2023 10:14:33 -0700
Subject: [PATCH 02/39] Update index.mdx: - add headings - updated architecture
to remove s to match for index
---
.../1/index.mdx | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index d0d657c65ab..80d217739a9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -4,17 +4,19 @@ originalFilePath: src/index.md
indexCards: none
navigation:
- release_notes
+ - "#Getting Started"
- before_you_start
- - quickstart
- - installation_upgrade
- - architecture
- - certificates
- use_cases
+ - architecture
+ - installation_upgrade
+ - quickstart
+ - "#Using"
- using_pgd
- - ssl_connections
- security
- backup
- recovery
+ - certificates
+ - ssl_connections
- connectivity
- private_registries
- openshift
@@ -27,7 +29,7 @@ directoryDefaults:
---
**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
-operator designed to manage **EDB Postgres Distributed** v5 workloads on
+operator designed to manage **EDB Postgres Distributed** workloads on
Kubernetes, with traffic routed by PGD Proxy.
The main custom resource that the operator provides is called `PGDGroup`.
From 088331688cdd687f206e4f1788ccdddddda585e6 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Fri, 8 Sep 2023 17:28:36 +0000
Subject: [PATCH 03/39] sync with 0.7.1; add to homepage
---
.../postgres_distributed_for_kubernetes/1/index.mdx | 11 ++++++-----
.../1/release_notes.mdx | 8 +++++++-
src/pages/index.js | 4 ++++
3 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index 80d217739a9..1c66ebfe6aa 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -4,13 +4,13 @@ originalFilePath: src/index.md
indexCards: none
navigation:
- release_notes
- - "#Getting Started"
+ - '#Getting Started'
- before_you_start
- use_cases
- architecture
- installation_upgrade
- quickstart
- - "#Using"
+ - '#Using'
- using_pgd
- security
- backup
@@ -25,7 +25,8 @@ navigation:
- '!api_reference.md.in'
directoryDefaults:
iconName: logos/KubernetesMono
-
+ hideVersion: true
+ displayBanner: Preview release v0.7.1
---
**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
@@ -76,8 +77,8 @@ clusters hosting the distributed PGD cluster have been prepared by you to cater
EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or higher.
-!!! SeeAlso See also
- Please refer to ["Connectivity" section](connectivity.md) for more information.
+!!! Seealso "About connectivity"
+ Please refer to the ["Connectivity" section](connectivity.md) for more information.
{{ range $ -}}
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
index 75bba2b67c3..ae63da33e12 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
@@ -1,16 +1,14 @@
---
-title: 'API Reference'
+title: 'API reference'
originalFilePath: 'src/api_reference.md'
---
-EDB Postgres Distributed for Kubernetes extends the Kubernetes API defining the
-custom resources you find below.
+EDB Postgres Distributed for Kubernetes extends the Kubernetes API by defining the
+custom resources that follow.
All the resources are defined in the `pgd.k8s.enterprisedb.io/v1beta1`
API.
-Below you will find a description of the defined resources:
-
- [Backup](#Backup)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index bbd7e477926..afb3bb0f2c0 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -3,150 +3,147 @@ title: 'Architecture'
originalFilePath: 'src/architecture.md'
---
-This section covers the main architectural aspects you need to consider
-when deploying EDB Postgres Distributed in Kubernetes (PG4K-PGD).
+Consider these main architectural aspects
+when deploying EDB Postgres Distributed in Kubernetes.
-PG4K-PGD is a
+EDB Postgres Distributed for Kubernetes is a
[Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
designed to deploy and manage EDB Postgres Distributed clusters
running in private, public, hybrid, or multi-cloud environments.
## Relationship with EDB Postgres Distributed
-[EDB Postgres Distributed (PGD)](https://www.enterprisedb.com/docs/pgd/latest/)
+[EDB Postgres Distributed (PGD)](/pgd/latest/)
is a multi-master implementation of Postgres designed for high performance and
availability.
PGD generally requires deployment using
-[*Trusted Postgres Architect*, (TPA)](https://www.enterprisedb.com/docs/pgd/latest/tpa/),
-a tool that uses [Ansible](https://www.ansible.com) for provisioning and
-deployment of PGD clusters.
+[Trusted Postgres Architect (TPA)](/pgd/latest/tpa/),
+a tool that uses [Ansible](https://www.ansible.com) to provision and
+deploy PGD clusters.
-PG4K-PGD offers a different way of deploying PGD clusters, leveraging containers
-and Kubernetes, with the added advantages that the resulting architecture is
-self-healing and robust, managed through declarative configuration, and that it
-takes advantage of the vast and growing Kubernetes ecosystem.
+EDB Postgres Distributed for Kubernetes offers a different way of deploying PGD clusters, leveraging containers
+and Kubernetes. The advantages are that the resulting architecture:
+
+- Is self-healing and robust.
+- Is managed through declarative configuration.
+- Takes advantage of the vast and growing Kubernetes ecosystem.
## Relationship with EDB Postgres for Kubernetes
-A PGD cluster consists of one or more *PGD Groups*, each having one or more *PGD
-Nodes*. A PGD node is a Postgres database. PG4K-PGD internally
+A PGD cluster consists of one or more *PGD groups*, each having one or more *PGD
+nodes*. A PGD node is a Postgres database. EDB Postgres Distributed for Kubernetes internally
manages each PGD node using the `Cluster` resource as defined by EDB Postgres
-for Kubernetes (PG4K), specifically a `Cluster` with a single instance (i.e. no
+for Kubernetes, specifically a cluster with a single instance (that is, no
replicas).
-The single PostgreSQL instance created by each `Cluster` can be configured
-declaratively via the
+You can configure the single PostgreSQL instance created by each cluster
+declaratively using the
[`.spec.cnp` section](api_reference.md#CnpConfiguration)
-of the PGD Group spec.
+of the PGD group spec.
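+
+As an illustration only, a sketch of a `PGDGroup` manifest fragment that
+customizes the underlying instances through the `cnp` section follows. The
+`resources` and `storage` fields shown are assumptions for this example; see the
+[API reference](api_reference.md#CnpConfiguration) for the authoritative list of
+supported settings.
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: location-a
+spec:
+  cnp:
+    # Assumed fields, mirroring the EDB Postgres for Kubernetes Cluster spec
+    resources:
+      requests:
+        cpu: "1"
+        memory: 2Gi
+    storage:
+      size: 10Gi
+```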
-In PG4K-PGD, as in PG4K, the underlying database implementation is responsible
-for data replication. However, it is important to note that *failover* and
-*switchover* work differently, entailing Raft election and the nomination of new
-write leaders. PG4K only handles the deployment and healing of data nodes.
+In EDB Postgres Distributed for Kubernetes, as in EDB Postgres for Kubernetes, the underlying database implementation is responsible
+for data replication. However, it's important to note that failover and
+switchover work differently, entailing Raft election and nominating new
+write leaders. EDB Postgres for Kubernetes handles only the deployment and healing of data nodes.
-## Managing PGD using PG4K-PGD
+## Managing PGD using EDB Postgres Distributed for Kubernetes
-The PG4K-PGD operator can manage the complete lifecycle of PGD clusters. As
-such, in addition to PGD Nodes (represented as single-instance `Clusters`), it
+The EDB Postgres Distributed for Kubernetes operator can manage the complete lifecycle of PGD clusters. As
+such, in addition to PGD nodes (represented as single-instance clusters), it
needs to manage other objects associated with PGD.
PGD relies on the Raft algorithm for distributed consensus to manage node
-metadata, specifically agreement on a *write leader*. Consensus among data
+metadata, specifically agreement on a write leader. Consensus among data
nodes is also required for operations such as generating new global sequences
or performing distributed DDL.
These considerations force additional actors in PGD above database nodes.
-PG4K-PGD manages the following:
+EDB Postgres Distributed for Kubernetes manages the following:
-- Data nodes: as mentioned previously, a node is a database, and is managed
- via PG4K, creating a `Cluster` with a single instance.
+- Data nodes. A node is a database and is managed
+ by EDB Postgres for Kubernetes, creating a cluster with a single instance.
- [Witness nodes](https://www.enterprisedb.com/docs/pgd/latest/nodes/#witness-nodes)
- are basic database instances that do not participate in data
- replication; their function is to guarantee that consensus is possible in
- groups with an even number of data nodes, or after network partitions. Witness
+ are basic database instances that don't participate in data
+ replication. Their function is to guarantee that consensus is possible in
+ groups with an even number of data nodes or after network partitions. Witness
nodes are also managed using a single-instance `Cluster` resource.
-- [PGD Proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/):
+- [PGD proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/)
act as Postgres proxies with knowledge of the write leader. PGD proxies need
information from Raft to route writes to the current write leader.
### Proxies and routing
PGD groups assume full mesh connectivity of PGD nodes. Each node must be able to
-connect to every other node, using the appropriate connection string (a
-`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
-will take care of replicating data after it's committed to one node.
-
-For performance, it is often recommendable to send write operations mostly to a
-single node, the *write leader*. Raft is used to identify which node is the
-write leader, and to hold metadata about the PGD nodes. PGD Proxies are used to
-transparently route writes to write leaders, and to quickly pivot to the new
+connect to every other node using the appropriate connection string (a
+`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
+takes care of replicating data after it's committed to one node.
+
+For performance, we often recommend sending write operations mostly to a
+single node: the write leader. Raft identifies the node that's the
+write leader and holds metadata about the PGD nodes. PGD proxies
+transparently route writes to write leaders and can quickly pivot to the new
write leader in case of switchover or failover.
-It is possible to configure *Raft subgroups*, each of which can maintain a
-separate write leader. In PG4K-PGD, a PGD Group containing a PGD Proxy
-automatically comprises a Raft subgroup.
+It's possible to configure *Raft subgroups*, each of which can maintain a
+separate write leader. In EDB Postgres Distributed for Kubernetes, a PGD group containing a PGD proxy
+comprises a Raft subgroup.
-There are two kinds of routing available with PGD Proxies:
+Two kinds of routing are available with PGD proxies:
-- Global routing uses the top-level Raft group, and maintains one global write
+- Global routing uses the top-level Raft group and maintains one global write
leader.
- Local routing uses subgroups to maintain separate write leaders. Local
routing is often used to achieve geographical separation of writes.
-In PG4K-PGD, local routing is used by default, and a configuration option is
+In EDB Postgres Distributed for Kubernetes, local routing is used by default, and a configuration option is
available to select global routing.
-You can find more information in the
-[PGD documentation of routing with Raft](https://www.enterprisedb.com/docs/pgd/latest/routing/raft/).
+For more information, see
+[Proxies, Raft, and Raft subgroups](/pgd/latest/routing/raft/) in the PGD documentation.
-### PGD Architectures and High Availability
+### PGD architectures and high availability
-EDB proposes several recommended architectures to make good use of PGD's
-distributed multi-master capabilities and to offer high availability.
+To make good use of PGD's
+distributed multi-master capabilities and to offer high availability,
+we recommend several architectures.
The Always On architectures are built from either one group in a single location
or two groups in two separate locations.
-Please refer to the
-[PGD architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
-for further information.
+See [Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation
+for more information.
## Deploying PGD on Kubernetes
-PG4K-PGD leverages Kubernetes to deploy and manage PGD clusters. As such, some
+EDB Postgres Distributed for Kubernetes leverages Kubernetes to deploy and manage PGD clusters. As such, some
adaptations are necessary to translate PGD into the Kubernetes ecosystem.
### Images and operands
-PGD can be configured to run one of three Postgres distributions. Please refer
-to the
-[PGD documentation](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
-to understand the features of each distribution.
+PGD can be configured to run one of three Postgres distributions. See
+[Choosing a Postgres distribution](/pgd/latest/choosing_server/)
+in the PGD documentation to understand the features of each distribution.
To function in Kubernetes, containers are provided for each Postgres
distribution. These are the *operands*.
In addition, the operator images are kept in those same repositories.
-Please refer to [the document on registries](private_registries.md)
+See [EDB private image registries](private_registries.md)
for details on accessing the images.
### Kubernetes architecture
-We reproduce some of the points of the
-[PG4K document on Kubernetes architecture](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/architecture/),
-to which we refer you for further depth.
-
Kubernetes natively provides the possibility to span separate physical locations
-–also known as data centers, failure zones, or more frequently **availability
-zones**– connected to each other via redundant, low-latency, private network
-connectivity.
+connected to each other by way of redundant, low-latency, private network
+connectivity. These physical locations are also known as data centers, failure zones, or,
+more frequently, *availability zones*.
Being a distributed system, the recommended minimum number of availability zones
-for a **Kubernetes cluster** is three (3), in order to make the control plane
+for a Kubernetes cluster is three to make the control plane
resilient to the failure of a single zone. This means that each data center is
active at any time and can run workloads simultaneously.
-PG4K-PGD can be installed within a
+EDB Postgres Distributed for Kubernetes can be installed in a
[single Kubernetes cluster](#single-kubernetes-cluster)
or across
[multiple Kubernetes clusters](#multiple-kubernetes-clusters).
@@ -154,31 +151,31 @@ or across
### Single Kubernetes cluster
A multi-availability-zone Kubernetes architecture is typical of Kubernetes
-services managed by Cloud Providers. Such an architecture enables the PG4K-PGD
-and the PG4K operators to schedule workloads and nodes across availability
-zones, considering all zones active:
+services managed by cloud providers. Such an architecture enables the EDB Postgres Distributed for Kubernetes
+and the EDB Postgres for Kubernetes operators to schedule workloads and nodes across availability
+zones, considering all zones active.
![Kubernetes cluster spanning over 3 independent data centers](./images/k8s-architecture-3-az.png)
PGD clusters can be deployed in a single Kubernetes cluster and take advantage
-of Kubernetes availability zones to enable High Availability architectures,
+of Kubernetes availability zones to enable high-availability architectures,
including the Always On recommended architectures.
-The *Always On Single Location* architecture shown in the
-[PGD Architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/):
-![Always On Single Region](./images/always_on_1x3_updated.png)
+You can realize the Always On, single-location architecture shown in
+[Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation on
+a single Kubernetes cluster with three availability zones.
-can be realized on single kubernetes cluster with 3 availability zones.
+![Always On Single Region](./images/always_on_1x3_updated.png)
-The PG4K-PGD operator can control the *scheduling* of pods (i.e. which pods go
-to which data center) using affinity, tolerations and node selectors, as is the
-case with PG4K. Individual scheduling controls are available for proxies as well
+The EDB Postgres Distributed for Kubernetes operator can control the scheduling of pods (that is, which pods go
+to which data center) using affinity, tolerations, and node selectors, as is the
+case with EDB Postgres for Kubernetes. Individual scheduling controls are available for proxies as well
as nodes.
-Please refer to the
+See the
[Kubernetes documentation on scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/),
-as well as the [PG4K documents](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/scheduling/)
-for further information.
+and [Scheduling](/postgres_for_kubernetes/latest/scheduling/) in the EDB Postgres for Kubernetes documentation
+for more information.
### Multiple Kubernetes clusters
@@ -187,7 +184,7 @@ reliably communicate with each other.
![Multiple Kubernetes clusters](./images/k8s-architecture-multi.png)
-[Always On multi-location PGD architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
+[Always On multi-location PGD architectures](/pgd/latest/architectures/)
can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
-More information can be found in the ["Connectivity"](connectivity.md) section.
\ No newline at end of file
+For more information, see [Connectivity](connectivity.md).
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index be4b57ebf4b..45e2ab42101 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -3,34 +3,34 @@ title: 'Backup on object stores'
originalFilePath: 'src/backup.md'
---
-EDB Postgres Distributed for Kubernetes (PG4K-PGD) supports *online/hot backup* of
+EDB Postgres Distributed for Kubernetes supports *online/hot backup* of
PGD clusters through physical backup and WAL archiving on an object store.
This means that the database is always up (no downtime required) and that
-Point In Time Recovery is available.
+point-in-time recovery (PITR) is available.
## Common object stores
-Multiple object store are supported, such as `AWS S3`, `Microsoft Azure Blob Storage`,
-`Google Cloud Storage`, `MinIO Gateway`, or any S3 compatible provider.
-Given that PG4K-PGD configures the connection with object stores by relying on
-EDB Postgres for Kubernetes (PG4K), please refer to the [PG4K Cloud provider support](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
-documentation for additional depth.
+Multiple object stores are supported, such as AWS S3, Microsoft Azure Blob Storage,
+Google Cloud Storage, MinIO Gateway, or any S3-compatible provider.
+Given that EDB Postgres Distributed for Kubernetes configures the connection with object stores by relying on
+EDB Postgres for Kubernetes, see the [EDB Postgres for Kubernetes cloud provider support](/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
+documentation for more information.
!!! Important
- In the PG4K documentation you'll find the Cloud Provider configuration section
- available at `spec.backup.barmanObjectStore`. Note that in PG4K-PGD examples, the object store section is found at a
+ The EDB Postgres for Kubernetes documentation's Cloud Provider configuration section is
+ available at `spec.backup.barmanObjectStore`. In EDB Postgres Distributed for Kubernetes examples, the object store section is at a
different path: `spec.backup.configuration.barmanObjectStore`.
## WAL archive
-WAL archiving is the process that sends `WAL files` to the object storage, and it's essential to
-execute *online/hot backups*, or Point in Time recovery (PITR).
-In PG4K-PGD, each PGD Node will be set up to archive WAL files in the object store independently.
+WAL archiving is the process that sends WAL files to the object storage, and it's essential to
+execute online/hot backups or PITR.
+In EDB Postgres Distributed for Kubernetes, each PGD node is set up to archive WAL files in the object store independently.
-The WAL archive is defined in the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza,
+The WAL archive is defined in the PGD group `spec.backup.configuration.barmanObjectStore` stanza
and is enabled as soon as a destination path and cloud credentials are set.
-You can choose to compress WAL files before they are uploaded, and/or encrypt them.
-Parallel WAL archiving can also be enabled.
+You can choose to compress WAL files before they're uploaded, encrypt them, or both.
+In addition, you can enable parallel WAL archiving.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -47,16 +47,16 @@ spec:
maxParallel: 8
```
-For further information, refer to the [PG4K WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
+For more information, see the [EDB Postgres for Kubernetes WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
## Scheduled backups
-Scheduled backups are the recommended way to configure your backup strategy in PG4K-PGD.
-When the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator will select one of the
-PGD data nodes as the elected "Backup Node", for which it will automatically create a `Scheduled Backup` resource.
+Scheduled backups are the recommended way to configure your backup strategy in EDB Postgres Distributed for Kubernetes.
+When the PGD group `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator selects one of the
+PGD data nodes as the elected backup node, for which it creates a `ScheduledBackup` resource.
The `.spec.backup.cron.schedule` field allows you to define a cron schedule specification, expressed
-in the [https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format]\(Go `cron` package format).
+in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -71,35 +71,35 @@ spec:
immediate: true
```
-Scheduled Backups can be suspended if necessary by setting `.spec.backup.cron.suspend` to true. This will
-prevent any new backup from being scheduled while the option is set to true.
+You can suspend scheduled backups by setting `.spec.backup.cron.suspend` to `true`. This setting
+prevents any new backup from being scheduled.
-In case you want to execute a backup as soon as the ScheduledBackup resource is created
-you can set `.spec.backup.cron.immediate` to true.
+If you want to execute a backup as soon as the `ScheduledBackup` resource is created,
+you can set `.spec.backup.cron.immediate` to `true`.
-`.spec.backupOwnerReference` indicates which ownerReference should be used
+`.spec.backupOwnerReference` indicates the `ownerReference` to use
in the created backup resources. The choices are:
-- *none:* no owner reference for created backup objects
-- *self:* sets the Scheduled backup object as owner of the backup
-- *cluster:* sets the cluster as owner of the backup
+- `none` — No owner reference for created backup objects.
+- `self` — Sets the scheduled backup object as owner of the backup.
+- `cluster` — Sets the cluster as owner of the backup.
!!! Note
- The `PG4K` ScheduledBackup object contains an additional option named `cluster` to specify the
- Cluster to be backed up. This option is currently not supported by `PG4K-PGD`, and will be
+ The EDB Postgres for Kubernetes `ScheduledBackup` object contains the `cluster` option to specify the
+ cluster to back up. This option is currently not supported by EDB Postgres Distributed for Kubernetes and is
ignored if specified.
-In case an elected "Backup node" is deleted, the operator will transparently elect a new "Backup Node"
-and reconcile the Scheduled Backup resource accordingly.
+If an elected backup node is deleted, the operator transparently elects a new backup node
+and reconciles the `ScheduledBackup` resource accordingly.
## Retention policies
-PG4K-PGD can manage the automated deletion of backup files from the backup
-object store, using **retention policies** based on the recovery window.
-This process will also take care of removing unused WAL files and WALs associated with backups
+EDB Postgres Distributed for Kubernetes can manage the automated deletion of backup files from the backup
+object store using retention policies based on the recovery window.
+This process also takes care of removing unused WAL files and WALs associated with backups
that are scheduled for deletion.
-You can define your backups with a retention policy of 30 days as follows:
+You can define your backups with a retention policy of 30 days:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -111,34 +111,34 @@ spec:
retentionPolicy: "30d"
```
-For further information, refer to the [PG4K Retention policies](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) documentation.
+For more information, see [Retention policies](/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) in the EDB Postgres for Kubernetes documentation.
!!! Important
- Currently, the retention policy will only be applied for the elected "Backup Node"
- backups and WAL files. Given that each other PGD node also archives its own WALs
- independently, it is your responsibility to manage the lifecycle of those WAL files,
- for example by leveraging the object storage data retention policy.
- Also, in case you have an object storage data retention policy set up on every PGD Node
+ Currently, the retention policy is applied only for the elected backup node
+ backups and WAL files. Given that every other PGD node also archives its own WALs
+ independently, it's your responsibility to manage the lifecycle of those WAL files,
+ for example by leveraging the object storage data-retention policy.
+ Also, in case you have an object storage data retention policy set up on every PGD node
directory, make sure it's not overlapping or interfering with the retention policy managed
by the operator.
## Compression algorithms
Backups and WAL files are uncompressed by default. However, multiple compression algorithms are
-supported. For more information, refer to the [PG4K Compression algorithms](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
+supported. For more information, see the [EDB Postgres for Kubernetes compression algorithms](/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
## Tagging of backup objects
-It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files and history files.
-For more information, refer to the [PG4K document on Tagging of backup objects](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
+It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files, and history files.
+For more information, see the EDB Postgres for Kubernetes documentation about [tagging of backup objects](/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
-## On-demand backups of a PGD Node
+## On-demand backups of a PGD node
-A PGD Node is represented as single-instance PG4K `Cluster` object.
+A PGD node is represented as a single-instance EDB Postgres for Kubernetes `Cluster` object.
As such, in case of need, it's possible to request an on-demand backup
-of a specific PGD Node by creating a PG4K `Backup` resource.
-In order to do that, you can directly refer to the [PG4K On-demand backups](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) documentation.
+of a specific PGD node by creating an EDB Postgres for Kubernetes `Backup` resource.
+To do that, see [EDB Postgres for Kubernetes on-demand backups](/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) in the EDB Postgres for Kubernetes documentation.
!!! Hint
- You can retrieve the list of PG4K Clusters that make up your PGDGroup
+ You can retrieve the list of EDB Postgres for Kubernetes clusters that make up your PGD group
by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
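+
+As a sketch, an on-demand backup of one of those clusters could then be requested
+with an EDB Postgres for Kubernetes `Backup` resource similar to the following,
+where the cluster name `my-pgd-group-1` is an assumed example taken from the
+output of the `kubectl get cluster` command in the hint:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: on-demand-backup-example
+  namespace: my-namespace
+spec:
+  cluster:
+    name: my-pgd-group-1
+```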
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index cdb6cb725ab..87a07e5259a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -1,16 +1,16 @@
---
-title: 'Before You Start'
+title: 'Before you start'
originalFilePath: 'src/before_you_start.md'
---
-Before we get started, it is essential to go over some terminology that is
+Before you get started, review the terminology that's
specific to Kubernetes and PGD.
## Kubernetes terminology
[Node](https://kubernetes.io/docs/concepts/architecture/nodes/)
: A *node* is a worker machine in Kubernetes, either virtual or physical, where
- all services necessary to run pods are managed by the control plane node(s).
+ all services necessary to run pods are managed by the control plane nodes.
[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/)
: A *pod* is the smallest computing unit that can be deployed in a Kubernetes
@@ -19,31 +19,30 @@ specific to Kubernetes and PGD.
[Service](https://kubernetes.io/docs/concepts/services-networking/service/)
: A *service* is an abstraction that exposes as a network service an
- application that runs on a group of pods and standardizes important features
- such as service discovery across applications, load balancing, failover, and so
- on.
+ application that runs on a group of pods and standardizes important features,
+ such as service discovery across applications, load balancing, and failover.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
-: A *secret* is an object that is designed to store small amounts of sensitive
- data such as passwords, access keys, or tokens, and use them in pods.
+: A *secret* is an object that's designed to store small amounts of sensitive
+ data such as passwords, access keys, or tokens and use them in pods.
-[Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+[Storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
: A *storage class* allows an administrator to define the classes of storage in
a cluster, including provisioner (such as AWS EBS), reclaim policies, mount
options, volume expansion, and so on.
-[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+[Persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
: A *persistent volume* (PV) is a resource in a Kubernetes cluster that
- represents storage that has been either manually provisioned by an
+ represents storage that was either manually provisioned by an
administrator or dynamically provisioned by a *storage class* controller. A PV
- is associated with a pod using a *persistent volume claim* and its lifecycle is
+ is associated with a pod using a *persistent volume claim*, and its lifecycle is
independent of any pod that uses it. Normally, a PV is a network volume,
especially in the public cloud. A [*local persistent volume*
(LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a
persistent volume that exists only on the particular node where the pod that
uses it is running.
-[Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+[Persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
: A *persistent volume claim* (PVC) represents a request for storage, which
might include size, access mode, or a particular storage class. Similar to how
a pod consumes node resources, a PVC consumes the resources of a PV.
@@ -55,7 +54,7 @@ specific to Kubernetes and PGD.
projects, departments, teams, and so on.
[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
-: *Role Based Access Control* (RBAC), also known as *role-based security*, is a
+: *Role-based access control* (RBAC), also known as *role-based security*, is a
method used in computer systems security to restrict access to the network and
resources of a system to authorized users only. Kubernetes has a native API to
control roles at the namespace and cluster level and associate them with
@@ -63,7 +62,7 @@ specific to Kubernetes and PGD.
[CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
: A *custom resource definition* (CRD) is an extension of the Kubernetes API
- and allows developers to create new data types and objects, *called custom
+ and allows developers to create new data types and objects, called *custom
resources*.
[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
@@ -75,13 +74,13 @@ specific to Kubernetes and PGD.
[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/)
: `kubectl` is the command-line tool used to manage a Kubernetes cluster.
-EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. Please refer to the
-["Supported releases"](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) page for details.
+EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. See
+[Supported releases](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) for details.
## PGD terminology
-Please refer to the
-[PGD terminology page for further information](https://www.enterprisedb.com/docs/pgd/latest/terminology/).
+For more information, see
+[Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/) in the PGD documentation.
[Node](https://www.enterprisedb.com/docs/pgd/latest/terminology/#node)
: A PGD database instance.
@@ -93,22 +92,22 @@ Please refer to the
: A planned change in connection between the application and the active database node in a cluster, typically done for maintenance.
[Write leader](https://www.enterprisedb.com/docs/pgd/latest/terminology/#write-leader)
-: In always-on architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of proxy nodes.
+: In Always On architectures, a node is selected as the correct connection endpoint for applications. This node is called the *write leader*. The write leader is selected by consensus of a quorum of proxy nodes.
## Cloud terminology
Region
-: A *region* in the Cloud is an isolated and independent geographic area
+: A *region* in the cloud is an isolated and independent geographic area
organized in *availability zones*. Zones within a region have very little
round-trip network latency.
Zone
-: An *availability zone* in the Cloud (also known as *zone*) is an area in a
+: An *availability zone* in the cloud (also known as a *zone*) is an area in a
region where resources can be deployed. Usually, an availability zone
corresponds to a data center or an isolated building of the same data center.
## What to do next
-Now that you have familiarized with the terminology, you can decide to
-[test EDB Postgres Distributed for Kubernetes (PG4K-PGD) on your laptop using a local cluster](quickstart.md) before
+Now that you're familiar with the terminology, you can
+[test EDB Postgres Distributed for Kubernetes on your laptop using a local cluster](quickstart.md) before
deploying the operator in your selected cloud environment.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
index cc9e2463f39..c0a4f8c098e 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -3,28 +3,27 @@ title: 'Certificates'
originalFilePath: 'src/certificates.md'
---
-EDB Postgres Distributed for Kubernetes has been designed to natively support TLS certificates.
-In order to set up a PGD cluster, each PGD node require:
+EDB Postgres Distributed for Kubernetes was designed to natively support TLS certificates.
+To set up a PGD cluster, each PGD node requires:
-- a server Certification Authority (CA) certificate
-- a server TLS certificate signed by the server Certification Authority
-- a client Certification Authority (CA) certificate
-- a streaming replication client certificate generated by the client Certification Authority
+- A server certification authority (CA) certificate
+- A server TLS certificate signed by the server CA
+- A client CA certificate
+- A streaming replication client certificate generated by the client CA
!!! Note
- You can find all the secrets used by each PGD Node and the expiry dates in
- the Cluster (PGD Node) Status.
+ You can find all the secrets used by each PGD node and the expiry dates in
+ the cluster (PGD node) status.
-EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates, and
-primarily operates in two modes:
+EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates. It operates
+primarily in two modes:
-1. **operator managed**: certificates are internally
- managed by the operator in a fully automated way, and signed using a CA created
- by EDB Postgres Distributed for Kubernetes
-2. **user provided**: certificates are
+1. **Operator managed** — Certificates are internally
+ managed by the operator in a fully automated way and signed using a CA created
+ by EDB Postgres Distributed for Kubernetes.
+2. **User provided** — Certificates are
generated outside the operator and imported in the cluster definition as
- secrets - EDB Postgres Distributed for Kubernetes integrates itself with cert-manager (see
- examples below)
+ secrets. EDB Postgres Distributed for Kubernetes integrates with cert-manager.
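+
+For example, a minimal cert-manager `Issuer` that could serve as a starting point
+for user-provided certificates is sketched below; the name is a placeholder, and
+a production setup would typically use a proper CA issuer instead of a
+self-signed one:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: selfsigned-issuer
+spec:
+  selfSigned: {}
+```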
-You can find further information in the
-[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/certificates/).
\ No newline at end of file
+For more information, see
+[Certificates](/postgres_for_kubernetes/latest/certificates/) in the EDB Postgres for Kubernetes documentation.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index fd50828c2ba..0090c42296c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -3,96 +3,94 @@ title: 'Connectivity'
originalFilePath: 'src/connectivity.md'
---
-This section provides information about secure network communications within a
-PGD Cluster, covering the following topics:
+Information about secure network communications in a
+PGD cluster includes:
-- [services](#services)
-- [domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
+- [Services](#services)
+- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-\!!! Notice
- Although the above topics might seem unrelated to each other, they all
+!!! Notice
+ Although these topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
## Services
-Resources in a PGD Cluster are accessible through Kubernetes services.
-Every PGDGroup manages several of them, namely:
+Resources in a PGD cluster are accessible through Kubernetes services.
+Every PGD group manages several of them, namely:
-- one service per node, used for internal communications (*node service*)
-- a *group service*, to reach any node in the group, used primarily by PG4K-PGD
+- One service per node, used for internal communications (*node service*)
+- A *group service* to reach any node in the group, used primarily by EDB Postgres Distributed for Kubernetes
to discover a new group in the cluster
-- a *proxy service*, to enable applications to reach the write leader of the
- group, transparently using PGD proxy
+- A *proxy service* to enable applications to reach the write leader of the
+ group transparently using PGD Proxy
-For an example using these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
+For an example that uses these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
![Basic architecture of an EDB Postgres Distributed for Kubernetes PGD group](./images/pg4k-pgd-basic-architecture.png)
Each service is generated from a customizable template in the `.spec.connectivity`
section of the manifest.
-All services must be reachable using their fully qualified domain name (FQDN)
-from all the PGD nodes in all the Kubernetes clusters (see below in this
-section).
+All services must be reachable using their FQDNs
+from all the PGD nodes in all the Kubernetes clusters. See [Domain names resolution](#domain-names-resolution).
-PG4K-PGD provides a service templating framework that gives you the
-availability to easily customize services at the following 3 levels:
+EDB Postgres Distributed for Kubernetes provides a service templating framework that gives you the
+ability to easily customize services at the following three levels:
Node Service Template
-: Each PGD node is reachable using a service which can be configured in the
+: Each PGD node is reachable using a service that can be configured in the
`.spec.connectivity.nodeServiceTemplate` section.
Group Service Template
-: Each PGD group has a group service that is a single entry point for the
+: Each PGD group has a group service that's a single entry point for the
whole group and that can be configured in the
`.spec.connectivity.groupServiceTemplate` section.
Proxy Service Template
: Each PGD group has a proxy service to reach the group write leader through
- the PGD proxy, and can be configured in the `.spec.connectivity.proxyServiceTemplate`
+ the PGD proxy and can be configured in the `.spec.connectivity.proxyServiceTemplate`
section. This is the entry-point service for the applications.
-You can use templates to create a LoadBalancer service, and/or to add arbitrary
-annotations and labels to a service in order to integrate with other components
-available in the Kubernetes system (i.e. to create external DNS names or tweak
+You can use templates to create a LoadBalancer service or to add arbitrary
+annotations and labels to a service to integrate with other components
+available in the Kubernetes system (for example, to create external DNS names or tweak
the generated load balancer).
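+
+For example, a sketch of a proxy service template that requests a LoadBalancer
+and adds an arbitrary annotation might look like the following. The
+`metadata`/`spec` layout of the template and the annotation key are assumptions
+for this example; check the [API reference](api_reference.md) for the exact
+structure:
+
+```yaml
+spec:
+  connectivity:
+    proxyServiceTemplate:
+      metadata:
+        annotations:
+          example.com/dns-name: my-group.my-domain.com
+      spec:
+        type: LoadBalancer
+```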
## Domain names resolution
-PG4K-PGD ensures that all resources in a PGD Group have a fully qualified
-domain name (FQDN) by adopting a convention that uses the PGD Group name as a prefix
+EDB Postgres Distributed for Kubernetes ensures that all resources in a PGD group have an FQDN by adopting a convention that uses the PGD group name as a prefix
for all of them.
-As a result, it expects that you define the domain name of the PGD Group. This
-can be done through the `.spec.connectivity.dns` section which controls how the
-FQDN for the resources are generated, with two fields:
+As a result, it expects you to define the domain name of the PGD group. This
+can be done through the `.spec.connectivity.dns` section, which controls how the
+FQDNs for the resources are generated, using two fields:
-- `domain`: domain name to be used by all the objects in the PGD group (mandatory);
-- `hostSuffix`: suffix to be added to each service in the PGD group (optional).
+- `domain` — Domain name for all the objects in the PGD group to use (mandatory).
+- `hostSuffix` — Suffix to add to each service in the PGD group (optional).
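+
+For example, with the following fragment, the services of a PGD group named
+`location-a` get FQDNs such as `location-a-svc.my-domain.com` (the values are
+placeholders):
+
+```yaml
+spec:
+  connectivity:
+    dns:
+      domain: my-domain.com
+      hostSuffix: -svc
+```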
-## TLS Configuration
+## TLS configuration
-PG4K-PGD requires that resources in a PGD Cluster communicate over a secure
+EDB Postgres Distributed for Kubernetes requires that resources in a PGD cluster communicate over a secure
connection. It relies on PostgreSQL's native support for [SSL connections](https://www.postgresql.org/docs/current/libpq-ssl.html)
to encrypt client/server communications using TLS protocols for increased
security.
-Currently, PG4K-PGD requires that [cert-manager](https://cert-manager.io/) is installed.
-Cert-manager has been chosen as the tool to provision dynamic certificates,
-given that it is widely recognized as the de facto standard in a Kubernetes
+Currently, EDB Postgres Distributed for Kubernetes requires that [cert-manager](https://cert-manager.io/) is installed.
+Cert-manager was chosen as the tool to provision dynamic certificates
+given that it's widely recognized as the standard in a Kubernetes
environment.
The `spec.connectivity.tls` section describes how the communication between the
-nodes should happen:
+nodes happens:
- `mode` is an enumeration describing how the server certificates are verified
during PGD group nodes communication. It accepts the following values, as
- documented in ["SSL Support"](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
- from the PostgreSQL documentation:
+ documented in [SSL Support](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
+ in the PostgreSQL documentation:
- `verify-full`
- `verify-ca`
@@ -100,59 +98,59 @@ nodes should happen:
- `serverCert` defines the server certificates used by the PGD group nodes to
accept requests.
- The clients validate this certificate depending on the passed TLS mode;
- refer to the previous point for the accepted values.
+ The clients validate this certificate according to the configured TLS
+ mode (see `mode` for the accepted values).
-- `clientCert` defines the `streaming_replica` user certificate that will
- be used by the nodes to authenticate each other.
+- `clientCert` defines the `streaming_replica` user certificate
+ used by the nodes to authenticate each other.
-### Server TLS Configuration
+### Server TLS configuration
-The server certificate configuration is specified in `.spec.connectivity.tls.serverCert.certManager`
-section of the PGDGroup custom resource.
+The server certificate configuration is specified in the `.spec.connectivity.tls.serverCert.certManager`
+section of the `PGDGroup` custom resource.
-The following assumptions have been made for this section to work:
+The following assumptions were made for this section to work:
- An issuer `.spec.connectivity.tls.serverCert.certManager.issuerRef` is available
for the domain `.spec.connectivity.dns.domain` and any other domain used by
- `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
-- There is a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`
+ `.spec.connectivity.tls.serverCert.certManager.altDnsNames`.
+- There's a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`.
-The `.spec.connectivity.tls.serverCert.certManager` is used to create a per node
-cert-manager certificate request
-The resulting certificate will be used by the underlying Postgres instance
+The `.spec.connectivity.tls.serverCert.certManager` is used to create a per-node
+cert-manager certificate request.
+The resulting certificate is used by the underlying Postgres instance
to terminate TLS connections.
-The operator will add the following altDnsNames to the certificate:
+The operator adds the following altDnsNames to the certificate:
- `$node$hostSuffix.$domain`
- `$groupName$hostSuffix.$domain`
!!! Important
- It's your responsibility to add in `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
- any name required from the underlying networking architecture
- (e.g., load balancers used by the user to reach the nodes).
+ It's your responsibility to add to `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
+ any name required from the underlying networking architecture,
+ for example, load balancers used by the user to reach the nodes.
-### Client TLS Configuration
+### Client TLS configuration
The operator requires client certificates to be dynamically provisioned
-via cert-manager (recommended approach) or pre-provisioned via secrets.
+using cert-manager (the recommended approach) or pre-provisioned using secrets.
-#### Dynamic provisioning via Cert-manager
+#### Dynamic provisioning via cert-manager
-The client certificates configuration is managed by `.spec.connectivity.tls.clientCert.certManager`
-section of the PGDGroup custom resource.
-The following assumptions have been made for this section to work:
+The client certificates configuration is managed by the `.spec.connectivity.tls.clientCert.certManager`
+section of the `PGDGroup` custom resource.
+The following assumptions were made for this section to work:
- An issuer `.spec.connectivity.tls.clientCert.certManager.issuerRef` is available
- and will sign a certificate with the common name `streaming_replica`
-- There is a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`
+ and signs a certificate with the common name `streaming_replica`.
+- There's a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`.
-The operator will use the configuration under `.spec.connectivity.tls.clientCert.certManager`
+The operator uses the configuration under `.spec.connectivity.tls.clientCert.certManager`
to create a certificate request per the `streaming_replica` Postgres user.
-The resulting certificate will be used to secure communication between the nodes.
+The resulting certificate is used to secure communication between the nodes.
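+
+Putting the server and client settings together, a sketch of a `connectivity.tls`
+configuration that relies on cert-manager might look like the following. The
+issuer and secret names are placeholders, and the `issuerRef` fields follow the
+standard cert-manager object reference:
+
+```yaml
+spec:
+  connectivity:
+    tls:
+      mode: verify-ca
+      serverCert:
+        caCertSecret: server-ca-cert        # public certificate of the server CA
+        certManager:
+          issuerRef:
+            name: server-ca-issuer
+            kind: Issuer
+            group: cert-manager.io
+      clientCert:
+        caCertSecret: client-ca-cert        # public certificate of the client CA
+        certManager:
+          issuerRef:
+            name: client-ca-issuer          # signs the streaming_replica certificate
+            kind: Issuer
+            group: cert-manager.io
+```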
#### Pre-provisioned certificates via secrets
@@ -160,65 +158,65 @@ Alternatively, you can specify a secret containing the pre-provisioned
client certificate for the streaming replication user through the
`.spec.connectivity.tls.clientCert.preProvisioned.streamingReplica.secretRef` option.
The certificate lifecycle in this case is managed entirely by a third party,
-either manually or automated, by simply updating the content of the secret.
+either manually or automated, by updating the content of the secret.
## Connecting to a PGD cluster from an application
-Connecting to a PGD Group from an application running inside the same Kubernetes cluster
-or from outside the cluster is a simple procedure. In both cases, you will connect to
-the proxy service of the PGD Group as the `app` user. The proxy service is a LoadBalancer
-service that will route the connection to the write leader of the PGD Group.
+Connecting to a PGD group from an application running inside the same Kubernetes cluster
+or from outside the cluster is a simple procedure. In both cases, you connect to
+the proxy service of the PGD group as the `app` user. The proxy service is a LoadBalancer
+service that routes the connection to the write leader of the PGD group.
### Connecting from inside the cluster
When connecting from inside the cluster, you can use the proxy service name to connect
-to the PGD Group. The proxy service name is composed of the PGD Group name and the (optional)
-host suffix defined in the `.spec.connectivity.dns` section of the PGDGroup custom resource.
+to the PGD group. The proxy service name is composed of the PGD group name and the optional
+host suffix defined in the `.spec.connectivity.dns` section of the `PGDGroup` custom resource.
-For example, if the PGD Group name is `my-group` and the host suffix is `.my-domain.com`,
-the proxy service name will be `my-group.my-domain.com`.
+For example, if the PGD group name is `my-group`, and the host suffix is `.my-domain.com`,
+the proxy service name is `my-group.my-domain.com`.
-Before connecting you will need to get the password for the app user from the app user
-secret. The naming format of the secret is `my-group-app` for a PGD Group named `my-group`.
+Before connecting, you need to get the password for the app user from the app user
+secret. The naming format of the secret is `my-group-app` for a PGD group named `my-group`.
-You can get the username and password from the secret with the following commands:
+You can get the username and password from the secret using the following commands:
```sh
kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode
kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode
```
-With this you now have all the pieces for a connection string to the PGD Group:
+With this, you have all the pieces for a connection string to the PGD group:
```text
postgresql://<app-user>:<app-password>@<host>:5432/<database>
```
-or for a `psql` invocation:
+Or, for a `psql` invocation:
```sh
psql -U <app-user> -h <host> <database>
```
-where `app-user` and `app-password` are the values you got from the secret,
+Where `app-user` and `app-password` are the values you got from the secret,
and `database` is the name of the database you want to connect
-to (the default is `app` for the app user.)
+to. (The default is `app` for the app user.)
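+
+Putting the pieces together, one way to script the connection, assuming the group and
+host suffix from the previous example and the default `app` database:
+
+```sh
+APP_USER=$(kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode)
+APP_PASSWORD=$(kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode)
+
+# Connect through the proxy service as the app user
+psql "postgresql://${APP_USER}:${APP_PASSWORD}@my-group.my-domain.com:5432/app"
+```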
### Connecting from outside the Kubernetes cluster
When connecting from outside the Kubernetes cluster, in the general case,
-the [*Ingress*](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [*Load Balancer*](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) will be necessary.
-Check your cloud provider or local installation for more information about the
-behavior of them in your environment.
+the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) is necessary.
+Check your cloud provider or local installation for more information about their
+behavior in your environment.
-Ingresses and Load Balancers require a Pod selector to forward connection to
-the PGD proxies. When configuring them, we suggest to use the following labels:
+Ingresses and load balancers require a pod selector to forward connections to
+the PGD proxies. When configuring them, we suggest using the following labels,
+as shown in the sketch after this list:
-- `k8s.pgd.enterprisedb.io/group`: set the the PGD group name
-- `k8s.pgd.enterprisedb.io/workloadType`: set to `pgd-proxy`
+- `k8s.pgd.enterprisedb.io/group` — Set the PGD group name.
+- `k8s.pgd.enterprisedb.io/workloadType` — Set to `pgd-proxy`.
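+
+For example, a minimal LoadBalancer service sketch that selects the proxy pods of a
+PGD group named `my-group` (the service name and port mapping are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-group-proxy-external
+spec:
+  type: LoadBalancer
+  selector:
+    k8s.pgd.enterprisedb.io/group: my-group          # the PGD group name
+    k8s.pgd.enterprisedb.io/workloadType: pgd-proxy
+  ports:
+  - name: postgres
+    port: 5432
+    targetPort: 5432
+```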
If using Kind or other solutions for local development, the easiest way to
-access the PGD Group from outside is to use port forwarding
+access the PGD group from outside is to use port forwarding
to the proxy service. You can use the following command to forward port 5432 on your
local machine to the proxy service:
@@ -226,4 +224,4 @@ local machine to the proxy service:
kubectl port-forward svc/my-group.my-domain.com 5432:5432
```
-where `my-group.my-domain.com` is the proxy service name from the previous example.
\ No newline at end of file
+Where `my-group.my-domain.com` is the proxy service name from the previous example.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index c11e9f94e50..59a381222af 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -29,8 +29,8 @@ directoryDefaults:
displayBanner: Preview release v0.7.1
---
-**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
-operator designed to manage **EDB Postgres Distributed** workloads on
+EDB Postgres Distributed for Kubernetes (`pg4k-pgd`) is an
+operator designed to manage EDB Postgres Distributed (PGD) workloads on
Kubernetes, with traffic routed by PGD Proxy.
The main custom resource that the operator provides is called `PGDGroup`.
@@ -40,45 +40,45 @@ Architectures can also be deployed across different Kubernetes clusters.
## Before you start
EDB Postgres Distributed for Kubernetes provides you with a way to deploy
-EDB Postgres Distributed in a Kubernetes environment. As a result, it
-is fundamental that you have read the
-["EDB Postgres Distributed" documentation](https://www.enterprisedb.com/docs/pgd/latest/).
+EDB Postgres Distributed in a Kubernetes environment. Therefore, we recommend
+reading the
+[EDB Postgres Distributed documentation](/pgd/latest/).
-The following chapters are very important to start working with EDB Postgres
-Distributed for Kubernetes:
+To start working with EDB Postgres
+Distributed for Kubernetes, read the following in the PGD documentation:
-- [Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/)
-- [PGD Overview](https://www.enterprisedb.com/docs/pgd/latest/overview/)
-- [Choosing your architecture](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
-- [Choosing a Postgres distribution](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+- [Terminology](/pgd/latest/terminology/)
+- [PGD overview](/pgd/latest/overview/)
+- [Choosing your architecture](/pgd/latest/architectures/)
+- [Choosing a Postgres distribution](/pgd/latest/choosing_server/)
-For advanced usage and maximum customization, it is also important to familiarize with
-["EDB Postgres for Kubernetes" (PG4K) documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/),
-as described in the ["Architecture" section](architecture.md#relationship-with-edb-postgres-for-kubernetes).
+For advanced usage and maximum customization, it's also important to be familiar with the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/),
+as described in [Architecture](architecture.md#relationship-with-edb-postgres-for-kubernetes).
## Supported Kubernetes distributions
EDB Postgres Distributed for Kubernetes is available for:
-- Kubernetes version 1.23 or higher through a Helm Chart
-- Red Hat OpenShift version 4.10 or higher through the Red Hat OpenShift
- Certified Operator only
+- Kubernetes version 1.23 or later through a Helm chart
+- Red Hat OpenShift version 4.10 or later only through the Red Hat OpenShift
+ certified operator
## Requirements
EDB Postgres Distributed for Kubernetes requires that the Kubernetes/OpenShift
-clusters hosting the distributed PGD cluster have been prepared by you to cater for:
+clusters hosting the distributed PGD cluster be prepared by you to provide:
-- the Public Key Infrastructure (PKI) encompassing all the Kubernetes clusters
- the PGD Global Group is spread across, as mTLS is required to authenticate
- and authorize all nodes in the mesh topology and guarantee encrypted communication
+- The public key infrastructure (PKI) encompassing all the Kubernetes clusters
+ the PGD global group is spread across. mTLS is required to authenticate
+ and authorize all nodes in the mesh topology and guarantee encrypted communication.
- Networking infrastructure across all Kubernetes clusters involved in the
- PGD Global Group to ensure that each node can communicate with each other
+ PGD global group to ensure that each node can communicate with all the others
-EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or higher.
+EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or later.
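+
+One way to check the installed version, assuming Cert Manager was deployed with its
+default manifests in the `cert-manager` namespace:
+
+```sh
+kubectl get deployment cert-manager -n cert-manager \
+  -o jsonpath='{.spec.template.spec.containers[0].image}'
+```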
!!! Seealso "About connectivity"
- Please refer to the ["Connectivity" section](connectivity.md) for more information.
+ See [Connectivity](connectivity.md) for more information.
-#### Exposed Ports
+#### Exposed ports
-EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager and operand
-levels, as listed in the table below:
+EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager, and operand
+levels, as shown in the table.
| System | Port number | Exposing | Name | Certificates | Authentication |
| :--------------- | :---------- | :------------------ | :--------------- | :----------- | :------------- |
@@ -222,26 +221,26 @@ levels, as listed in the table below:
### PGD
-The current implementation of EDB Postgres Distributed for Kubernetes automatically creates
-passwords for the `postgres` superuser and the database owner.
+The current implementation of EDB Postgres Distributed for Kubernetes creates
+passwords for the postgres superuser and the database owner.
-As far as encryption of password is concerned, EDB Postgres Distributed for Kubernetes follows
-the default behavior of PostgreSQL: starting from PostgreSQL 14,
-`password_encryption` is by default set to `scram-sha-256`, while on earlier
-versions it is set to `md5`.
+As far as encryption of passwords is concerned, EDB Postgres Distributed for Kubernetes follows
+the default behavior of PostgreSQL: starting with PostgreSQL 14,
+`password_encryption` is by default set to `scram-sha-256`. On earlier
+versions, it's set to `md5`.
!!! Important
- Please refer to the ["Connection DSNs and SSL"](https://www.enterprisedb.com/docs/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
- section in the PGD documentation for details.
+ See [Connection DSNs and SSL](/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
+ in the PGD documentation for details.
-You can disable management of the `postgres` user password via secrets by setting
+You can disable management of the postgres user password using secrets by setting
`enableSuperuserAccess` to `false` in the `cnp` section of the spec.
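+
+For example, a sketch of this setting in the `PGDGroup` spec (other `cnp` fields omitted):
+
+```yaml
+spec:
+  cnp:
+    enableSuperuserAccess: false   # stop managing the postgres password secret
+```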
!!! Note
The operator supports toggling the `enableSuperuserAccess` option. When you
- disable it on a running cluster, the operator will ignore the content of the secret,
- remove it (if previously generated by the operator) and set the password of the
- `postgres` user to `NULL` (de facto disabling remote access through password authentication).
+ disable it on a running cluster, the operator ignores the content of the secret,
+ removes it (if previously generated by the operator), and sets the password of the
+ postgres user to `NULL`, in effect disabling remote access through password authentication.
### Storage
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
index ee42e6afa42..47d71bcf954 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
@@ -1,17 +1,17 @@
---
-title: 'Client TLS/SSL Connections'
+title: 'Client TLS/SSL connections'
originalFilePath: 'src/ssl_connections.md'
---
!!! Seealso "Certificates"
- Please refer to the ["Certificates"](certificates.md)
- page for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
+ See [Certificates](certificates.md)
+ for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
-The EDB Postgres Distributed for Kubernetes operator has been designed to work with TLS/SSL for both encryption in transit and
-authentication, on server and client sides. PGD nodes are created as Cluster
-resources using the EDB Postgres for Kubernetes (PG4K) operator, and this
-includes the deployment of a Certification
-Authority (CA) to create and sign TLS client certificates.
+The EDB Postgres Distributed for Kubernetes operator was designed to work with TLS/SSL for both encryption in transit and
+authentication on server and client sides. PGD nodes are created as `Cluster`
+resources using the EDB Postgres for Kubernetes operator. This
+includes deploying a certification
+authority (CA) to create and sign TLS client certificates.
-Please refer to the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
-for further information on issuers and certificates.
\ No newline at end of file
+See the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
+for more information on issuers and certificates.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
index 0ec06ddba2a..831bcd9cf1d 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
@@ -3,19 +3,18 @@ title: 'Use cases'
originalFilePath: 'src/use_cases.md'
---
-EDB Postgres Distributed for Kubernetes has been designed to work with applications
-that reside in the same Kubernetes cluster, for a full cloud native
+EDB Postgres Distributed for Kubernetes was designed to work with applications
+that reside in the same Kubernetes cluster for a full cloud native
experience.
However, it might happen that, while the database can be hosted
-inside a Kubernetes cluster, applications cannot be containerized
-at the same time and need to run in a *traditional environment* such
+inside a Kubernetes cluster, applications can't be containerized
+at the same time and need to run in a traditional environment such
as a VM.
-We reproduce here a summary of the basic considerations, and refer
-you to the
-[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/use_cases/)
-for further depth.
+The following is a summary of the basic considerations. See the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/use_cases/)
+for more detail.
## Case 1: Applications inside Kubernetes
@@ -24,21 +23,21 @@ namespace inside a Kubernetes cluster.
![Application and Database inside Kubernetes](./images/apps-in-k8s.png)
-The application, normally stateless, is managed as a standard `Deployment`,
-with multiple replicas spread over different Kubernetes node, and internally
-exposed through a `ClusterIP` service.
+The application, normally stateless, is managed as a standard deployment,
+with multiple replicas spread over different Kubernetes nodes and internally
+exposed through a ClusterIP service.
-The service is exposed externally to the end user through an `Ingress` and the
-provider's load balancer facility, via HTTPS.
+The service is exposed externally to the end user through an Ingress and the
+provider's load balancer facility by way of HTTPS.
## Case 2: Applications outside Kubernetes
-Another possible use case is to manage your Postgres Distributed database inside
-Kubernetes, while having your applications outside of it (for example in a
-virtualized environment). In this case, Postgres Distributed is represented by an IP
-address (or host name) and a TCP port, corresponding to the defined Ingress
+Another possible use case is to manage your PGD database inside
+Kubernetes while having your applications outside of it, for example, in a
+virtualized environment. In this case, PGD is represented by an IP
+address or host name and a TCP port, corresponding to the defined Ingress
resource in Kubernetes.
-The application can still benefit from a TLS connection to Postgres Distributed.
+The application can still benefit from a TLS connection to PGD.
![Application outside Kubernetes](./images/apps-outside-k8s.png)
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
index 0a5689af355..4c971e5752a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
@@ -1,34 +1,34 @@
---
-title: 'Managing EDB Postgres Distributed databases'
+title: 'Managing EDB Postgres Distributed (PGD) databases'
originalFilePath: 'src/using_pgd.md'
---
As described in the [architecture document](architecture.md),
EDB Postgres Distributed for Kubernetes is an operator created to deploy
-Postgres Distributed (PGD) databases.
+PGD databases.
It provides an alternative to deployment with TPA, and by leveraging the
Kubernetes ecosystem, it can offer self-healing and declarative control.
-The operator is also responsible of the backup and restore operations
-(see the [backup](backup.md) document.)
+The operator is also responsible for the backup and restore operations.
+See [Backup](backup.md).
-However, many of the operations and control of PGD clusters are not
+However, many of the operations and control of PGD clusters aren't
managed by the operator.
-The pods created by EDB Postgres Distributed for Kubernetes come with
-[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed, and
-this is the tool that can be used, for example, to execute a switchover.
+The pods created by EDB Postgres Distributed for Kubernetes come with the
+[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed. You can use
+this tool, for example, to execute a switchover.
## PGD CLI
!!! Warning
- The PGD CLI should not be used to create/delete resources. For example,
- the `create-proxy`, `delete-proxy` commands should be avoided.
+ Don't use the PGD CLI to create and delete resources. For example,
+ avoid the `create-proxy` and `delete-proxy` commands.
Provisioning of resources is under the control of the operator, and manual
- creation/deletion is not supported.
+ creation and deletion isn't supported.
-As an example, let's execute a switchover command.
+As an example, execute a switchover command.
-It is recommendable to use the PGD CLI from proxy pods. Let's find them.
-You can get a pod listing for your cluster:
+We recommend that you use the PGD CLI from proxy pods. To find them,
+get a pod listing for your cluster:
```shell
kubectl get pods -n my-namespace
@@ -41,14 +41,14 @@ location-a-proxy-0 1/1 Running 0 2h
location-a-proxy-1 1/1 Running 0 2h
```
-The proxy nodes have `proxy` in the name. Let's choose one and get a command
+The proxy nodes have `proxy` in the name. Choose one, and get a command
prompt in it:
```shell
kubectl exec -n my-namespace -ti location-a-proxy-0 -- bash
```
-You should now have a bash session open with the proxy pod. The `pgd` command
+You now have a bash session open with the proxy pod. The `pgd` command
is available:
```shell
@@ -91,37 +91,37 @@ location-a-3 1403922770 location-a data ACTIVE ACTIVE Up 3
## Accessing the database
-In the [use cases document](use_cases.md) you can find a discussion on using the
-database within the Kubernetes cluster vs. from outside, and in the
-[connectivity document](connectivity.md), you can find a discussion on services,
+[Use cases](use_cases.md) includes a discussion on using the
+database within the Kubernetes cluster versus from outside. In
+[Connectivity](connectivity.md), you can find a discussion on services,
which is relevant for accessing the database from applications.
-However you implement your system, your applications should use the proxy
-service to connect, in order to reap the benefits of Postgres Distributed, and
+However you implement your system, your applications must use the proxy
+service to connect so that they reap the benefits of PGD and
of the increased self-healing capabilities added by the EDB Postgres Distributed
for Kubernetes operator.
!!! Important
- Note that, as per the EDB Postgres for Kubernetes defaults, data nodes are
- created with a database called `app`, owned by a user named `app`, in
- contrast to the `bdrdb` database you'll find in the EDB Postgres
- Distributed documentation. This
- is configurable by the user, in the `cnp` section of the manifest.
- See the [EDB Postgres for Kubernetes bootstrapping document](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/)
- for reference.
+ As per the EDB Postgres for Kubernetes defaults, data nodes are
+ created with a database called `app` and owned by a user named `app`, in
+ contrast to the `bdrdb` database described in the EDB Postgres
+ Distributed documentation. You can configure these values
+ in the `cnp` section of the manifest.
+ For reference, see [Bootstrap](/postgres_for_kubernetes/latest/bootstrap/) in the EDB Postgres for Kubernetes
+ documentation.
-You may, however, want access to your PGD data nodes for administrative tasks,
-using the `psql` CLI.
+You might, however, want access to your PGD data nodes for administrative tasks,
+using the psql CLI.
-As we did in the previous section on using the PGD CLI, we can get a pod listing
-for our PGD cluster, and `kubectl exec` into a data node:
+You can get a pod listing
+for your PGD cluster and `kubectl exec` into a data node:
```shell
kubectl exec -n my-namespace -ti location-a-1-1 -- psql
```
-In the familiar territory of `psql`, you should remember that the default
-created database is named `app` (see warning above).
+In the familiar territory of psql, remember that the default
+created database is named `app` (see the previous note).
```terminal
postgres=# \c app
@@ -139,10 +139,10 @@ peer_target_state_name | ACTIVE
<- snipped ->
```
-For your applications, of course, you should use the non-privileged role (`app`
+For your applications, use the non-privileged role (`app`
by default).
-You will need the user credentials, which are stored in a Kubernetes secret:
+You need the user credentials, which are stored in a Kubernetes secret:
```shell
kubectl get secrets
@@ -152,7 +152,7 @@ NAME TYPE DATA AGE
location-a-app kubernetes.io/basic-auth 2 2h
```
-This secret contains the username and password needed for the postgres DSN,
+This secret contains the username and password needed for the Postgres DSN,
encoded in base64:
```shell
From d6894ecf38fa86b90847bbc0339c94f2144bae25 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 3 Apr 2024 04:40:29 +0000
Subject: [PATCH 06/39] Fix URLs in api doc
---
.../processors/pg4k-pgd/replace-beta-urls.mjs | 17 +++++++++++++++++
scripts/source/process-pgd4k-docs.sh | 1 +
2 files changed, 18 insertions(+)
create mode 100644 scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
diff --git a/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs b/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
new file mode 100644
index 00000000000..97fc5999020
--- /dev/null
+++ b/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
@@ -0,0 +1,17 @@
+// Replace URLs beginning with the following patterns...
+// - https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/pg4k-pgd.v1beta1#
+// ...with "#" (that is, leave them relative.) This handles a weird API docs thing during development.
+
+const replacements = [
+ {pattern: /https:\/\/www\.enterprisedb\.com\/docs\/postgres_for_kubernetes\/latest\/pg4k-pgd.v1beta1#/g, replacement: "#"},
+];
+
+export const process = (filename, content) => {
+ for (const r of replacements)
+ content = content.replace(r.pattern, r.replacement);
+
+ return {
+ newFilename: filename,
+ newContent: content,
+ };
+};
diff --git a/scripts/source/process-pgd4k-docs.sh b/scripts/source/process-pgd4k-docs.sh
index 11e65fd1cce..a6150d98110 100755
--- a/scripts/source/process-pgd4k-docs.sh
+++ b/scripts/source/process-pgd4k-docs.sh
@@ -28,6 +28,7 @@ cd $SOURCE_CHECKOUT/docs-import/docs
node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
-f "src/**/*.md" \
-p "cnp/replace-github-urls" \
+ -p "pg4k-pgd/replace-beta-urls" \
-p "cnp/update-yaml-links" \
-p "cnp/add-frontmatters" \
-p "cnp/cleanup-html" \
From 3f0e6a076230fd7d279e3b51c895b3552a9495a8 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Fri, 16 Feb 2024 01:16:07 +0000
Subject: [PATCH 07/39] v1 import (April 22nd 2024)
---
.../1/api_reference.md.in | 30 -
.../1/api_reference.mdx | 657 ----
.../1/architecture.mdx | 171 +-
.../1/backup.mdx | 102 +-
.../1/before_you_start.mdx | 49 +-
.../1/certificates.mdx | 35 +-
.../1/connectivity.mdx | 186 +-
.../1/group_cleanup.mdx | 78 +
.../1/index.mdx | 83 +-
.../1/installation_upgrade.mdx | 47 +-
.../1/labels_annotations.mdx | 70 +
.../1/openshift.mdx | 211 +-
.../1/pause_resume.mdx | 57 +
.../1/pg4k-pgd.v1beta1.mdx | 2816 +++++++++++++++++
.../1/private_registries.mdx | 63 +-
.../1/quickstart.mdx | 73 +-
.../1/recovery.mdx | 77 +-
.../1/release_notes.mdx | 26 +-
.../1/samples.mdx | 12 +-
.../1/security.mdx | 202 +-
.../1/ssl_connections.mdx | 20 +-
.../1/supported_versions.mdx | 22 +
.../1/tde.mdx | 119 +
.../1/use_cases.mdx | 37 +-
.../1/using_pgd.mdx | 76 +-
25 files changed, 3898 insertions(+), 1421 deletions(-)
delete mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
delete mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/group_cleanup.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/supported_versions.mdx
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
deleted file mode 100644
index 7998937baf2..00000000000
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.md.in
+++ /dev/null
@@ -1,30 +0,0 @@
-# API reference
-
-EDB Postgres Distributed for Kubernetes extends the Kubernetes API by defining the
-custom resources that follow.
-
-All the resources are defined in the `pgd.k8s.enterprisedb.io/v1beta1`
-API.
-
-
-
-{{ range $ -}}
-- [{{ .Name -}}](#{{ .Name -}})
-{{ end }}
-
-{{ range $ -}}
-{{ .Anchor }}
-
-## {{ .Name }}
-
-{{ .Doc -}}
-{{ if .Items }}
-
-{{ .TableFieldName }} | {{ .TableFieldDoc }} | {{ .TableFieldRawType }}
-{{ .TableFieldNameDashSize }} | {{ .TableFieldDocDashSize }} | {{ .TableFieldRawTypeDashSize }}
-{{ end }}
-{{- range .Items -}}
-`{{ .Name }}` | {{ .Doc }}{{ if .Mandatory }} - *mandatory* {{ end }} | {{ .RawType }}
-{{ end }}
-{{ end -}}
-
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
deleted file mode 100644
index ae63da33e12..00000000000
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/api_reference.mdx
+++ /dev/null
@@ -1,657 +0,0 @@
----
-title: 'API reference'
-originalFilePath: 'src/api_reference.md'
----
-
-EDB Postgres Distributed for Kubernetes extends the Kubernetes API by defining the
-custom resources that follow.
-
-All the resources are defined in the `pgd.k8s.enterprisedb.io/v1beta1`
-API.
-
-
-
-- [Backup](#Backup)
-- [BackupStatus](#BackupStatus)
-- [CNPStatus](#CNPStatus)
-- [CertManagerTemplate](#CertManagerTemplate)
-- [ClientCertConfiguration](#ClientCertConfiguration)
-- [ClientPreProvisionedCertificates](#ClientPreProvisionedCertificates)
-- [CnpBaseConfiguration](#CnpBaseConfiguration)
-- [CnpConfiguration](#CnpConfiguration)
-- [ConnectivityConfiguration](#ConnectivityConfiguration)
-- [ConnectivityStatus](#ConnectivityStatus)
-- [DNSConfiguration](#DNSConfiguration)
-- [DiscoveryJobConfig](#DiscoveryJobConfig)
-- [InheritedMetadata](#InheritedMetadata)
-- [Metadata](#Metadata)
-- [NameKindGroup](#NameKindGroup)
-- [NodeCertificateStatus](#NodeCertificateStatus)
-- [NodeSummary](#NodeSummary)
-- [OTELConfiguration](#OTELConfiguration)
-- [OTELTLSConfiguration](#OTELTLSConfiguration)
-- [PGDGroup](#PGDGroup)
-- [PGDGroupCleanup](#PGDGroupCleanup)
-- [PGDGroupCleanupList](#PGDGroupCleanupList)
-- [PGDGroupCleanupSpec](#PGDGroupCleanupSpec)
-- [PGDGroupCleanupStatus](#PGDGroupCleanupStatus)
-- [PGDGroupList](#PGDGroupList)
-- [PGDGroupSpec](#PGDGroupSpec)
-- [PGDGroupStatus](#PGDGroupStatus)
-- [PGDNodeGroupEntry](#PGDNodeGroupEntry)
-- [PGDNodeGroupSettings](#PGDNodeGroupSettings)
-- [PGDProxyConfiguration](#PGDProxyConfiguration)
-- [PGDProxyEntry](#PGDProxyEntry)
-- [PGDProxySettings](#PGDProxySettings)
-- [PGDProxyStatus](#PGDProxyStatus)
-- [PGDStatus](#PGDStatus)
-- [ParentGroupConfiguration](#ParentGroupConfiguration)
-- [PgdConfiguration](#PgdConfiguration)
-- [PreProvisionedCertificate](#PreProvisionedCertificate)
-- [ReplicationCertificateStatus](#ReplicationCertificateStatus)
-- [Restore](#Restore)
-- [RestoreStatus](#RestoreStatus)
-- [RootDNSConfiguration](#RootDNSConfiguration)
-- [SQLMutation](#SQLMutation)
-- [ServerCertConfiguration](#ServerCertConfiguration)
-- [ServiceTemplate](#ServiceTemplate)
-- [TLSConfiguration](#TLSConfiguration)
-
-
-
-## Backup
-
-Backup configures the backup of cnp-pgd nodes
-
-| Name | Description | Type |
-| --------------- | ------------------------------------------------------------------------------------------ | ------------------------- |
-| `configuration` | The CNP configuration to be used for backup. ServerName value is reserved by the operator. | cnpv1.BackupConfiguration |
-| `cron ` | The scheduled backup for the data | cnpv1.ScheduledBackupSpec |
-
-
-
-## BackupStatus
-
-BackupStatus contains the current status of the pgd backup
-
-| Name | Description | Type |
-| --------------------- | ----------- | ------ |
-| `clusterName ` | | string |
-| `scheduledBackupName` | | string |
-
-
-
-## CNPStatus
-
-CNPStatus contains any relevant status for the operator about CNP
-
-| Name | Description | Type |
-| -------------------------------- | --------------------------------------------------------------------------------- | ----------------- |
-| `dataInstances ` | | int32 |
-| `witnessInstances ` | | int32 |
-| `firstRecoverabilityPoints ` | The recoverability points, keyed per CNP clusterName, as a date in RFC3339 format | map[string]string |
-| `superUserSecretIsPresent ` | | bool |
-| `applicationUserSecretIsPresent` | | bool |
-| `podDisruptionBudgetIsPresent ` | | bool |
-
-
-
-## CertManagerTemplate
-
-CertManagerTemplate contains the data to generate a certificate request
-
-| Name | Description | Type |
-| ---------- | -------------------------------------------------- | ------------------------------- |
-| `spec ` | The Certificate object specification - *mandatory* | \*certmanagerv1.CertificateSpec |
-| `metadata` | The label and annotations metadata | [Metadata](#Metadata) |
-
-
-
-## ClientCertConfiguration
-
-ClientCertConfiguration contains the information to generate the certificate for the streaming_replica user
-
-| Name | Description | Type |
-| ---------------- | ------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------- |
-| `caCertSecret ` | CACertSecret is the secret of the CA to be injected into the CloudNativePG configuration - *mandatory* | string |
-| `certManager ` | The cert-manager template used to generate the certificates | [\*CertManagerTemplate](#CertManagerTemplate) |
-| `preProvisioned` | PreProvisioned contains how to fetch the pre-generated client certificates | [\*ClientPreProvisionedCertificates](#ClientPreProvisionedCertificates) |
-
-
-
-## ClientPreProvisionedCertificates
-
-ClientPreProvisionedCertificates instruct how to fetch the pre-generated client certificates
-
-| Name | Description | Type |
-| ------------------ | --------------------------------------------------------------------------- | --------------------------------------------------------- |
-| `streamingReplica` | StreamingReplica the pre-generated certificate for 'streaming_replica' user | [\*PreProvisionedCertificate](#PreProvisionedCertificate) |
-
-
-
-## CnpBaseConfiguration
-
-CnpBaseConfiguration contains the configuration parameters that can be applied to both CNP Witness and Data nodes
-
-| Name | Description | Type |
-| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 |
-| `stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 |
-| `storage ` | Configuration of the storage of the instances - *mandatory* | cnpv1.StorageConfiguration |
-| `walStorage ` | Configuration of the WAL storage for the instances | \*cnpv1.StorageConfiguration |
-| `clusterMaxStartDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 300) | int32 |
-| `affinity ` | Affinity/Anti-affinity rules for Pods | cnpv1.AffinityConfiguration |
-| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core) |
-| `postgresql ` | Configuration of the PostgreSQL server | cnpv1.PostgresConfiguration |
-| `monitoring ` | The configuration of the monitoring infrastructure of this cluster | \*cnpv1.MonitoringConfiguration |
-| `logLevel ` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | string |
-| `serviceAccountTemplate` | The service account template to be passed to CNP | \*cnpv1.ServiceAccountTemplate |
-| `otel ` | OpenTelemetry Configuration | [OTELConfiguration](#OTELConfiguration) |
-| `postInitSQL ` | List of SQL queries to be executed as a superuser immediately after a node has been created - to be used with extreme care (by default empty) | \[]string |
-| `postInitTemplateSQL ` | List of SQL queries to be executed as a superuser in the `template1` after a node has been created - to be used with extreme care (by default empty) | \[]string |
-| `seccompProfile ` | The SeccompProfile applied to every Pod and Container. Defaults to: `RuntimeDefault` | \*corev1.SeccompProfile |
-| `metadata ` | Metadata applied exclusively to the generated Cluster resources. Useful for applying AppArmor profiles. | [InheritedMetadata](#InheritedMetadata) |
-
-
-
-## CnpConfiguration
-
-CnpConfiguration contains the configurations of the data nodes that will be injected into the resulting clusters composing the PGD group
-
-| Name | Description | Type |
-| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
-| `enableSuperuserAccess` | When this option is enabled, the CNP operator will create or use the secret defined in the SuperuserSecret to allow superuser (postgres) access to the database. Disabled by default. | \*bool |
-| `superuserSecret ` | The secret containing the superuser password. A new secret will be created with a randomly generated password if not defined. This field is only allowed in the CNP Instances configuration. A Witness Node will always use the same SuperuserSecret as the other instances. | \*cnpv1.LocalObjectReference |
-
-
-
-## ConnectivityConfiguration
-
-ConnectivityConfiguration describes how to generate the services and certificates for the PGDGroup
-
-| Name | Description | Type |
-| ---------------------- | ----------------------------------------------------------------------------- | --------------------------------------------- |
-| `dns ` | Describes how the FQDN for the resources should be generated | [RootDNSConfiguration](#RootDNSConfiguration) |
-| `tls ` | The configuration of the TLS infrastructure - *mandatory* | [TLSConfiguration](#TLSConfiguration) |
-| `nodeServiceTemplate ` | Instructs how to generate the service for each node | [\*ServiceTemplate](#ServiceTemplate) |
-| `groupServiceTemplate` | Instructs how to generate the service for the PGDGroup | [\*ServiceTemplate](#ServiceTemplate) |
-| `proxyServiceTemplate` | Instructs how to generate the service pointing to the PGD Proxy | [\*ServiceTemplate](#ServiceTemplate) |
-
-
-
-## ConnectivityStatus
-
-ConnectivityStatus contains any relevant status for the operator about Connectivity
-
-| Name | Description | Type |
-| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- |
-| `replicationTLSCertificate ` | ReplicationTLSCertificate is the name of the replication TLS certificate, if we have it | [ReplicationCertificateStatus](#ReplicationCertificateStatus) |
-| `nodeTLSCertificates ` | NodeTLSCertificates are the names of the certificates that have been created for the PGD nodes | [\[\]NodeCertificateStatus](#NodeCertificateStatus) |
-| `unusedCertificates ` | UnusedCertificates are the names of the certificates that we don't use anymore for the PGD nodes | \[]string |
-| `nodesWithoutCertificates ` | NodesWithoutCertificates are the names of the nodes which have not a server certificate | \[]string |
-| `nodesNeedingServiceReconciliation` | NodesNeedingServiceReconciliation are the names of the nodes which have not a server certificate | \[]string |
-| `configurationHash ` | ConfigurationHash is the hash code of the connectivity configuration, used to check if we had a change in the configuration or not | string |
-
-
-
-## DNSConfiguration
-
-DNSConfiguration describes how the FQDN for the resources should be generated
-
-| Name | Description | Type |
-| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
-| `domain ` | Contains the domain name of by all services in the PGDGroup. It is responsibility of the user to ensure that the value specified here matches with the rendered nodeServiceTemplate and groupServiceTemplate | string |
-| `hostSuffix` | Contains an optional suffix to add to all the service names in the PGDGroup. The meaning of this setting it to allow the user to easily mark all the services created in a location for routing purpose (i.e., add a generic rule to CoreDNS to rewrite some service suffixes as local) | string |
-
-
-
-## DiscoveryJobConfig
-
-DiscoveryJobConfig contains a series of fields that configure the discovery job
-
-| Name | Description | Type |
-| --------- | ----------------------------------------------------------------------------- | ---- |
-| `delay ` | Delay amount of time to sleep between retries, measured in seconds | int |
-| `retries` | Retries how many times the operation should be retried | int |
-| `timeout` | Timeout amount of time given to the operation to succeed, measured in seconds | int |
-
-
-
-## InheritedMetadata
-
-InheritedMetadata contains metadata to be inherited by all resources related to a Cluster
-
-| Name | Description | Type |
-| ------------- | ----------- | ----------------- |
-| `labels ` | | map[string]string |
-| `annotations` | | map[string]string |
-
-
-
-## Metadata
-
-Metadata is a structure similar to the metav1.ObjectMeta, but still parseable by controller-gen to create a suitable CRD for the user.
-
-| Name | Description | Type |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------- |
-| `labels ` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: | map[string]string |
-| `annotations` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: | map[string]string |
-
-
-
-## NameKindGroup
-
-NameKindGroup a struct containing name kind and group
-
-| Name | Description | Type |
-| ------- | ------------- | ------ |
-| `name ` | - *mandatory* | string |
-| `kind ` | - *mandatory* | string |
-| `group` | - *mandatory* | string |
-
-
-
-## NodeCertificateStatus
-
-NodeCertificateStatus encapsulate the status of the server certificate of a CNP node
-
-| Name | Description | Type |
-| ---------- | ---------------------------------------------------------------------------- | ------ |
-| `nodeName` | NodeName is the name of the CNP cluster using this certificate - *mandatory* | string |
-
-
-
-## NodeSummary
-
-NodeSummary shows relevant info from bdr.node_summary
-
-| Name | Description | Type |
-| ------------------------ | ------------------------------------------------------------------ | ------------ |
-| `node_name ` | Name of the node | string |
-| `node_group_name ` | NodeGroupName is the name of the joined group | string |
-| `peer_state_name ` | Consistent state of the node in human-readable form | string |
-| `peer_target_state_name` | State which the node is trying to reach (during join or promotion) | string |
-| `node_kind_name ` | The kind of node: witness or data | NodeKindName |
-
-
-
-## OTELConfiguration
-
-OTELConfiguration is the configuration for external openTelemetry
-
-| Name | Description | Type |
-| ------------- | ---------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- |
-| `metricsURL ` | The OpenTelemetry HTTP endpoint URL to accept metrics data | string |
-| `traceURL ` | The OpenTelemetry HTTP endpoint URL to accept trace data | string |
-| `traceEnable` | Whether to push trace data to OpenTelemetry traceUrl - *mandatory* | bool |
-| `tls ` | TLSConfiguration provides the TLS certificate configuration when MetricsURL and TraceURL are using HTTPS | [OTELTLSConfiguration](#OTELTLSConfiguration) |
-
-
-
-## OTELTLSConfiguration
-
-OTELTLSConfiguration contains the certificate configuration for TLS connections to openTelemetry
-
-| Name | Description | Type |
-| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------- |
-| `caBundleSecretRef` | CABundleSecretRef is a reference to a secret field containing the CA bundle to verify the openTelemetry server certificate | \*cnpv1.SecretKeySelector |
-| `clientCertSecret ` | ClientCertSecret is the name of the secret containing the client certificate used to connect to openTelemetry. It must contain both the standard "tls.crt" and "tls.key" files, encoded in PEM format. | \*cnpv1.LocalObjectReference |
-
-
-
-## PGDGroup
-
-PGDGroup is the Schema for the pgdgroups API
-
-| Name | Description | Type |
-| ---------- | ----------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta) |
-| `spec ` | | [PGDGroupSpec](#PGDGroupSpec) |
-| `status ` | | [PGDGroupStatus](#PGDGroupStatus) |
-
-
-
-## PGDGroupCleanup
-
-PGDGroupCleanup is the Schema for the pgdgroupcleanups API
-
-| Name | Description | Type |
-| ---------- | ----------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta) |
-| `spec ` | | [PGDGroupCleanupSpec](#PGDGroupCleanupSpec) |
-| `status ` | | [PGDGroupCleanupStatus](#PGDGroupCleanupStatus) |
-
-
-
-## PGDGroupCleanupList
-
-PGDGroupCleanupList contains a list of PGDGroupCleanup
-
-| Name | Description | Type |
-| ---------- | ------------- | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta) |
-| `items ` | - *mandatory* | [\[\]PGDGroupCleanup](#PGDGroupCleanup) |
-
-
-
-## PGDGroupCleanupSpec
-
-PGDGroupCleanupSpec defines the desired state of PGDGroupCleanup
-
-| Name | Description | Type |
-| ---------- | --------------------------------------------------------------------------------------------- | ------ |
-| `executor` | - *mandatory* | string |
-| `target ` | - *mandatory* | string |
-| `force ` | Force will force the removal of the PGDGroup even if the target PGDGroup nodes are not parted | bool |
-
-
-
-## PGDGroupCleanupStatus
-
-PGDGroupCleanupStatus defines the observed state of PGDGroupCleanup
-
-| Name | Description | Type |
-| ------- | ----------- | ------------------------------ |
-| `phase` | | resources.OperatorPhaseCleanup |
-
-
-
-## PGDGroupList
-
-PGDGroupList contains a list of PGDGroup
-
-| Name | Description | Type |
-| ---------- | ------------- | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta) |
-| `items ` | - *mandatory* | [\[\]PGDGroup](#PGDGroup) |
-
-
-
-## PGDGroupSpec
-
-PGDGroupSpec defines the desired state of PGDGroup
-
-| Name | Description | Type |
-| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
-| `imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string |
-| `imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: | corev1.PullPolicy |
-| `imagePullSecrets ` | The list of pull secrets to be used to pull operator and or the operand images | [\[\]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#localobjectreference-v1-core) |
-| `inheritedMetadata ` | Metadata that will be inherited by all objects related to the pgdGroup | [\*InheritedMetadata](#InheritedMetadata) |
-| `instances ` | Number of instances required in the cluster - *mandatory* | int32 |
-| `proxyInstances ` | Number of proxy instances required in the cluster | int32 |
-| `witnessInstances ` | Number of witness instances required in the cluster | int32 |
-| `backup ` | The configuration to be used for backups in the CNP instances. | [\*Backup](#Backup) |
-| `restore ` | The configuration to restore this PGD group from an Object Store service | [\*Restore](#Restore) |
-| `cnp ` | Instances configuration that will be injected into the CNP clusters that compose the PGD Group - *mandatory* | [CnpConfiguration](#CnpConfiguration) |
-| `witness ` | WitnessInstances configuration that will be injected into the WitnessInstances CNP clusters If not defined, it will default to the Instances configuration | [\*CnpBaseConfiguration](#CnpBaseConfiguration) |
-| `pgd ` | Pgd contains instructions to bootstrap this cluster - *mandatory* | [PgdConfiguration](#PgdConfiguration) |
-| `pgdProxy ` | PGDProxy contains instructions to configure PGD Proxy | [PGDProxyConfiguration](#PGDProxyConfiguration) |
-| `connectivity ` | Configures the connectivity of the PGDGroup, like services and certificates that will be used. - *mandatory* | [ConnectivityConfiguration](#ConnectivityConfiguration) |
-| `failingFinalizerTimeLimitSeconds` | The amount of seconds that the operator will wait in case of a failing finalizer. A finalizer is considered failing when the operator cannot reach any nodes of the PGDGroup | int32 |
-
-
-
-## PGDGroupStatus
-
-PGDGroupStatus defines the observed state of PGDGroup
-
-| Name | Description | Type |
-| ------------------------ | ------------------------------------------------------------------------------ | ----------------------------------------- |
-| `latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 |
-| `phase ` | The initialization phase of this cluster | resources.OperatorPhase |
-| `phaseDetails ` | The details of the current phase | string |
-| `phaseTroubleshootHints` | PhaseTroubleshootHints general troubleshooting indications for the given phase | string |
-| `phaseType ` | PhaseType describes the phase category. | resources.PhaseType |
-| `nodes ` | The list of summaries for the nodes in the group | [\[\]NodeSummary](#NodeSummary) |
-| `backup ` | The node that is taking backups of this PGDGroup | [BackupStatus](#BackupStatus) |
-| `restore ` | The status of the restore process | [RestoreStatus](#RestoreStatus) |
-| `PGD ` | Last known status of PGD | [PGDStatus](#PGDStatus) |
-| `CNP ` | Last known status of CNP | [CNPStatus](#CNPStatus) |
-| `PGDProxy ` | Last known status of PGDProxy | [PGDProxyStatus](#PGDProxyStatus) |
-| `connectivity ` | Last known status of Connectivity | [ConnectivityStatus](#ConnectivityStatus) |
-
-
-
-## PGDNodeGroupEntry
-
-PGDNodeGroupEntry shows information about the node groups available in the PGD configuration
-
-| Name | Description | Type |
-| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
-| `name ` | Name is the name of the node group - *mandatory* | string |
-| `enableProxyRouting ` | EnableProxyRouting is true is the node group allows running PGD Proxies | bool |
-| `enableRaft ` | EnableRaft is true if the node group has a subgroup raft instance | bool |
-| `routeWriterMaxLag ` | RouteWriterMaxLag Maximum lag in bytes of the new write candidate to be | |
-
- selected as write leader, if no candidate passes this, there will be no writer
- selected automatically | int64
-`routeReaderMaxLag ` | RouteReaderMaxLag Maximum lag in bytes for node to be considered viable
- read-only node | int64
-`routeWriterWaitFlush` | RouteWriterWaitFlush Whether to wait for replication queue flush before
- switching to new leader when using `bdr.routing_leadership_transfer()` | bool
-
-
-
-## PGDNodeGroupSettings
-
-PGDNodeGroupSettings contains the settings of the PGD Group
-
-| Name | Description | Type |
-| ---------------------- | ----------------------------------------------------------------------- | ---- |
-| `routeWriterMaxLag ` | RouteWriterMaxLag Maximum lag in bytes of the new write candidate to be | |
-
- selected as write leader, if no candidate passes this, there will be no writer
- selected automatically
-Defaults to -1 | int64
-`routeReaderMaxLag ` | RouteReaderMaxLag Maximum lag in bytes for node to be considered viable
- read-only node
-Defaults to -1 | int64
-`routeWriterWaitFlush` | RouteWriterWaitFlush Whether to wait for replication queue flush before
- switching to new leader when using `bdr.routing_leadership_transfer()`
-Defaults to false | bool
-
-
-
-## PGDProxyConfiguration
-
-PGDProxyConfiguration defines the configuration of PGD Proxy
-
-| Name | Description | Type |
-| ------------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `imageName ` | Name of the PGDProxy container image | string |
-| `logLevel ` | The PGD Proxy log level, one of the following values: error, warning, info (default), debug, trace | string |
-| `logEncoder ` | The format of the log output | string |
-| `proxyAffinity ` | ProxyAffinity/Anti-affinity rules for pods | \*corev1.Affinity |
-| `proxyNodeSelector` | ProxyNodeSelector rules for pods | map[string]string |
-| `proxyTolerations ` | ProxyTolerations rules for pods | \[]corev1.Toleration |
-| `proxyResources ` | Defines the resources assigned to the proxy. If not defined uses defaults requests and limits values. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core) |
-
-
-
-## PGDProxyEntry
-
-PGDProxyEntry shows information about the proxies available in the PGD configuration
-
-| Name | Description | Type |
-| ---------------------- | ---------------------------------------------------------------------------------------------------------------- | --------- |
-| `name ` | Name is the name of the proxy - *mandatory* | string |
-| `fallbackGroupNames ` | FallbackGroupNames are the names of the fallback groups configured for this proxy | \[]string |
-| `parentGroupName ` | ParentGroupName is the parent PGD group of this proxy | string |
-| `maxClientConn ` | MaxClientConn maximum number of connections the proxy will accept | int |
-| `maxServerConn ` | MaxServerConn maximum number of connections the proxy will make to the Postgres node | int |
-| `serverConnTimeout ` | ServerConnTimeout connection timeout for server connections in seconds | int64 |
-| `serverConnKeepalive ` | ServerConnKeepalive keepalive interval for server connections in seconds | int64 |
-| `fallbackGroupTimeout` | FallbackGroupTimeout the interval after which the routing falls back to one of the fallback_groups | int64 |
-
-
-
-## PGDProxySettings
-
-PGDProxySettings contains the settings of the proxy
-
-| Name | Description | Type |
-| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | --------- |
-| `fallbackGroups ` | FallbackGroups is the list of groups the proxy should forward connection to when all the data nodes of this PGD group are not available | \[]string |
-| `maxClientConn ` | MaxClientConn maximum number of connections the proxy will accept. Defaults to 32767 | int |
-| `maxServerConn ` | MaxServerConn maximum number of connections the proxy will make to the Postgres node. Defaults to 32767 | int |
-| `serverConnTimeout ` | ServerConnTimeout connection timeout for server connections in seconds. Defaults to 2 | int64 |
-| `serverConnKeepalive ` | ServerConnKeepalive keepalive interval for server connections in seconds. Defaults to 10 | int64 |
-| `fallbackGroupTimeout` | FallbackGroupTimeout the interval after which the routing falls back to one of the fallback_groups. Defaults to 60 | int64 |
-
-
-
-## PGDProxyStatus
-
-PGDProxyStatus any relevant status for the operator about PGDProxy
-
-| Name | Description | Type |
-| ---------------- | ---------------------------------------------------------------------------------------------------------------------- | ------ |
-| `proxyInstances` | | int32 |
-| `writeLead ` | WriteLead is a reserved field for the operator, is not intended for external usage. Will be removed in future versions | string |
-| `proxyHash ` | ProxyHash contains the hash we use to detect if we need to reconcile the proxies | string |
-
-
-
-## PGDStatus
-
-PGDStatus any relevant status for the operator about PGD
-
-| Name | Description | Type |
-| ----------------------------------- | ------------------------------------------------------------------------------------------------- | --------------------------------------- |
-| `raftConsensusLastChangedStatus ` | RaftConsensusLastChangedStatus indicates the latest reported status from bdr.monitor_group_raft | resources.PGDRaftStatus |
-| `raftConsensusLastChangedMessage ` | RaftConsensusLastChangedMessage indicates the latest reported message from bdr.monitor_group_raft | string |
-| `raftConsensusLastChangedTimestamp` | RaftConsensusLastChangedTimestamp indicates when the status and message were first reported | string |
-| `registeredProxies ` | RegisteredProxies is the status of the registered proxies | [\[\]PGDProxyEntry](#PGDProxyEntry) |
-| `nodeGroup ` | NodeGroup is the status of the node group associated with the PGDGroup | [PGDNodeGroupEntry](#PGDNodeGroupEntry) |
-
-
-
-## ParentGroupConfiguration
-
-ParentGroupConfiguration contains the topology configuration of PGD
-
-| Name | Description | Type |
-| -------- | ------------------------------------------------------------------------------------------------- | ------ |
-| `name ` | Name of the parent group - *mandatory* | string |
-| `create` | Create is true when the operator should create the parent group if it doesn't exist | bool |
-
-
-
-## PgdConfiguration
-
-PgdConfiguration is the configuration of the PGD group structure
-
-| Name | Description | Type |
-| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- |
-| `parentGroup ` | ParentGroup configures the topology of the PGD group - *mandatory* | [ParentGroupConfiguration](#ParentGroupConfiguration) |
-| `discovery ` | The parameters we will use to connect to a node belonging to the parent PGD group. Even if provided, the following parameters will be overridden with default values: `application_name`, `sslmode`, `dbname` and `user`. The following parameters should not be provided nor used, as they are not even overridden with defaults:`sslkey`, `sslcert`, `sslrootcert` | \[]ConnectionString |
-| `discoveryJob ` | DiscoveryJob the configuration of the PGD Discovery job | [DiscoveryJobConfig](#DiscoveryJobConfig) |
-| `databaseName ` | Name of the database used by the application. Default: `app`. | string |
-| `ownerName ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. | string |
-| `ownerCredentialsSecret` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | \*cnpv1.LocalObjectReference |
-| `proxySettings ` | Configuration for the proxy | [PGDProxySettings](#PGDProxySettings) |
-| `nodeGroupSettings ` | Configuration for the PGD Group | [\*PGDNodeGroupSettings](#PGDNodeGroupSettings) |
-| `globalRouting ` | GlobalRouting is true when global routing is enabled, and in this case the proxies will be created in the parent group | bool |
-| `mutations ` | List of SQL mutations to apply to the node group | SQLMutations |
-
-
-
-## PreProvisionedCertificate
-
-PreProvisionedCertificate contains the data needed to supply a pre-generated certificate
-
-| Name | Description | Type |
-| ----------- | ------------------------------------------------------------------------- | ------ |
-| `secretRef` | SecretRef a name pointing to a secret that contains a tls.crt and tls.key | string |
-
-
-
-## ReplicationCertificateStatus
-
-ReplicationCertificateStatus encapsulate the certificate status
-
-| Name | Description | Type |
-| ---------------- | --------------------------------------------------------------------- | ------ |
-| `name ` | Name is the name of the certificate | string |
-| `hash ` | Hash is the hash of the configuration for which it has been generated | string |
-| `isReady ` | Ready is true when the certificate is ready | bool |
-| `preProvisioned` | PreProvisioned is true if the certificate is preProvisioned | bool |
-
-
-
-## Restore
-
-Restore configures the restore of a PGD group from an object store
-
-| Name | Description | Type |
-| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- |
-| `barmanObjectStore` | The configuration for the barman-cloud tool suite | \*cnpv1.BarmanObjectStoreConfiguration |
-| `recoveryTarget ` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: | \*cnpv1.RecoveryTarget |
-| `serverNames ` | The list of server names to be used as a recovery origin. One of these servers will be elected as the seeding one when evaluating the recovery target - *mandatory* | \[]string |
-
-
-
-## RestoreStatus
-
-RestoreStatus contains the current status of the restore process
-
-| Name | Description | Type |
-| ------------ | --------------------------------------------------- | ------ |
-| `serverName` | The name of the server to be restored - *mandatory* | string |
-
-
-
-## RootDNSConfiguration
-
-RootDNSConfiguration describes how the FQDN for the resources should be generated
-
-| Name | Description | Type |
-| ------------ | ---------------------------------------------------------------------- | ----------------------------------------- |
-| `additional` | AdditionalDNSConfigurations adds more possible FQDNs for the resources | [\[\]DNSConfiguration](#DNSConfiguration) |
-
-
-
-## SQLMutation
-
-SQLMutation is a series of SQL statements to apply atomically
-
-| Name | Description | Type |
-| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `isApplied` | List of boolean-returning SQL queries. If any of them returns false the mutation will be applied - *mandatory* | \[]string |
-| `exec ` | List of SQL queries to be executed to apply this mutation - *mandatory* | \[]string |
-| `type ` | Type determines when the SQLMutation occurs. 'always': reconcile the mutation at each reconciliation cycle 'beforeSubgroupRaft': are executed only before the subgroupRaft is enabled If not specified, the Type defaults to 'always'. - *mandatory* | SQLMutationType |
-
-
-
-## ServerCertConfiguration
-
-ServerCertConfiguration contains the information to generate the certificates for the nodes
-
-| Name | Description | Type |
-| -------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------- |
-| `caCertSecret` | CACertSecret is the secret of the CA to be injected into the CloudNativePG configuration - *mandatory* | string |
-| `certManager ` | The cert-manager template used to generate the certificates - *mandatory* | [CertManagerTemplate](#CertManagerTemplate) |
-
-
-
-## ServiceTemplate
-
-ServiceTemplate is a structure that allows the user to set a template for the Service generation.
-
-| Name | Description | Type |
-| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
-| `metadata ` | Standard object's metadata. More info: | [Metadata](#Metadata) |
-| `spec ` | Specification of the desired behavior of the service. More info: | corev1.ServiceSpec |
-| `updateStrategy` | UpdateStrategy indicates how to update the services generated by this template. | \*ServiceUpdateStrategy |
-
-
-
-## TLSConfiguration
-
-TLSConfiguration is the configuration of the TLS infrastructure used by PGD to connect to the nodes
-
-| Name | Description | Type |
-| ------------ | ----------------------------------------------------------- | --------------------------------------------------- |
-| `mode ` | - *mandatory* | TLSMode |
-| `serverCert` | The configuration for the server certificates - *mandatory* | [ServerCertConfiguration](#ServerCertConfiguration) |
-| `clientCert` | The configuration for the client certificates - *mandatory* | [ClientCertConfiguration](#ClientCertConfiguration) |
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index afb3bb0f2c0..e8ac4330e00 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -3,147 +3,150 @@ title: 'Architecture'
originalFilePath: 'src/architecture.md'
---
-Consider these main architectural aspects
-when deploying EDB Postgres Distributed in Kubernetes.
+This section covers the main architectural aspects you need to consider
+when deploying EDB Postgres Distributed in Kubernetes (PG4K-PGD).
-EDB Postgres Distributed for Kubernetes is a
+PG4K-PGD is a
[Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
designed to deploy and manage EDB Postgres Distributed clusters
running in private, public, hybrid, or multi-cloud environments.
## Relationship with EDB Postgres Distributed
-[EDB Postgres Distributed (PGD)](/pgd/latest/)
-is a multi-master implementation of Postgres designed for high performance and
+[EDB Postgres Distributed (PGD)](https://www.enterprisedb.com/docs/pgd/latest/)
+is a multi-master implementation of Postgres designed for high performance and
availability.
PGD generally requires deployment using
-[Trusted Postgres Architect (TPA)](/pgd/latest/tpa/),
-a tool that uses [Ansible](https://www.ansible.com) to provision and
-deploy PGD clusters.
+[*Trusted Postgres Architect*, (TPA)](https://www.enterprisedb.com/docs/pgd/latest/tpa/),
+a tool that uses [Ansible](https://www.ansible.com) for provisioning and
+deployment of PGD clusters.
-EDB Postgres Distributed for Kubernetes offers a different way of deploying PGD clusters, leveraging containers
-and Kubernetes. The advantages are that the resulting architecture:
-
-- Is self-healing and robust.
-- Is managed through declarative configuration.
-- Takes advantage of the vast and growing Kubernetes ecosystem.
+PG4K-PGD offers a different way of deploying PGD clusters, leveraging containers
+and Kubernetes, with the added advantages that the resulting architecture is
+self-healing and robust, managed through declarative configuration, and that it
+takes advantage of the vast and growing Kubernetes ecosystem.
## Relationship with EDB Postgres for Kubernetes
-A PGD cluster consists of one or more *PGD groups*, each having one or more *PGD
-nodes*. A PGD node is a Postgres database. EDB Postgres Distributed for Kubernetes internally
+A PGD cluster consists of one or more *PGD Groups*, each having one or more *PGD
+Nodes*. A PGD node is a Postgres database. PG4K-PGD internally
manages each PGD node using the `Cluster` resource as defined by EDB Postgres
-for Kubernetes, specifically a cluster with a single instance (that is, no
+for Kubernetes (PG4K), specifically a `Cluster` with a single instance (i.e. no
replicas).
-You can configure the single PostgreSQL instance created by each cluster
-declaratively using the
-[`.spec.cnp` section](api_reference.md#CnpConfiguration)
-of the PGD group spec.
+The single PostgreSQL instance created by each `Cluster` can be configured
+declaratively via the
+[`.spec.cnp` section](pg4k-pgd.v1beta1.md#pgd-k8s-enterprisedb-io-v1beta1-CnpConfiguration)
+of the PGD Group spec.
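+
+As an illustration only, the following sketch shows how such instance-level
+settings might appear in a PGDGroup manifest; all names and values here are
+assumptions to be checked against the API reference, not a prescribed layout:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: region-a            # hypothetical group name
+spec:
+  instances: 3
+  cnp:
+    # Applied to every PGD node (each a single-instance Cluster)
+    storage:
+      size: 10Gi
+    resources:
+      requests:
+        cpu: "1"
+        memory: 1Gi
+```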
-In EDB Postgres Distributed for Kubernetes, as in EDB Postgres for Kubernetes, the underlying database implementation is responsible
-for data replication. However, it's important to note that failover and
-switchover work differently, entailing Raft election and nominating new
-write leaders. EDB Postgres for Kubernetes handles only the deployment and healing of data nodes.
+In PG4K-PGD, as in PG4K, the underlying database implementation is responsible
+for data replication. However, it is important to note that *failover* and
+*switchover* work differently, entailing Raft election and the nomination of new
+write leaders. PG4K only handles the deployment and healing of data nodes.
-## Managing PGD using EDB Postgres Distributed for Kubernetes
+## Managing PGD using PG4K-PGD
-The EDB Postgres Distributed for Kubernetes operator can manage the complete lifecycle of PGD clusters. As
-such, in addition to PGD nodes (represented as single-instance clusters), it
+The PG4K-PGD operator can manage the complete lifecycle of PGD clusters. As
+such, in addition to PGD Nodes (represented as single-instance `Clusters`), it
needs to manage other objects associated with PGD.
PGD relies on the Raft algorithm for distributed consensus to manage node
-metadata, specifically agreement on a write leader. Consensus among data
+metadata, specifically agreement on a *write leader*. Consensus among data
nodes is also required for operations such as generating new global sequences
or performing distributed DDL.
These considerations force additional actors in PGD above database nodes.
-EDB Postgres Distributed for Kubernetes manages the following:
+PG4K-PGD manages the following:
-- Data nodes. A node is a database and is managed
- by EDB Postgres for Kubernetes, creating a cluster with a single instance.
+- Data nodes: as mentioned previously, a node is a database, and is managed
+ via PG4K, creating a `Cluster` with a single instance.
- [Witness nodes](https://www.enterprisedb.com/docs/pgd/latest/nodes/#witness-nodes)
- are basic database instances that don't participate in data
- replication. Their function is to guarantee that consensus is possible in
- groups with an even number of data nodes or after network partitions. Witness
+ are basic database instances that do not participate in data
+ replication; their function is to guarantee that consensus is possible in
+ groups with an even number of data nodes, or after network partitions. Witness
nodes are also managed using a single-instance `Cluster` resource.
-- [PGD proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/)
+- [PGD Proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/):
act as Postgres proxies with knowledge of the write leader. PGD proxies need
information from Raft to route writes to the current write leader.
### Proxies and routing
PGD groups assume full mesh connectivity of PGD nodes. Each node must be able to
-connect to every other node using the appropriate connection string (a
-`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
-takes care of replicating data after it's committed to one node.
-
-For performance, we often recommend sending write operations mostly to a
-single node: the write leader. Raft identifies the node that's the
-write leader and holds metadata about the PGD nodes. PGD proxies
-transparently route writes to write leaders and can quickly pivot to the new
+connect to every other node, using the appropriate connection string (a
+`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
+will take care of replicating data after it's committed to one node.
+
+For performance, it is often recommendable to send write operations mostly to a
+single node, the *write leader*. Raft is used to identify which node is the
+write leader, and to hold metadata about the PGD nodes. PGD Proxies are used to
+transparently route writes to write leaders, and to quickly pivot to the new
write leader in case of switchover or failover.
-It's possible to configure *Raft subgroups*, each of which can maintain a
-separate write leader. In EDB Postgres Distributed for Kubernetes, a PGD group containing a PGD proxy
-comprises a Raft subgroup.
+It is possible to configure *Raft subgroups*, each of which can maintain a
+separate write leader. In PG4K-PGD, a PGD Group containing a PGD Proxy
+automatically comprises a Raft subgroup.
-Two kinds of routing are available with PGD proxies:
+There are two kinds of routing available with PGD Proxies:
-- Global routing uses the top-level Raft group and maintains one global write
+- Global routing uses the top-level Raft group, and maintains one global write
leader.
- Local routing uses subgroups to maintain separate write leaders. Local
routing is often used to achieve geographical separation of writes.
-In EDB Postgres Distributed for Kubernetes, local routing is used by default, and a configuration option is
+In PG4K-PGD, local routing is used by default, and a configuration option is
available to select global routing.
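+
+As a rough sketch of that option (assuming the group structure is configured
+under `spec.pgd`, as the `PgdConfiguration` API section suggests; the group name
+is illustrative), global routing would be enabled like this:
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: region-a
+spec:
+  pgd:
+    # When true, proxies are created in the parent group and a single
+    # global write leader is maintained.
+    globalRouting: true
+```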
-For more information, see
-[Proxies, Raft, and Raft subgroups](/pgd/latest/routing/raft/) in the PGD documentation.
+You can find more information in the
+[PGD documentation of routing with Raft](https://www.enterprisedb.com/docs/pgd/latest/routing/raft/).
-### PGD architectures and high availability
+### PGD Architectures and High Availability
-To make good use of PGD's
-distributed multi-master capabilities and to offer high availability,
-we recommend several architectures.
+EDB proposes several recommended architectures to make good use of PGD's
+distributed multi-master capabilities and to offer high availability.
The Always On architectures are built from either one group in a single location
or two groups in two separate locations.
-See [Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation
-for more information.
+Please refer to the
+[PGD architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
+for further information.
## Deploying PGD on Kubernetes
-EDB Postgres Distributed for Kubernetes leverages Kubernetes to deploy and manage PGD clusters. As such, some
+PG4K-PGD leverages Kubernetes to deploy and manage PGD clusters. As such, some
adaptations are necessary to translate PGD into the Kubernetes ecosystem.
### Images and operands
-PGD can be configured to run one of three Postgres distributions. See
-[Choosing a Postgres distribution](/pgd/latest/choosing_server/)
-in the PGD documentation to understand the features of each distribution.
+PGD can be configured to run one of three Postgres distributions. Please refer
+to the
+[PGD documentation](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+to understand the features of each distribution.
To function in Kubernetes, containers are provided for each Postgres
distribution. These are the *operands*.
In addition, the operator images are kept in those same repositories.
-See [EDB private image registries](private_registries.md)
+Please refer to [the document on registries](private_registries.md)
for details on accessing the images.
### Kubernetes architecture
+We reproduce some of the points of the
+[PG4K document on Kubernetes architecture](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/architecture/),
+to which we refer you for further depth.
+
Kubernetes natively provides the possibility to span separate physical locations
-connected to each other by way of redundant, low-latency, private network
-connectivity. These physical locations are also known as data centers, failure zones, or,
-more frequently, *availability zones*.
+–also known as data centers, failure zones, or more frequently **availability
+zones**– connected to each other via redundant, low-latency, private network
+connectivity.
Being a distributed system, the recommended minimum number of availability zones
-for a Kubernetes cluster is three to make the control plane
+for a **Kubernetes cluster** is three (3), in order to make the control plane
resilient to the failure of a single zone. This means that each data center is
active at any time and can run workloads simultaneously.
-EDB Postgres Distributed for Kubernetes can be installed in a
+PG4K-PGD can be installed within a
[single Kubernetes cluster](#single-kubernetes-cluster)
or across
[multiple Kubernetes clusters](#multiple-kubernetes-clusters).
@@ -151,31 +154,31 @@ or across
### Single Kubernetes cluster
A multi-availability-zone Kubernetes architecture is typical of Kubernetes
-services managed by cloud providers. Such an architecture enables the EDB Postgres Distributed for Kubernetes
-and the EDB Postgres for Kubernetes operators to schedule workloads and nodes across availability
-zones, considering all zones active.
+services managed by Cloud Providers. Such an architecture enables the PG4K-PGD
+and the PG4K operators to schedule workloads and nodes across availability
+zones, considering all zones active:
![Kubernetes cluster spanning over 3 independent data centers](./images/k8s-architecture-3-az.png)
PGD clusters can be deployed in a single Kubernetes cluster and take advantage
-of Kubernetes availability zones to enable high-availability architectures,
+of Kubernetes availability zones to enable High Availability architectures,
including the Always On recommended architectures.
-You can realize the Always On, single-location architecture shown in
-[Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation on
-a single Kubernetes cluster with three availability zones.
-
+The *Always On Single Location* architecture shown in the
+[PGD Architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/):
![Always On Single Region](./images/always_on_1x3_updated.png)
-The EDB Postgres Distributed for Kubernetes operator can control the scheduling of pods (that is, which pods go
-to which data center) using affinity, tolerations, and node selectors, as is the
-case with EDB Postgres for Kubernetes. Individual scheduling controls are available for proxies as well
+can be realized on a single Kubernetes cluster with 3 availability zones.
+
+The PG4K-PGD operator can control the *scheduling* of pods (i.e. which pods go
+to which data center) using affinity, tolerations and node selectors, as is the
+case with PG4K. Individual scheduling controls are available for proxies as well
as nodes.
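+
+As a non-authoritative sketch, assuming the node-level scheduling controls are
+exposed under `.spec.cnp.affinity` and follow the PG4K affinity format, spreading
+PGD nodes across availability zones might look like this (the zone label, node
+label and toleration are hypothetical):
+
+```yaml
+spec:
+  cnp:
+    affinity:
+      # Spread node pods across zones using the standard topology label
+      topologyKey: topology.kubernetes.io/zone
+      nodeSelector:
+        workload: postgres          # hypothetical node label
+      tolerations:
+        - key: dedicated
+          operator: Equal
+          value: postgres
+          effect: NoSchedule
+```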
-See the
+Please refer to the
[Kubernetes documentation on scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/),
-and [Scheduling](/postgres_for_kubernetes/latest/scheduling/) in the EDB Postgres for Kubernetes documentation
-for more information.
+as well as the [PG4K documents](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/scheduling/)
+for further information.
### Multiple Kubernetes clusters
@@ -184,7 +187,7 @@ reliably communicate with each other.
![Multiple Kubernetes clusters](./images/k8s-architecture-multi.png)
-[Always On multi-location PGD architectures](/pgd/latest/architectures/)
+[Always On multi-location PGD architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
-For more information, see [Connectivity](connectivity.md).
\ No newline at end of file
+More information can be found in the ["Connectivity"](connectivity.md) section.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index 45e2ab42101..add53edb914 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -3,34 +3,34 @@ title: 'Backup on object stores'
originalFilePath: 'src/backup.md'
---
-EDB Postgres Distributed for Kubernetes supports *online/hot backup* of
+EDB Postgres Distributed for Kubernetes (PG4K-PGD) supports *online/hot backup* of
PGD clusters through physical backup and WAL archiving on an object store.
This means that the database is always up (no downtime required) and that
-point-in-time recovery (PITR) is available.
+Point In Time Recovery is available.
## Common object stores
-Multiple object stores are supported, such as AWS S3, Microsoft Azure Blob Storage,
-Google Cloud Storage, MinIO Gateway, or any S3-compatible provider.
-Given that EDB Postgres Distributed for Kubernetes configures the connection with object stores by relying on
-EDB Postgres for Kubernetes, see the [EDB Postgres for Kubernetes cloud provider support](/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
-documentation for more information.
+Multiple object stores are supported, such as `AWS S3`, `Microsoft Azure Blob Storage`,
+`Google Cloud Storage`, `MinIO Gateway`, or any S3 compatible provider.
+Given that PG4K-PGD configures the connection with object stores by relying on
+EDB Postgres for Kubernetes (PG4K), please refer to the [PG4K Cloud provider support](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
+documentation for additional depth.
!!! Important
- The EDB Postgres for Kubernetes documentation's Cloud Provider configuration section is
- available at `spec.backup.barmanObjectStore`. In EDB Postgres Distributed for Kubernetes examples, the object store section is at a
+ In the PG4K documentation you'll find the Cloud Provider configuration section
+ available at `spec.backup.barmanObjectStore`. Note that in PG4K-PGD examples, the object store section is found at a
different path: `spec.backup.configuration.barmanObjectStore`.
## WAL archive
-WAL archiving is the process that sends WAL files to the object storage, and it's essential to
-execute online/hot backups or PITR.
-In EDB Postgres Distributed for Kubernetes, each PGD node is set up to archive WAL files in the object store independently.
+WAL archiving is the process that sends `WAL files` to the object storage, and it's essential to
+execute *online/hot backups*, or Point in Time recovery (PITR).
+In PG4K-PGD, each PGD Node will be set up to archive WAL files in the object store independently.
-The WAL archive is defined in the PGD group `spec.backup.configuration.barmanObjectStore` stanza
+The WAL archive is defined in the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza,
and is enabled as soon as a destination path and cloud credentials are set.
-You can choose to compress WAL files before uploading them and also or alternatively encrypt them.
-In addition, you can enable parallel WAL archiving.
+You can choose to compress WAL files before they are uploaded, and/or encrypt them.
+Parallel WAL archiving can also be enabled.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -47,16 +47,16 @@ spec:
maxParallel: 8
```
-For more information, see the [EDB Postgres for Kubernetes WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
+For further information, refer to the [PG4K WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
## Scheduled backups
-Scheduled backups are the recommended way to configure your backup strategy in EDB Postgres Distributed for Kubernetes.
-When the PGD group `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator selects one of the
-PGD data nodes as the elected backup node for which it creates a `Scheduled Backup` resource.
+Scheduled backups are the recommended way to configure your backup strategy in PG4K-PGD.
+When the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator will select one of the
+PGD data nodes as the elected "Backup Node", for which it will automatically create a `Scheduled Backup` resource.
The `.spec.backup.cron.schedule` field allows you to define a cron schedule specification, expressed
-in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
+in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -71,35 +71,35 @@ spec:
immediate: true
```
-You can suspend scheduled backups by setting `.spec.backup.cron.suspend` to `true`. This setting
-prevents any new backup from being scheduled.
+Scheduled Backups can be suspended if necessary by setting `.spec.backup.cron.suspend` to true. This will
+prevent any new backup from being scheduled while the option is set to true.
-If you want to execute a backup as soon as the `ScheduledBackup` resource is created,
-you can set `.spec.backup.cron.immediate` to `true`.
+In case you want to execute a backup as soon as the ScheduledBackup resource is created,
+you can set `.spec.backup.cron.immediate` to true.
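+
+For example, a minimal sketch of a temporarily suspended schedule, using the
+field paths described above (the cron expression is illustrative):
+
+```yaml
+spec:
+  backup:
+    cron:
+      schedule: "0 0 0 * * *"
+      immediate: true
+      suspend: true   # no new backups are scheduled while this is true
+```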
-`.spec.backupOwnerReference` indicates the `ownerReference` to use
+`.spec.backupOwnerReference` indicates which ownerReference should be used
in the created backup resources. The choices are:
-- `none` — No owner reference for created backup objects.
-- `self` — Sets the scheduled backup object as owner of the backup.
-- `cluster` — Sets the cluster as owner of the backup.
+- *none:* no owner reference for created backup objects
+- *self:* sets the Scheduled backup object as owner of the backup
+- *cluster:* sets the cluster as owner of the backup
!!! Note
- The EDB Postgres for Kubernetes `ScheduledBackup` object contains the `cluster` option to specify the
- cluster to back up. This option is currently not supported by EDB Postgres Distributed for Kubernetes and is
+ The `PG4K` ScheduledBackup object contains an additional option named `cluster` to specify the
+ Cluster to be backed up. This option is currently not supported by `PG4K-PGD`, and will be
ignored if specified.
-If an elected backup node is deleted, the operator transparently elects a new backup node
-and reconciles the `Scheduled Backup` resource accordingly.
+In case an elected "Backup node" is deleted, the operator will transparently elect a new "Backup Node"
+and reconcile the Scheduled Backup resource accordingly.
## Retention policies
-EDB Postgres Distributed for Kubernetes can manage the automated deletion of backup files from the backup
-object store using retention policies based on the recovery window.
-This process also takes care of removing unused WAL files and WALs associated with backups
+PG4K-PGD can manage the automated deletion of backup files from the backup
+object store, using **retention policies** based on the recovery window.
+This process will also take care of removing unused WAL files and WALs associated with backups
that are scheduled for deletion.
-You can define your backups with a retention policy of 30 days:
+You can define your backups with a retention policy of 30 days as follows:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -111,34 +111,34 @@ spec:
retentionPolicy: "30d"
```
-For more information, see the [EDB Postgres for Kubernetes retention policies](/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) in the EDB Postgres for Kubernetes documentation.
+For further information, refer to the [PG4K Retention policies](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) documentation.
!!! Important
- Currently, the retention policy is applied only for the elected backup node
- backups and WAL files. Given that every other PGD node also archives its own WALs
- independently, it's your responsibility to manage the lifecycle of those WAL files,
- for example by leveraging the object storage data-retention policy.
- Also, in case you have an object storage data retention policy set up on every PGD node
+ Currently, the retention policy will only be applied for the elected "Backup Node"
+ backups and WAL files. Given that each other PGD node also archives its own WALs
+ independently, it is your responsibility to manage the lifecycle of those WAL files,
+ for example by leveraging the object storage data retention policy.
+ Also, in case you have an object storage data retention policy set up on every PGD Node
directory, make sure it's not overlapping or interfering with the retention policy managed
by the operator.
## Compression algorithms
Backups and WAL files are uncompressed by default. However, multiple compression algorithms are
-supported. For more information, see the [EDB Postgres for Kubernetes compression algorithms](/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
+supported. For more information, refer to the [PG4K Compression algorithms](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
## Tagging of backup objects
-It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files, and history files.
-For more information, see the EDB Postgres for Kubernetes documentation about [tagging of backup objects](/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
+It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files and history files.
+For more information, refer to the [PG4K document on Tagging of backup objects](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
-## On-demand backups of a PGD node
+## On-demand backups of a PGD Node
-A PGD node is represented as single-instance EDB Postgres for Kubernetes `Cluster` object.
+A PGD Node is represented as a single-instance PG4K `Cluster` object.
As such, in case of need, it's possible to request an on-demand backup
-of a specific PGD node by creating a EDB Postgres for Kubernetes `Backup` resource.
-To do that, see [EDB Postgres for Kubernetes on-demand backups](/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) in the EDB Postgres for Kubernetes documentation.
+of a specific PGD Node by creating a PG4K `Backup` resource.
+In order to do that, you can directly refer to the [PG4K On-demand backups](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) documentation.
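+
+As a sketch of what this looks like, the following requests a backup of one of
+the single-instance Clusters that make up the group; the cluster name is
+hypothetical and should be taken from the list returned by the command in the
+hint below:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: on-demand-backup-example
+spec:
+  cluster:
+    name: my-pgd-group-1   # hypothetical PGD node (PG4K Cluster) name
+```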
!!! Hint
- You can retrieve the list of EDB Postgres for Kubernetes clusters that make up your PGD group
- by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
\ No newline at end of file
+ You can retrieve the list of PG4K Clusters that make up your PGDGroup
+ by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 87a07e5259a..92feac82401 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -1,16 +1,16 @@
---
-title: 'Before you start'
+title: 'Before You Start'
originalFilePath: 'src/before_you_start.md'
---
-Before you get started, review the terminology that's
+Before we get started, it is essential to go over some terminology that is
specific to Kubernetes and PGD.
## Kubernetes terminology
[Node](https://kubernetes.io/docs/concepts/architecture/nodes/)
: A *node* is a worker machine in Kubernetes, either virtual or physical, where
- all services necessary to run pods are managed by the control plane nodes.
+ all services necessary to run pods are managed by the control plane node(s).
[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/)
: A *pod* is the smallest computing unit that can be deployed in a Kubernetes
@@ -19,30 +19,31 @@ specific to Kubernetes and PGD.
[Service](https://kubernetes.io/docs/concepts/services-networking/service/)
: A *service* is an abstraction that exposes as a network service an
- application that runs on a group of pods and standardizes important features,
- such as service discovery across applications, load balancing, and failover.
+ application that runs on a group of pods and standardizes important features
+ such as service discovery across applications, load balancing, failover, and so
+ on.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
-: A *secret* is an object that's designed to store small amounts of sensitive
- data such as passwords, access keys, or tokens and use them in pods.
+: A *secret* is an object that is designed to store small amounts of sensitive
+ data such as passwords, access keys, or tokens, and use them in pods.
-[Storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+[Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
: A *storage class* allows an administrator to define the classes of storage in
a cluster, including provisioner (such as AWS EBS), reclaim policies, mount
options, volume expansion, and so on.
-[Persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
: A *persistent volume* (PV) is a resource in a Kubernetes cluster that
- represents storage that was either manually provisioned by an
+ represents storage that has been either manually provisioned by an
administrator or dynamically provisioned by a *storage class* controller. A PV
- is associated with a pod using a *persistent volume claim*, and its lifecycle is
+ is associated with a pod using a *persistent volume claim* and its lifecycle is
independent of any pod that uses it. Normally, a PV is a network volume,
especially in the public cloud. A [*local persistent volume*
(LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a
persistent volume that exists only on the particular node where the pod that
uses it is running.
-[Persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+[Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
: A *persistent volume claim* (PVC) represents a request for storage, which
might include size, access mode, or a particular storage class. Similar to how
a pod consumes node resources, a PVC consumes the resources of a PV.
@@ -54,7 +55,7 @@ specific to Kubernetes and PGD.
projects, departments, teams, and so on.
[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
-: *Role-based access control* (RBAC), also known as *role-based security*, is a
+: *Role Based Access Control* (RBAC), also known as *role-based security*, is a
method used in computer systems security to restrict access to the network and
resources of a system to authorized users only. Kubernetes has a native API to
control roles at the namespace and cluster level and associate them with
@@ -62,7 +63,7 @@ specific to Kubernetes and PGD.
[CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
: A *custom resource definition* (CRD) is an extension of the Kubernetes API
- and allows developers to create new data types and objects, called *custom
+  and allows developers to create new data types and objects, called *custom
resources*.
[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
@@ -74,13 +75,13 @@ specific to Kubernetes and PGD.
[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/)
: `kubectl` is the command-line tool used to manage a Kubernetes cluster.
-EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. See
-[Supported releases](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) for details.
+EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. Please refer to the
+["Supported releases"](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) page for details.
## PGD terminology
-For more information, see
-[Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/) in the PGD documentation.
+Please refer to the
+[PGD terminology page for further information](https://www.enterprisedb.com/docs/pgd/latest/terminology/).
[Node](https://www.enterprisedb.com/docs/pgd/latest/terminology/#node)
: A PGD database instance.
@@ -92,22 +93,22 @@ For more information, see
: A planned change in connection between the application and the active database node in a cluster, typically done for maintenance.
[Write leader](https://www.enterprisedb.com/docs/pgd/latest/terminology/#write-leader)
-: In Always On architectures, a node is selected as the correct connection endpoint for applications. This node is called the *write leader*. The write leader is selected by consensus of a quorum of proxy nodes.
+: In Always On architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of proxy nodes.
## Cloud terminology
Region
-: A *region* in the cloud is an isolated and independent geographic area
+: A *region* in the Cloud is an isolated and independent geographic area
organized in *availability zones*. Zones within a region have very little
round-trip network latency.
Zone
-: An *availability zone* in the cloud (also known as a *zone*) is an area in a
+: An *availability zone* in the Cloud (also known as a *zone*) is an area in a
region where resources can be deployed. Usually, an availability zone
corresponds to a data center or an isolated building of the same data center.
## What to do next
-Now that you have familiarized with the terminology, you can
-[test EDB Postgres Distributed for Kubernetes on your laptop using a local cluster](quickstart.md) before
-deploying the operator in your selected cloud environment.
\ No newline at end of file
+Now that you have familiarized yourself with the terminology, you can decide to
+[test EDB Postgres Distributed for Kubernetes (PG4K-PGD) on your laptop using a local cluster](quickstart.md) before
+deploying the operator in your selected cloud environment.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
index c0a4f8c098e..559a9399ee2 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -3,27 +3,28 @@ title: 'Certificates'
originalFilePath: 'src/certificates.md'
---
-EDB Postgres Distributed for Kubernetes was designed to natively support TLS certificates.
-To set up an PGD cluster, each PGD node requires:
+EDB Postgres Distributed for Kubernetes has been designed to natively support TLS certificates.
+In order to set up a PGD cluster, each PGD node requires:
-- A server certification authority (CA) certificate
-- A server TLS certificate signed by the server CA
-- A client CA certificate
-- A streaming replication client certificate generated by the client CA
+- a server Certification Authority (CA) certificate
+- a server TLS certificate signed by the server Certification Authority
+- a client Certification Authority (CA) certificate
+- a streaming replication client certificate generated by the client Certification Authority
!!! Note
- You can find all the secrets used by each PGD node and the expiry dates in
- the cluster (PGD node) status.
+ You can find all the secrets used by each PGD Node and the expiry dates in
+ the Cluster (PGD Node) Status.
-EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates. It operates
-primarily in two modes:
+EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates, and
+primarily operates in two modes:
-1. **Operator managed** — Certificates are internally
- managed by the operator in a fully automated way and signed using a CA created
- by EDB Postgres Distributed for Kubernetes.
-2. **User provided** — Certificates are
+1. **operator managed**: certificates are internally
+ managed by the operator in a fully automated way, and signed using a CA created
+ by EDB Postgres Distributed for Kubernetes
+2. **user provided**: certificates are
generated outside the operator and imported in the cluster definition as
- secrets. EDB Postgres Distributed for Kubernetes integrates itself with cert-manager.
+  secrets; EDB Postgres Distributed for Kubernetes integrates itself with cert-manager (see
+  examples below).
-For more information, see
-[Certificates](/postgres_for_kubernetes/latest/certificates/) in the EDB Postgres for Kubernetes documentation.
\ No newline at end of file
+You can find further information in the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/certificates/).
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index 0090c42296c..bc2b8492c54 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -3,94 +3,96 @@ title: 'Connectivity'
originalFilePath: 'src/connectivity.md'
---
-Information about secure network communications in a
-PGD cluster includes:
+This section provides information about secure network communications within a
+PGD Cluster, covering the following topics:
-- [Services](#services)
-- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
+- [services](#services)
+- [domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-!!! Notice
- Although these topics might seem unrelated to each other, they all
+!!! Notice
+ Although the above topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
## Services
-Resources in a PGD cluster are accessible through Kubernetes services.
-Every PGD group manages several of them, namely:
+Resources in a PGD Cluster are accessible through Kubernetes services.
+Every PGDGroup manages several of them, namely:
-- One service per node, used for internal communications (*node service*)
-- A *group service* to reach any node in the group, used primarily by EDB Postgres Distributed for Kubernetes
+- one service per node, used for internal communications (*node service*)
+- a *group service*, to reach any node in the group, used primarily by PG4K-PGD
to discover a new group in the cluster
-- A *proxy service* to enable applications to reach the write leader of the
- group transparently using PGD Proxy
+- a *proxy service*, to enable applications to reach the write leader of the
+ group, transparently using PGD proxy
-For an example that uses these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
+For an example using these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
![Basic architecture of an EDB Postgres Distributed for Kubernetes PGD group](./images/pg4k-pgd-basic-architecture.png)
Each service is generated from a customizable template in the `.spec.connectivity`
section of the manifest.
-All services must be reachable using their FQDN
-from all the PGD nodes in all the Kubernetes clusters. See [Domain names resolution](#domain-names-resolutions).
+All services must be reachable using their fully qualified domain name (FQDN)
+from all the PGD nodes in all the Kubernetes clusters (see below in this
+section).
-EDB Postgres Distributed for Kubernetes provides a service templating framework that gives you the
-availability to easily customize services at the following three levels:
+PG4K-PGD provides a service templating framework that gives you the
+ability to easily customize services at the following 3 levels:
Node Service Template
-: Each PGD node is reachable using a service that can be configured in the
+: Each PGD node is reachable using a service which can be configured in the
`.spec.connectivity.nodeServiceTemplate` section.
Group Service Template
-: Each PGD group has a group service that's a single entry point for the
+: Each PGD group has a group service that is a single entry point for the
whole group and that can be configured in the
`.spec.connectivity.groupServiceTemplate` section.
Proxy Service Template
: Each PGD group has a proxy service to reach the group write leader through
- the PGD proxy and can be configured in the `.spec.connectivity.proxyServiceTemplate`
+ the PGD proxy, and can be configured in the `.spec.connectivity.proxyServiceTemplate`
section. This is the entry-point service for the applications.
-You can use templates to create a LoadBalancer service or to add arbitrary
-annotations and labels to a service to integrate with other components
-available in the Kubernetes system (that is, to create external DNS names or tweak
+You can use templates to create a LoadBalancer service, and/or to add arbitrary
+annotations and labels to a service in order to integrate with other components
+available in the Kubernetes system (i.e. to create external DNS names or tweak
the generated load balancer).
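+
+As an illustrative sketch (the annotation key and hostname are assumptions, not
+a prescribed integration), exposing the proxy service through a load balancer
+with an external DNS name could be requested like this:
+
+```yaml
+spec:
+  connectivity:
+    proxyServiceTemplate:
+      metadata:
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: my-group.my-domain.com
+      spec:
+        type: LoadBalancer
+```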
## Domain names resolution
-EDB Postgres Distributed for Kubernetes ensures that all resources in a PGD group have a FQDN by adopting a convention that uses the PGD group name as a prefix
+PG4K-PGD ensures that all resources in a PGD Group have a fully qualified
+domain name (FQDN) by adopting a convention that uses the PGD Group name as a prefix
for all of them.
-As a result, it expects you to define the domain name of the PGD group. This
-can be done through the `.spec.connectivity.dns` section, which controls how the
-FQDN for the resources are generated with two fields:
+As a result, it expects that you define the domain name of the PGD Group. This
+can be done through the `.spec.connectivity.dns` section which controls how the
+FQDN for the resources are generated, with two fields:
-- `domain` — Domain name for all the objects in the PGD group to use (mandatory).
-- `hostSuffix` — Suffix to add to each service in the PGD group (optional).
+- `domain`: domain name to be used by all the objects in the PGD group (mandatory);
+- `hostSuffix`: suffix to be added to each service in the PGD group (optional).
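+
+For example (values are illustrative), a PGD Group named `my-group` with the
+following settings would expose services such as `my-group-svc.my-domain.com`,
+following the `$groupName$hostSuffix.$domain` convention described later:
+
+```yaml
+spec:
+  connectivity:
+    dns:
+      domain: my-domain.com   # mandatory
+      hostSuffix: -svc        # optional
+```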
-## TLS configuration
+## TLS Configuration
-
-EDB Postgres Distributed for Kubernetes requires that resources in a PGD cluster communicate over a secure
+
+PG4K-PGD requires that resources in a PGD Cluster communicate over a secure
connection. It relies on PostgreSQL's native support for [SSL connections](https://www.postgresql.org/docs/current/libpq-ssl.html)
to encrypt client/server communications using TLS protocols for increased
security.
-Currently, EDB Postgres Distributed for Kubernetes requires that [cert-manager](https://cert-manager.io/) is installed.
-Cert-manager was chosen as the tool to provision dynamic certificates
-given that it's widely recognized as the standard in a Kubernetes
+Currently, PG4K-PGD requires that [cert-manager](https://cert-manager.io/) is installed.
+Cert-manager has been chosen as the tool to provision dynamic certificates,
+given that it is widely recognized as the de facto standard in a Kubernetes
environment.
The `spec.connectivity.tls` section describes how the communication between the
-nodes happens:
+nodes should happen:
- `mode` is an enumeration describing how the server certificates are verified
during PGD group nodes communication. It accepts the following values, as
- documented in [SSL Support](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
- in the PostgreSQL documentation:
+ documented in ["SSL Support"](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
+ from the PostgreSQL documentation:
- `verify-full`
- `verify-ca`
@@ -98,59 +100,59 @@ nodes happens:
- `serverCert` defines the server certificates used by the PGD group nodes to
accept requests.
- The clients validate this certificate depending on the passed TLS mode.
- It accepts the same values as `mode`.
+ The clients validate this certificate depending on the passed TLS mode;
+ refer to the previous point for the accepted values.
-- `clientCert` defines the `streaming_replica` user certificate
- used by the nodes to authenticate each other.
+- `clientCert` defines the `streaming_replica` user certificate that will
+ be used by the nodes to authenticate each other.
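+
+Putting these pieces together, a hedged sketch of the stanza could look like the
+following; the issuer and secret names are placeholders, and the referenced
+cert-manager issuers and CA secrets must already exist:
+
+```yaml
+spec:
+  connectivity:
+    tls:
+      mode: verify-ca
+      serverCert:
+        caCertSecret: server-ca-key-pair
+        certManager:
+          issuerRef:
+            name: server-ca-issuer
+            kind: Issuer
+            group: cert-manager.io
+      clientCert:
+        caCertSecret: client-ca-key-pair
+        certManager:
+          issuerRef:
+            name: client-ca-issuer
+            kind: Issuer
+            group: cert-manager.io
+```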
-### Server TLS configuration
+### Server TLS Configuration
-The server certificate configuration is specified in the `.spec.connectivity.tls.serverCert.certManager`
-section of the `PGDGroup` custom resource.
+The server certificate configuration is specified in `.spec.connectivity.tls.serverCert.certManager`
+section of the PGDGroup custom resource.
-The following assumptions were made for this section to work:
+The following assumptions have been made for this section to work:
- An issuer `.spec.connectivity.tls.serverCert.certManager.issuerRef` is available
for the domain `.spec.connectivity.dns.domain` and any other domain used by
- `.spec.connectivity.tls.serverCert.certManager.altDnsNames`.
-- There's a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`.
+ `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
+- There is a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`
-The `.spec.connectivity.tls.serverCert.certManager` is used to create a per-node
-cert-manager certificate request.
-The resulting certificate is used by the underlying Postgres instance
+The `.spec.connectivity.tls.serverCert.certManager` is used to create a per-node
+cert-manager certificate request.
+The resulting certificate will be used by the underlying Postgres instance
to terminate TLS connections.
-The operator adds the following altDnsNames to the certificate:
+The operator will add the following altDnsNames to the certificate:
- `$node$hostSuffix.$domain`
- `$groupName$hostSuffix.$domain`
!!! Important
- It's your responsibility to add to `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
- any name required from the underlying networking architecture,
- for example, load balancers used by the user to reach the nodes.
+ It's your responsibility to add in `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
+ any name required from the underlying networking architecture
+ (e.g., load balancers used by the user to reach the nodes).
-### Client TLS configuration
+### Client TLS Configuration
The operator requires client certificates to be dynamically provisioned
-using cert-manager (the recommended approach) or pre-provisioned using secrets.
+via cert-manager (recommended approach) or pre-provisioned via secrets.
-#### Dynamic provisioning via cert-manager
+#### Dynamic provisioning via Cert-manager
-The client certificates configuration is managed by the `.spec.connectivity.tls.clientCert.certManager`
-section of the `PGDGroup` custom resource.
-The following assumptions were made for this section to work:
+The client certificates configuration is managed by `.spec.connectivity.tls.clientCert.certManager`
+section of the PGDGroup custom resource.
+The following assumptions have been made for this section to work:
- An issuer `.spec.connectivity.tls.clientCert.certManager.issuerRef` is available
- and signs a certificate with the common name `streaming_replica`.
-- There's a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`.
+ and will sign a certificate with the common name `streaming_replica`
+- There is a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`
-The operator uses the configuration under `.spec.connectivity.tls.clientCert.certManager`
+The operator will use the configuration under `.spec.connectivity.tls.clientCert.certManager`
to create a certificate request per the `streaming_replica` Postgres user.
-The resulting certificate is used to secure communication between the nodes.
+The resulting certificate will be used to secure communication between the nodes.
#### Pre-provisioned certificates via secrets
@@ -158,65 +160,65 @@ Alternatively, you can specify a secret containing the pre-provisioned
client certificate for the streaming replication user through the
`.spec.connectivity.tls.clientCert.preProvisioned.streamingReplica.secretRef` option.
The certificate lifecycle in this case is managed entirely by a third party,
-either manually or automated, by updating the content of the secret.
+either manually or automated, by simply updating the content of the secret.
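+
+As a sketch, assuming `secretRef` takes an object with a `name` field (see the
+API reference for the exact shape) and that a secret named
+`streaming-replica-client-cert` holds the pre-provisioned certificate:
+
+```yaml
+# Fragment of a PGDGroup manifest (illustrative values only).
+spec:
+  connectivity:
+    tls:
+      clientCert:
+        caCertSecret: client-ca-key-pair
+        preProvisioned:
+          streamingReplica:
+            secretRef:
+              name: streaming-replica-client-cert  # hypothetical secret name
+```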
## Connecting to a PGD cluster from an application
-Connecting to a PGD group from an application running inside the same Kubernetes cluster
-or from outside the cluster is a simple procedure. In both cases, you connect to
-the proxy service of the PGD group as the `app` user. The proxy service is a LoadBalancer
-service that routes the connection to the write leader of the PGD group.
+Connecting to a PGD Group from an application running inside the same Kubernetes cluster
+or from outside the cluster is a simple procedure. In both cases, you will connect to
+the proxy service of the PGD Group as the `app` user. The proxy service is a LoadBalancer
+service that will route the connection to the write leader of the PGD Group.
### Connecting from inside the cluster
When connecting from inside the cluster, you can use the proxy service name to connect
-to the PGD group. The proxy service name is composed of the PGD group name and the optional
-host suffix defined in the `.spec.connectivity.dns` section of the `PGDGroup` custom resource.
+to the PGD Group. The proxy service name is composed of the PGD Group name and the (optional)
+host suffix defined in the `.spec.connectivity.dns` section of the PGDGroup custom resource.
-For example, if the PGD group name is `my-group`, and the host suffix is `.my-domain.com`,
-the proxy service name is `my-group.my-domain.com`.
+For example, if the PGD Group name is `my-group` and the host suffix is `.my-domain.com`,
+the proxy service name will be `my-group.my-domain.com`.
-Before connecting, you need to get the password for the app user from the app user
-secret. The naming format of the secret is `my-group-app` for a PGD group named `my-group`.
+Before connecting, you will need to get the password for the app user from the app user
+secret. The naming format of the secret is `my-group-app` for a PGD Group named `my-group`.
-You can get the username and password from the secret using the following commands:
+You can get the username and password from the secret with the following commands:
```sh
kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode
kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode
```
-With this, you have all the pieces for a connection string to the PGD group:
+With this, you now have all the pieces for a connection string to the PGD Group:
```text
postgresql://<app-user>:<app-password>@<proxy-service-name>:5432/<database>
```
-Or, for a `psql` invocation:
+or for a `psql` invocation:
```sh
psql -U <app-user> -h <proxy-service-name> <database>
```
-Where `app-user` and `app-password` are the values you got from the secret,
+where `app-user` and `app-password` are the values you got from the secret, `proxy-service-name` is the proxy service name from the previous example,
and `database` is the name of the database you want to connect
-to. (The default is `app` for the app user.)
+to (the default is `app` for the app user).
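+
+Putting the pieces together, a sketch using the example names above
+(`my-group`, `.my-domain.com`, and the default `app` database):
+
+```sh
+APP_USER=$(kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode)
+APP_PASSWORD=$(kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode)
+psql "postgresql://${APP_USER}:${APP_PASSWORD}@my-group.my-domain.com:5432/app"
+```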
### Connecting from outside the Kubernetes cluster
When connecting from outside the Kubernetes cluster, in the general case,
-the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) is necessary.
-Check your cloud provider or local installation for more information about their
-behavior in your environment.
+the [*Ingress*](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [*Load Balancer*](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) will be necessary.
+Check your cloud provider or local installation for more information about
+their behavior in your environment.
-Ingresses and load balancers require a pod selector to forward connection to
-the PGD proxies. When configuring them, we suggest using the following labels:
+Ingresses and Load Balancers require a Pod selector to forward connections to
+the PGD proxies. When configuring them, we suggest using the following labels
+(see the sketch after this list):
-- `k8s.pgd.enterprisedb.io/group` — Set the PGD group name.
-- `k8s.pgd.enterprisedb.io/workloadType` — Set to `pgd-proxy`.
+- `k8s.pgd.enterprisedb.io/group`: set to the PGD group name
+- `k8s.pgd.enterprisedb.io/workloadType`: set to `pgd-proxy`
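+
+As a sketch, a `LoadBalancer` Service that selects the proxies of a PGD Group
+named `my-group` could look like the following (the Service name and port
+mapping are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-group-proxy-lb
+spec:
+  type: LoadBalancer
+  selector:
+    k8s.pgd.enterprisedb.io/group: my-group
+    k8s.pgd.enterprisedb.io/workloadType: pgd-proxy
+  ports:
+    - name: postgres
+      port: 5432
+      targetPort: 5432
+```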
If using Kind or other solutions for local development, the easiest way to
-access the PGD group from outside is to use port forwarding
+access the PGD Group from outside is to use port forwarding
to the proxy service. You can use the following command to forward port 5432 on your
local machine to the proxy service:
@@ -224,4 +226,4 @@ local machine to the proxy service:
kubectl port-forward svc/my-group.my-domain.com 5432:5432
```
-Where `my-group.my-domain.com` is the proxy service name from the previous example.
\ No newline at end of file
+where `my-group.my-domain.com` is the proxy service name from the previous example.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/group_cleanup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/group_cleanup.mdx
new file mode 100644
index 00000000000..a280d91bdb9
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/group_cleanup.mdx
@@ -0,0 +1,78 @@
+---
+title: 'PGDGroup parting'
+originalFilePath: 'src/group_cleanup.md'
+---
+
+## Deletion and finalizers
+
+When deleting a PGD Group, the operator will start parting every node in the group first.
+It will connect to an active instance and part every node in the target group.
+Once a node is parted, it will not participate in replication and consensus operations.
+To make sure the node is correctly parted before being deleted, the operator uses the
+`k8s.pgd.enterprisedb.io/partNodes` finalizer. Please refer to the
+[Kubernetes documentation on finalizers](https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers/)
+for context.
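+
+For example, you can verify that the finalizer is present on a PGDGroup
+(`region-b` is an illustrative name) with:
+
+```sh
+kubectl get pgdgroup region-b -o jsonpath='{.metadata.finalizers}'
+```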
+
+!!! Note
+ If a namespace holding a PGD Group is deleted directly, we can't ensure
+ the deleting and parting sequence is carried out correctly. Before deleting
+ a namespace, it is recommended to delete all the contained PGD groups.
+
+## Time limit
+
+When parting a node, the operator needs to connect to an active instance to
+execute the `bdr.part_node` function. To avoid this operation hanging,
+a time limit for the finalizer is used; by default, it is 300 seconds.
+After the time limit expires, the finalizer will be removed, and the node
+will be deleted anyway, potentially leaving stale metadata in the global PGD catalog.
+This time limit can be configured through `spec.failingFinalizerTimeLimitSeconds`,
+which is specified in seconds.
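+
+For example, a fragment of a PGDGroup manifest lowering the limit (all other
+required fields omitted; the value is illustrative):
+
+```yaml
+spec:
+  failingFinalizerTimeLimitSeconds: 120
+```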
+
+## Skip finalizer
+
+For testing purposes only, the operator also provides an annotation to skip the
+finalizer: `k8s.pgd.enterprisedb.io/noFinalizers`.
+When this annotation is added to a PGDGroup, the finalizer will be skipped when
+the PGDGroup is being deleted, and the nodes will not be parted from the PGD cluster.
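+
+For testing only, a sketch of adding the annotation to a PGDGroup named
+`region-b` (an illustrative name) before deleting it:
+
+```sh
+kubectl annotate pgdgroup region-b k8s.pgd.enterprisedb.io/noFinalizers=true
+```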
+
+## PGDGroup cleanup
+
+### Cleanup parted node
+
+Once the PGDGroup is deleted, its metadata will remain in the catalog in `PARTED`
+state in the `bdr.node_summary` table. The PG4k-PGD operator
+defines a CRD named `PGDGroupCleanup` to help clean up the `PARTED` PGDGroup.
+
+In the example below, the `PGDGroupCleanup` executes locally from `region-a`
+and cleans up all of `region-b`, with the prerequisite that all its nodes must
+be in the `PARTED` state.
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroupCleanup
+metadata:
+ name: region-b-cleanup
+spec:
+ executor: region-a
+ target: region-b
+```
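+
+As a sketch, you can apply and then watch the cleanup with the usual tooling
+(the file name and namespace are illustrative):
+
+```sh
+kubectl apply -f region-b-cleanup.yaml -n my-namespace
+kubectl get pgdgroupcleanups -n my-namespace
+```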
+
+Please note that if the target group (`region-b` in the example) contains nodes
+not in a `PARTED` state, the Group Cleanup will stop in phase
+`PGDGroupCleanup - Target PGDGroup is not parted, waiting for it to be parted before executing PGDGroupCleanup`.
+In cases of extreme need, we can add the `force` option.
+
+!!! Warning
+ Using `force` can leave the PGD cluster in an inconsistent state. Use it only to
+ recover from failures in which you can't part the group nodes any other way.
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroupCleanup
+metadata:
+ name: region-b-cleanup
+spec:
+ force: true
+ executor: region-a
+ target: region-b
+```
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index 59a381222af..f7af2b1cd49 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -3,7 +3,8 @@ title: EDB Postgres Distributed for Kubernetes
originalFilePath: src/index.md
indexCards: none
navigation:
- - release_notes
+ - rel_notes
+ - '!release_notes*'
- '#Getting Started'
- before_you_start
- use_cases
@@ -12,25 +13,28 @@ navigation:
- quickstart
- '#Using'
- using_pgd
- - security
- backup
- recovery
+ - security
+ - connectivity
- certificates
- ssl_connections
- - connectivity
+ - pause_resume
- private_registries
+ - labels_annotations
+ - group_cleanup
- openshift
+ - tde
- samples
- - api_reference
- - '!api_reference.md.in'
+ - pg4k-pgd.v1beta1
+ - supported_versions
directoryDefaults:
iconName: logos/KubernetesMono
- hideVersion: true
- displayBanner: Preview release v0.7.1
+
---
-EDB Postgres Distributed for Kubernetes (`pg4k-pgd`) is an
-operator designed to manage EDB Postgres Distributed (PGD) workloads on
+**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
+operator designed to manage **EDB Postgres Distributed** v5 workloads on
Kubernetes, with traffic routed by PGD Proxy.
The main custom resource that the operator provides is called `PGDGroup`.
@@ -40,65 +44,54 @@ Architectures can also be deployed across different Kubernetes clusters.
## Before you start
EDB Postgres Distributed for Kubernetes provides you with a way to deploy
-EDB Postgres Distributed in a Kubernetes environment. Therefore, we recommend
-reading the
-[EDB Postgres Distributed documentation](/pgd/latest/).
+EDB Postgres Distributed in a Kubernetes environment. As a result, it
+is fundamental that you have read the
+["EDB Postgres Distributed" documentation](https://www.enterprisedb.com/docs/pgd/latest/).
-To start working with EDB Postgres
-Distributed for Kubernetes, read the following in the PGD documentation:
+The following chapters are very important to start working with EDB Postgres
+Distributed for Kubernetes:
-- [Terminology](/pgd/latest/terminology/)
-- [PGD overview](/pgd/latest/overview/)
-- [Choosing your architecture](/pgd/latest/architectures/)
-- [Choosing a Postgres distribution](/pgd/latest/choosing_server/)
+- [Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/)
+- [Overview](https://www.enterprisedb.com/docs/pgd/latest/overview/)
+- [Architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
+- [Choosing a Postgres distribution](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
-For advanced usage and maximum customization, it's also important to be familiar with the
-[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/),
-as described in [Architecture](architecture.md#relationship-with-edb-postgres-for-kubernetes).
+For advanced usage and maximum customization, it is also important to familiarize yourself with
+["EDB Postgres for Kubernetes" (PG4K) documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/),
+as described in the ["Architecture" section](architecture.md#relationship-with-edb-postgres-for-kubernetes).
## Supported Kubernetes distributions
EDB Postgres Distributed for Kubernetes is available for:
-- Kubernetes version 1.23 or later through a Helm chart
-- Red Hat OpenShift version 4.10 or later only through the Red Hat OpenShift
- certified operator
+- Kubernetes version 1.23 or higher through a Helm Chart
+- Red Hat OpenShift version 4.10 or higher through the Red Hat OpenShift
+ Certified Operator only
## Requirements
EDB Postgres Distributed for Kubernetes requires that the Kubernetes/OpenShift
-clusters hosting the distributed PGD cluster were prepared by you to cater for:
+clusters hosting the distributed PGD cluster have been prepared by you to cater for:
-- The public key infrastructure (PKI) encompassing all the Kubernetes clusters
- the PGD global group is spread across. mTLS is required to authenticate
- and authorize all nodes in the mesh topology and guarantee encrypted communication.
+- the Public Key Infrastructure (PKI) encompassing all the Kubernetes clusters
+ the PGD Global Group is spread across, as mTLS is required to authenticate
+ and authorize all nodes in the mesh topology and guarantee encrypted communication
- Networking infrastructure across all Kubernetes clusters involved in the
- PGD global group to ensure that each node can communicate with each other
+  PGD Global Group to ensure that the nodes can communicate with each other
-EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or later.
+EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or higher.
!!! Seealso "About connectivity"
- See [Connectivity](connectivity.md) for more information.
-
-
+ Please refer to the ["Connectivity" section](connectivity.md) for more information.
## API reference
For a list of resources provided by EDB Postgres Distributed for Kubernetes,
-see the [API reference](api_reference.md).
+please refer to the [API reference](pg4k-pgd.v1beta1.md).
## Trademarks
-[Postgres, PostgreSQL, and the Slonik logo](https://www.postgresql.org/about/policies/trademarks/)
+*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/)
are trademarks or registered trademarks of the PostgreSQL Community Association
-of Canada, and used with their permission.
+of Canada, and used with their permission.*
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
index 9988697bd45..d50d4b7993b 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
@@ -8,11 +8,11 @@ originalFilePath: 'src/installation_upgrade.md'
EDB Postgres Distributed for Kubernetes can be installed using the provided
[Helm chart](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
-If you don't have [Helm](https://helm.sh) installed yet, follow these
-[instructions](https://helm.sh/docs/intro/quickstart/) to install it
+If you don't have [Helm](https://helm.sh) installed yet, please follow the
+[official instructions](https://helm.sh/docs/intro/quickstart/) to install it
in your system.
-After Helm is installed, add the repository:
+Assuming you have Helm installed, the first step is to add the repository:
```console
helm repo add edb \
@@ -20,16 +20,16 @@ helm repo add edb \
```
!!! Important
- You need access to the private EDB repository where both the operator
+ You'll need access to the private EDB repository where both the operator
and operand images are stored. Access requires a valid
[EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
- See [Accessing EDB private image registries](private_registries.md) for details.
+ Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
Given that the container images for both the operator and the selected operand
are in EDB's private registry, you need your credentials to enable `helm` to
retrieve them.
-Make sure to replace your repo and token in the following command:
+Make sure to replace your repo and token in the command below:
```console
helm upgrade --dependency-update \
@@ -43,14 +43,14 @@ helm upgrade --dependency-update \
In particular:
-- Set `@@REPOSITORY@@` to the name of the repository, as explained in [Which repository to
- choose?](private_registries.md#which-repository-to-choose).
-- Set `@@TOKEN@@` to the repository token for your EDB account, as explained in
- [How to retrieve the token](private_registries.md#how-to-retrieve-the-token).
+- set `@@REPOSITORY@@` to the name of the repository, as explained in ["Which repository to
+ choose?"](private_registries.md#which-repository-to-choose)
+- set `@@TOKEN@@` to the repository token for your EDB account, as explained in
+ ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
-Be sure to create a cert issuer before you start deploying PGD clusters.
-The Helm chart prompts you to do this, but in case you miss it,
-you can, for example, run:
+Please remember to create a cert issuer before you start deploying PGD clusters.
+The helm chart will already suggest that you do this, but in case you miss it,
+you may run, for example:
```sh
kubectl apply -f \
@@ -58,26 +58,17 @@ kubectl apply -f \
```
!!! Info
- For more details on the Helm chart, see the
+ For further details on the Helm chart, please refer to the
[Helm chart repo documentation](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
With the operators and a self-signed cert issuer deployed, you can start
-creating PGD clusters. See the
-[Quick start](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
+creating PGD clusters. Please refer to the
+["Quickstart"](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
-
## Red Hat OpenShift
-If you're trying to install EDB Postgres Distributed for Kubernetes on Red Hat OpenShift,
-see [Red Hat OpenShift](openshift.md), which contains
-information on the certified operator maintained by EDB.
\ No newline at end of file
+If you are trying to install EDB Postgres Distributed for Kubernetes on Red Hat OpenShift,
+please refer to the ["Red Hat OpenShift section"](openshift.md) which contains
+information on the certified operator maintained by EDB.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
new file mode 100644
index 00000000000..cbe565eac1b
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
@@ -0,0 +1,70 @@
+---
+title: 'Predefined labels'
+originalFilePath: 'src/labels_annotations.md'
+---
+
+These predefined labels are managed by the EDB Postgres Distributed for
+Kubernetes operator.
+
+`k8s.pgd.enterprisedb.io/certificateType`
+: Indicates the type of the certificate. `replication` indicates a certificate
+used to authenticate the replication client; `server` indicates a
+certificate used for server authentication.
+
+`k8s.pgd.enterprisedb.io/group`
+: Name of the PGDGroup that the resource belongs to. Added to cluster or
+instance resources.
+
+`k8s.pgd.enterprisedb.io/isWitnessService`
+: Indicates that a service is for a witness node.
+
+`k8s.pgd.enterprisedb.io/type`
+: Type of the resource, added to cluster or instance resources; usually `node`.
+
+`k8s.pgd.enterprisedb.io/workloadType`
+: Indicates the workload type of the resource, added to cluster or instance
+resources. `pgd-node-data` indicates a data node; `pgd-node-witness` a witness
+node; `pgd-proxy` a PGD Proxy node;
+`proxy-svc` a PGD Proxy service; `group-svc` a PGD group service used to
+communicate with any node in the PGDGroup;
+`node-svc` a service created from the CNP service template.
+`scheduled-backup` is added to ScheduledBackup
+resources; `bootstrap-cross-location-pgd-group` is added to the pod that
+creates a cross-location PGD group;
+`pgd-node-restore` is added to the pod that starts the node restore process.
+
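+For example, a sketch of using two of these labels to list the proxy pods of a
+PGD group named `my-group` (an illustrative name):
+
+```sh
+kubectl get pods -l k8s.pgd.enterprisedb.io/group=my-group,k8s.pgd.enterprisedb.io/workloadType=pgd-proxy
+```
+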
+## Predefined annotations
+
+`k8s.pgd.enterprisedb.io/dirtyMetadata`
+: Set on CNP clusters that have been generated from a backup and need to have
+their metadata cleaned up
+before creating the PGD node. This is written by the restore job.
+
+`k8s.pgd.enterprisedb.io/hash`
+: Contains the hash of the used PGDGroup spec
+
+`k8s.pgd.enterprisedb.io/latestCleanupExecuted`
+: Set in the PGDGroup to indicate that the cleanup has been executed.
+
+`k8s.pgd.enterprisedb.io/node`
+: Contains the name of the node for which a certain certificate has been
+generated. Added to the certificate resources.
+
+`k8s.pgd.enterprisedb.io/noFinalizers`
+: Set in the PGDGroup with value `true` to skip the finalizer execution. This
+is for internal use only.
+
+`k8s.pgd.enterprisedb.io/pause`
+: Set in the PGDGroup to pause a PGDGroup.
+
+`k8s.pgd.enterprisedb.io/recoverabilityPointsByMethod`
+: Set in the PGDGroup to store the CNP clusters' First Recoverability points by
+method in a tamper-proof place.
+
+`k8s.pgd.enterprisedb.io/seedingServer`
+: Set in the PGDGroup to indicate to the operator which is the server to be
+restored. This is written by the restore job.
+
+`k8s.pgd.enterprisedb.io/seedingSnapshots`
+: Set in the PGDGroup to indicate to the operator which are the snapshots to be
+restored. This is written by the restore job.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
index 4f1885c3d4a..25abcb596f9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
@@ -4,26 +4,26 @@ originalFilePath: 'src/openshift.md'
---
EDB Postgres Distributed for Kubernetes is a certified operator that can be
-installed on OpenShift using a web interface.
+installed on OpenShift via the web interface.
## Ensuring access to EDB private registry
!!! Important
- You need access to the private EDB repository where both the operator
+ You'll need access to the private EDB repository where both the operator
and operand images are stored. Access requires a valid
[EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
- See [Accessing EDB private image registries](private_registries.md) for details.
+ Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
-The OpenShift install uses pull secrets to access the
+The OpenShift install will use pull secrets in order to access the
operand and operator images, which are held in a private repository.
-Once you have credentials to the private repo, you need to create
-two pull secrets in the `openshift-operators` namespace:
+Once you have credentials to the private repo, you will need to create
+two pull secrets in the `openshift-operators` namespace, named:
-- `pgd-operator-pull-secret` for the EDB Postgres Distributed for Kubernetes operator images
-- `postgresql-operator-pull-secret` for the EDB Postgres for Kubernetes operator images
+- `pgd-operator-pull-secret`, for the EDB Postgres Distributed for Kubernetes operator images
+- `postgresql-operator-pull-secret`, for the EDB Postgres for Kubernetes operator images
-You can create each secret using the `oc create` command:
+You can create each secret via the `oc create` command, as follows:
```sh
oc create secret docker-registry pgd-operator-pull-secret \
@@ -37,62 +37,68 @@ oc create secret docker-registry postgresql-operator-pull-secret \
--docker-password="@@TOKEN@@"
```
-Where:
+where:
-- `@@REPOSITORY@@` is the name of the repository, as explained in [Which repository to
- choose?](private_registries.md#which-repository-to-choose).
+- `@@REPOSITORY@@` is the name of the repository, as explained in ["Which repository to
+ choose?"](private_registries.md#which-repository-to-choose)
- `@@TOKEN@@` is the repository token for your EDB account, as explained in
- [How to retrieve the token](private_registries.md#how-to-retrieve-the-token).
+ ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
## Installing the operator
The EDB Postgres Distributed for Kubernetes operator can be found in the Red
Hat OperatorHub directly from your OpenShift dashboard.
-1. From the hamburger menu, select **Operators > OperatorHub**.
+1. Navigate in the web console to the `Operators -> OperatorHub` page:
-2. In the web console, use the search box to filter the listing. For example, enter `EDB` or `pgd`:
+ ![Menu OperatorHub](./images/openshift/operatorhub_1.png)
+
+2. Use the search box to restrict the listing, e.g. using `EDB` or `pgd`:
![Install OperatorHub](./images/openshift/find-pgd-openshift.png)
-3. Read the information about the operator and select **Install**.
+3. Read the information about the Operator and select `Install`
-4. In the Operator Installation page, select:
+4. The following `Operator installation` page expects you to choose:
- - The installation mode. [Cluster-wide](#cluster-wide-installation) is currently the
- only mode.
- - The update channel (currently **preview**).
- - The approval strategy, following the availability on the marketplace of
+ - the installation mode ([cluster-wide](#cluster-wide-installation) is the
+ only mode at the moment)
+ - the update channel (at the moment `preview`)
+ - the approval strategy, following the availability on the market place of
a new release of the operator, certified by Red Hat:
- - **Automatic**: OLM upgrades the running operator with the
- new version.
- - **Manual**: OpenShift waits for human intervention by requiring an
- approval in the **Installed Operators** section.
+ - `Automatic`: OLM automatically upgrades the running operator with the
+ new version
+ - `Manual`: OpenShift waits for human intervention, by requiring an
+ approval in the `Installed Operators` section
### Cluster-wide installation
-With cluster-wide installation, you're asking OpenShift to install the
-operator in the default `openshift-operators` namespace and to make it
+With cluster-wide installation, you are asking OpenShift to install the
+Operator in the default `openshift-operators` namespace and to make it
available to all the projects in the cluster.
+
This is the default and normally recommended approach to install EDB Postgres
Distributed for Kubernetes.
-From the web console, for **Installation mode**, select **All namespaces on the cluster (default)**.
+From the web console, select `All namespaces on the cluster (default)` as
+`Installation mode`:
+
+![Install all namespaces](./images/openshift/all-namespaces.png)
-On installation, the operator is visible in all namespaces. In case there
+On installation, the operator will be visible in all namespaces. In case there
were problems during installation, check the logs in any pods in the
-`openshift-operators` project on the **Workloads > Pods** page
+`openshift-operators` project on the `Workloads → Pods` page,
as you would with any other OpenShift operator.
!!! Important "Beware"
- By choosing the cluster-wide installation you, can't easily move to a
- single-project installation later.
+ By choosing the cluster-wide installation you cannot easily move to a
+ single project installation at a later time.
## Creating a PGD cluster
-After the installation by OpenShift, the operator deployment
-is in the `openshift-operators` namespace. Notice the cert-manager operator was
-also installed, as was the EDB Postgres for Kubernetes operator
+After the installation from OpenShift, you should find the operator deployment
+in the `openshift-operators` namespace. Notice the cert-manager operator will
+also get installed, as will the EDB Postgres for Kubernetes operator
(`postgresql-operator-controller-manager`).
```sh
@@ -104,14 +110,14 @@ postgresql-operator-controller-manager-1-20-0 1/1 1 1
…
```
-After checking that the `pgd-operator-controller-manager` deployment is READY, you can
+After checking that the `pgd-operator-controller-manager` deployment is READY, we can
start creating PGD clusters. The EDB Postgres Distributed for Kubernetes
repository contains some useful sample files.
-You must deploy your PGD clusters on a dedicated namespace/project. The
+Remember to deploy your PGD clusters on a dedicated namespace/project. The
default namespace is reserved.
-First, then, create a new namespace, and deploy a
+First, you should create a new namespace and deploy a
[self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
in it:
@@ -121,7 +127,9 @@ oc apply -n my-namespace -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-### Using PGD in a single OpenShift cluster in a single region
+### Using PGD in a single Openshift Cluster in a single region
+
+Please see the following section for [multi-cluster and multi-region](#using-pgd-in-multiple-openshift-clusters-in-multiple-regions) deployments.
Now you can deploy a PGD cluster, for example a flexible 3-region, which
contains two data groups and a witness group. You can find the YAML manifest
@@ -131,7 +139,7 @@ in the file [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml).
oc apply -f flexible_3regions.yaml -n my-namespace
```
-Your PGD groups start to come up:
+You should start seeing your PGD groups come up:
```sh
$ oc get pgdgroups -n my-namespace
@@ -141,40 +149,40 @@ region-b 2 1 PGDGroup - Healthy
region-c 0 1 PGDGroup - Healthy 23m
```
-### Using PGD in multiple OpenShift clusters in multiple regions
+### Using PGD in multiple Openshift Clusters in multiple regions
-To deploy PGD in multiple OpenShift clusters in multiple regions, you must first establish a way for the
-PGD groups to communicate with each other. The recommended way of achieving this with multiple OpenShift clusters is to use
+In order to deploy PGD in multiple Openshift Clusters in multiple regions you must first establish a way for the
+PGD Groups to communicate with each other. The recommended way of achieving this with multiple Openshift clusters is to use
[Submariner](https://submariner.io/getting-started/quickstart/openshift/). Configuring the connectivity is outside the
-scope of this documentation. However, once you've established connectivity between the OpenShift clusters, you can deploy
-PGD groups synced with one another.
+scope of this document, but once you have established connectivity between the Openshift Clusters you can deploy
+PGD Groups synced with one another.
!!! Warning
- This example assumes you're deploying three PGD groups, one in each OpenShift
- cluster, and that you established connectivity between the OpenShift clusters using Submariner.
+ This example assumes you are deploying three PGD Groups, one in each Openshift
+ Cluster, and that you have established connectivity between the Openshift Clusters using Submariner.
-Similar to the [single-cluster example](#using-pgd-in-a-single-openshift-cluster-in-a-single-region), this example creates
+Similar to the [single cluster example](#using-pgd-in-a-single-openshift-cluster-in-a-single-region), we will create
two data PGD groups and one witness group. In contrast to that example,
-each group lives in a different OpenShift cluster.
+each group will live in a different Openshift Cluster.
-In addition to basic connectivity between the OpenShift clusters, you need to ensure that each OpenShift cluster
-contains a certificate authority that's trusted by the other OpenShift clusters. This condition is required for the PGD groups
+In addition to basic connectivity between the Openshift Clusters, you will need to ensure that each Openshift Cluster
+contains a certificate authority that is trusted by the other Openshift Clusters. This is required for the PGD Groups
to communicate with each other.
-The OpenShift clusters can all use
+The Openshift clusters can all use
the same certificate authority, or each cluster can have its own certificate
-authority. Either way, you need to ensure that each OpenShift cluster's
-certificates trust the other OpenShift clusters' certificate authorities.
-
-This example uses a self-signed certificate
-that has a single certificate authority used for all certificates on all the OpenShift clusters.
-
-The example refers to the OpenShift clusters as `OpenShift Cluster A`, `OpenShift Cluster B`, and
-`OpenShift Cluster C`. In OpenShift, an installation of the EDB Postgres Distributed for Kubernetes operator from OperatorHub includes an
-installation of the cert-manager operator. We recommend creating and managing certificates with cert-manager.
-
-1. Create a namespace to hold `OpenShift Cluster A`, and in it also create the needed objects for a self-signed certificate. Assuming
-that the PGD operator and the cert-manager are installed, you create a [self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
+authority. Either way, you need to ensure that each Openshift cluster's
+certificates trust the other Openshift clusters' certificate authorities.
+
+For illustration, we are going to demo using a self-signed certificate
+that has a single certificate authority used for all certificates on all our Openshift clusters.
+
+In this demo we will refer to the Openshift clusters as `Openshift Cluster A`, `Openshift Cluster B`, and
+`Openshift Cluster C`. In Openshift, an installation of the PG4K-PGD operator from OperatorHub will include an
+installation of the *cert-manager* operator; creating and managing certificates with cert-manager is
+recommended. We create a namespace to hold `Openshift Cluster A`, and in it
+we will also create the needed objects for a self-signed certificate. Assuming
+that the PGD operator and the cert-manager are installed, we create a [self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
in that namespace.
```sh
@@ -183,21 +191,21 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-1. After a few moments, cert-manager creates the issuers and certificates. There are also now
+After a few moments, cert-manager should have created the Issuers and Certificates. Additionally, there should now be
two secrets in the `pgd-group` namespace: `server-ca-key-pair` and `client-ca-key-pair`. These secrets contain
-the certificates and private keys for the server and client certificate authorities. You need to copy these secrets
-to the other OpenShift clusters before applying the `issuer-selfsigned.yaml` manifest. You can use the
-`oc get secret` command to get the contents of the secrets:
+the certificates and private keys for the server and client certificate authorities. We will need to copy these secrets
+to the other Openshift Clusters **before applying** the `issuer-selfsigned.yaml` manifest. We can use the
+`oc get secret` command to get the contents of the secrets.
```sh
oc get secret server-ca-key-pair -n pgd-group -o yaml > server-ca-key-pair.yaml
oc get secret client-ca-key-pair -n pgd-group -o yaml > client-ca-key-pair.yaml
```
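+
+One possible way to strip the cluster-specific metadata from the exported
+secrets, assuming `yq` v4 is available (any YAML editor works just as well):
+
+```sh
+yq -i 'del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.creationTimestamp)' server-ca-key-pair.yaml
+yq -i 'del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.creationTimestamp)' client-ca-key-pair.yaml
+```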
-1. After removing the content specific to `OpenShift Cluster A`
-from these secrets (such as uid, resourceVersion, and timestamp), you can switch
-context to `OpenShift Cluster B`. Then create the namespace, create the
-secrets in it, and only then apply the `issuer-selfsigned.yaml` file:
+After removing the content specific to `Openshift Cluster A`
+from the above secrets (such as uid, resourceVersion, and timestamp), we can switch our
+context to `Openshift Cluster B`; we create the namespace, create our
+secrets in it, and only then apply the `issuer-selfsigned.yaml` file.
```sh
oc create ns pgd-group
@@ -207,8 +215,8 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-1. You can switch context to `OpenShift Cluster C` and repeat
-the same process followed for Cluster B:
+Finally, we can switch our context to `Openshift Cluster C`, and repeat
+the same process we followed for Cluster B.
```sh
oc create ns pgd-group
@@ -218,7 +226,8 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-1. On `OpenShift Cluster A`, you can create your first PGD group, called `region-a`. The YAML manifest for the PGD group is:
+Now, back on `Openshift Cluster A`, we can create our first PGD Group, called `region-a`. The YAML manifest for the PGD Group is as
+follows:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -267,20 +276,20 @@ spec:
group: cert-manager.io
```
- !!! Important
- The format of the hostnames in the `discovery` section differs from the single-cluster
- example. That's because Submariner is being used to connect the OpenShift clusters, and Submariner uses the
- `..svc.clusterset.local` domain to route traffic between the OpenShift clusters. `region-a-group` is the
- name of the service to be created for the PGD group named `region-a`.
+!!! Important
+ Please note that the format of the hostnames in the `discovery` section differs from the single cluster
+ example. This is because we are using Submariner to connect the Openshift Clusters, and Submariner uses the
+ `..svc.clusterset.local` domain to route traffic between the Openshift Clusters. `region-a-group` is the
+ name of the service that will be created for the PGD Group named `region-a`.
-1. Apply the `region-a` PGD group YAML:
+Let's apply the `region-a` PGD Group YAML:
```sh
oc apply -f region-a.yaml -n pgd-group
```
-1. You can now switch context to `OpenShift Cluster B` and create the second PGD group. The YAML for the PGD group in Cluster B
-is as follows. The only difference is the `metadata.name`.
+We can now switch our context to `Openshift Cluster B` and create our second PGD Group. The YAML for the PGD Group in Cluster B
+is as follows; the only difference is the `metadata.name`:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -324,14 +333,14 @@ spec:
group: cert-manager.io
```
-1. Apply the `region-b` PGD group YAML:
+Apply the `region-b` PGD Group YAML:
```sh
oc apply -f region-b.yaml -n pgd-group
```
-1. You can switch context to `OpenShift Cluster C` and create the third PGD group. The YAML for the PGD
-group is:
+And finally, we can switch our context to `Openshift Cluster C` and create our third PGD Group. The YAML for the PGD
+Group is as follows:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -375,30 +384,29 @@ spec:
group: cert-manager.io
```
-1. Apply the `region-c` PGD group YAML:
+Apply the `region-c` PGD Group YAML:
```sh
oc apply -f region-c.yaml -n pgd-group
```
-Now you can switch context back to `OpenShift Cluster A` and check the status of the PGD group there:
+Now we can switch our context back to `Openshift Cluster A` and check the status of our PGD Group there.
```sh
oc get pgdgroup region-a -n pgd-group
```
-The PGD group is in the phase
+We should expect to find the PGD group in phase
`PGD - Waiting for node discovery`.
-After creating the PGD groups in each OpenShift cluster, which in turn creates the services for each node, you
-need to expose the services to the other OpenShift clusters. You can do this in various ways.
-
-If you're using
-Submariner, you can do it using the
+After creating the PGD Groups in each Openshift Cluster, which will in turn create the services for each node, you will
+need to expose the services to the other Openshift Clusters. This can be done in various ways.
+Since we are using
+Submariner, we will do it using the
[`subctl`](https://submariner.io/operations/deployment/subctl/)
-command. Run the `subctl export service` command
-for each service in the
-`pgd-group` namespace that has a `-group` or `-node` suffix. You can do this by running the following bash
+command. We need to run the `subctl export service` command
+for each service in our
+`pgd-group` namespace that has a `-group` or `-node` suffix. We can accomplish this by running the following bash
`for` loop on each cluster:
```sh
@@ -407,12 +415,7 @@ for service in $(oc get svc -n pgd-group --no-headers -o custom-columns="NAME:.m
done
```
-After a few minutes, the status shows that the PGD group is healthy. Once each PGD group is healthy, you can write
-to the `app` database in either of the two data nodes: `region-a` or `region-b`. The data is replicated to the
+After a few minutes, the status should show that the PGD Group is healthy. Once each PGD Group is healthy, you can write
+to the `app` database in either of the two data nodes, `region-a` or `region-b`, and the data will be replicated to the
other data node.
-
\ No newline at end of file
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
new file mode 100644
index 00000000000..f4f6c92653a
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
@@ -0,0 +1,57 @@
+---
+title: 'Declarative Pausing and Resuming'
+originalFilePath: 'src/pause_resume.md'
+---
+
+The declarative Pausing and Resuming feature enables saving CPU power by removing the
+database Pods, while keeping the database PVCs.
+
+Declarative Pausing and Resuming leverages the hibernation functionality available for
+EDB Postgres for Kubernetes. For additional depth and an explanation of how
+hibernation works, we refer you to the
+[PG4K documentation on declarative hibernation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/declarative_hibernation/).
+
+Pause is requested by adding the `k8s.pgd.enterprisedb.io/pause`
+annotation to the desired PGD Group.
+
+For example:
+
+```sh
+kubectl annotate pgdgroup region-a k8s.pgd.enterprisedb.io/pause=on
+```
+
+After a few seconds, the requested PGD Group will be in a paused state, with
+all the database pods removed.
+
+```sh
+kubectl get pgdgroups
+
+NAME DATA INSTANCES WITNESS INSTANCES PHASE AGE
+region-a 2 1 PGDGroup - Paused 25m
+region-b 2 1 PGDGroup - Healthy 25m
+region-c 0 1 PGDGroup - Healthy 25m
+```
+
+To resume a paused PGD Group, set the annotation to `off`.
+Remember to add the `--overwrite` flag.
+
+```sh
+kubectl annotate pgdgroup region-a k8s.pgd.enterprisedb.io/pause=off --overwrite
+```
+
+In a few seconds, you should see the nodes start resuming and the pods
+being re-created.
+
+```sh
+kubectl get pgdgroups
+
+NAME DATA INSTANCES WITNESS INSTANCES PHASE AGE
+region-a 2 1 Pause - resume nodes 1m
+region-b 2 1 PGDGroup - Healthy 25m
+region-c 0 1 PGDGroup - Healthy 25m
+```
+
+There are some requirements before the pause annotation can put the PGD group
+on Pause. Ideally, the PGD Group should be in a Healthy state. Alternatively, if
+all the data nodes in the PGD Group are Healthy at the individual level, Pause
+can also be initiated.
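+
+Before pausing, a sketch of checking the group state, assuming the phase is
+exposed under `.status.phase` (check the API reference for the exact field):
+
+```sh
+kubectl get pgdgroup region-a -o jsonpath='{.status.phase}'
+```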
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
new file mode 100644
index 00000000000..7964a54772e
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
@@ -0,0 +1,2816 @@
+---
+title: 'API Reference'
+originalFilePath: 'src/pg4k-pgd.v1beta1.md'
+---
+
+Package v1beta1 contains API Schema definitions for the pgd v1beta1 API group
+
+## Resource Types
+
+- [PGDGroup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroup)
+- [PGDGroupCleanup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupCleanup)
+
+## CertificateKeystores
+
+**Appears in:**
+
+- [CertificateSpec](#cert-manager-io-v1-CertificateSpec)
+
+CertificateKeystores configures additional keystore output formats to be
+created in the Certificate's output Secret.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `jks` (`JKSKeystore`) | JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. |
+| `pkcs12` (`PKCS12Keystore`) | PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. |
+
+## CertificatePrivateKey
+
+**Appears in:**
+
+- [CertificateSpec](#cert-manager-io-v1-CertificateSpec)
+
+CertificatePrivateKey contains configuration options for private keys
+used by the Certificate controller.
+This allows control of how private keys are rotated.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `rotationPolicy` (`PrivateKeyRotationPolicy`) | RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to `Never`, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exist but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to `Always`, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is `Never` for backward compatibility. |
+| `encoding` (`PrivateKeyEncoding`) | The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8`, standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. |
+| `algorithm` (`PrivateKeyAlgorithm`) | Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are `RSA`, `Ed25519`, or `ECDSA`. If algorithm is specified and size is not provided, a key size of 256 will be used for the ECDSA key algorithm and a key size of 2048 for the RSA key algorithm. Key size is ignored when using the Ed25519 key algorithm. |
+| `size` (`int`) | Size is the key bit size of the corresponding private key for this certificate. If algorithm is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If algorithm is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If algorithm is set to `Ed25519`, size is ignored. No other values are allowed. |
+
+## CertificateSpec
+
+**Appears in:**
+
+- [CertManagerTemplate](#pgd-k8s-enterprisedb-io-v1beta1-CertManagerTemplate)
+
+CertificateSpec defines the desired state of Certificate.
+A valid Certificate requires at least one of a CommonName, DNSName, or
+URISAN to be valid.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `subject` (`X509Subject`) | Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). |
+| `commonName` (`string`) | CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4 |
+| `duration` (`Duration`) | The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset, this defaults to 90 days. The Certificate will be renewed either 2/3 through its duration or the `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go `time.ParseDuration` (https://golang.org/pkg/time/#ParseDuration). |
+| `renewBefore` (`Duration`) | How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go `time.ParseDuration` (https://golang.org/pkg/time/#ParseDuration). |
+| `dnsNames` (`[]string`) | DNSNames is a list of DNS subjectAltNames to be set on the Certificate. |
+| `ipAddresses` (`[]string`) | IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. |
+| `uris` (`[]string`) | URIs is a list of URI subjectAltNames to be set on the Certificate. |
+| `emailAddresses` (`[]string`) | EmailAddresses is a list of email subjectAltNames to be set on the Certificate. |
+| `secretName` (`string`, required) | SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. IMPORTANT: this field was required in the original cert-manager API declaration. |
+| `keystores` (`CertificateKeystores`) | Keystores configures additional keystore output formats stored in the `secretName` Secret resource. |
+| `issuerRef` (`ObjectReference`, required) | IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. |
+| `isCA` (`bool`) | IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of usages. |
+| `usages` (`[]KeyUsage`) | Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. |
+| `privateKey` (`CertificatePrivateKey`) | Options to control private keys used for the Certificate. |
+| `encodeUsagesInRequest` (`bool`) | EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest. |
+| `revisionHistoryLimit` (`int32`) | revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single CertificateRequest created by this Certificate, either when it was created, renewed, or its Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of 1 or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. |
+
+## ConditionStatus
+
+(Alias of `string`)
+
+ConditionStatus represents a condition's status.
+
+## JKSKeystore
+
+**Appears in:**
+
+- [CertificateKeystores](#cert-manager-io-v1-CertificateKeystores)
+
+JKSKeystore configures options for storing a JKS keystore in the spec.secretName
+Secret resource.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `create` (`bool`, required) | Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`, containing the issuing Certificate Authority. |
+| `passwordSecretRef` (`SecretKeySelector`, required) | PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. |
+
+## KeyUsage
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [CertificateSpec](#cert-manager-io-v1-CertificateSpec)
+
+KeyUsage specifies valid usage contexts for keys.
+See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
+https://tools.ietf.org/html/rfc5280#section-4.2.1.12
+
+Valid KeyUsage values are as follows:
+"signing",
+"digital signature",
+"content commitment",
+"key encipherment",
+"key agreement",
+"data encipherment",
+"cert sign",
+"crl sign",
+"encipher only",
+"decipher only",
+"any",
+"server auth",
+"client auth",
+"code signing",
+"email protection",
+"s/mime",
+"ipsec end system",
+"ipsec tunnel",
+"ipsec user",
+"timestamping",
+"ocsp signing",
+"microsoft sgc",
+"netscape sgc"
+
+## LocalObjectReference
+
+**Appears in:**
+
+- [SecretKeySelector](#cert-manager-io-v1-SecretKeySelector)
+
+LocalObjectReference is a reference to an object in the same namespace as the referent.
+If the referent is a cluster-scoped resource (e.g. a ClusterIssuer),
+the reference instead refers to the resource with the given name in the
+configured 'cluster resource namespace', which is set as a flag on the
+controller component (and defaults to the namespace that cert-manager
+runs in).
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` (`string`, required) | Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names |
+
+## ObjectReference
+
+**Appears in:**
+
+- [CertificateSpec](#cert-manager-io-v1-CertificateSpec)
+
+ObjectReference is a reference to an object with a given name, kind and group.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` (`string`, required) | Name of the resource being referred to. |
+| `group` (`string`) | Group of the resource being referred to. |
+
+## PKCS12Keystore
+
+**Appears in:**
+
+- [CertificateKeystores](#cert-manager-io-v1-CertificateKeystores)
+
+PKCS12Keystore configures options for storing a PKCS12 keystore in the
+`spec.secretName` Secret resource.
+
+| Field | Description |
+| ----- | ----------- |
+| `create` (`bool`, required) | Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`, containing the issuing Certificate Authority. |
+| `passwordSecretRef` (`SecretKeySelector`, required) | PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. |
+
+
+## PrivateKeyAlgorithm
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [CertificatePrivateKey](#cert-manager-io-v1-CertificatePrivateKey)
+
+PrivateKeyAlgorithm represent a private key algorithm
+
+## PrivateKeyEncoding
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [CertificatePrivateKey](#cert-manager-io-v1-CertificatePrivateKey)
+
+PrivateKeyEncoding represent a private key encoding
+
+## PrivateKeyRotationPolicy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [CertificatePrivateKey](#cert-manager-io-v1-CertificatePrivateKey)
+
+PrivateKeyRotationPolicy denotes how private keys should be generated or sourced when a Certificate
+is being issued.
+
+## SecretKeySelector
+
+**Appears in:**
+
+- [JKSKeystore](#cert-manager-io-v1-JKSKeystore)
+
+- [PKCS12Keystore](#cert-manager-io-v1-PKCS12Keystore)
+
+SecretKeySelector is a reference to a specific 'key' within a Secret resource.
+In some instances, `key` is a required field.
+
+| Field | Description |
+| ----- | ----------- |
+| `LocalObjectReference` (`LocalObjectReference`) | (Members of `LocalObjectReference` are embedded into this type.) The name of the Secret resource being referred to. |
+| `key` (`string`) | The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. |
+
+## X509Subject
+
+**Appears in:**
+
+- [CertificateSpec](#cert-manager-io-v1-CertificateSpec)
+
+X509Subject Full X509 name specification
+
+
+| Field | Description |
+| ----- | ----------- |
+| `organizations` (`[]string`) | Organizations to be used on the Certificate. |
+| `countries` (`[]string`) | Countries to be used on the Certificate. |
+| `organizationalUnits` (`[]string`) | Organizational Units to be used on the Certificate. |
+| `localities` (`[]string`) | Cities to be used on the Certificate. |
+| `provinces` (`[]string`) | State/Provinces to be used on the Certificate. |
+| `streetAddresses` (`[]string`) | Street addresses to be used on the Certificate. |
+| `postalCodes` (`[]string`) | Postal codes to be used on the Certificate. |
+| `serialNumber` (`string`) | Serial number to be used on the Certificate. |
+
+## PGDGroup
+
+PGDGroup is the Schema for the pgdgroups API
+
+
+Field | Description |
+
+apiVersion [Required] string | pgd.k8s.enterprisedb.io/v1beta1 |
+kind [Required] string | PGDGroup |
+spec [Required]
+PGDGroupSpec
+ |
+
+ No description provided. |
+
+status [Required]
+PGDGroupStatus
+ |
+
+ No description provided. |
+
+
+
+
+## PGDGroupCleanup
+
+PGDGroupCleanup is the Schema for the pgdgroupcleanups API
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` (`string`, required) | `pgd.k8s.enterprisedb.io/v1beta1` |
+| `kind` (`string`, required) | `PGDGroupCleanup` |
+| `spec` (`PGDGroupCleanupSpec`, required) | No description provided. |
+| `status` (`PGDGroupCleanupStatus`, required) | No description provided. |
+
+## Backup
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+Backup configures the backup of cnp-pgd nodes
+
+
+| Field | Description |
+| ----- | ----------- |
+| `configuration` [Required]<br/>`BackupConfiguration` | The CNP configuration to be used for backup. The ServerName value is reserved by the operator. |
+| `cron` [Required]<br/>`ScheduledBackupSpec` | The scheduled backup for the data. |
+
+
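+As a minimal sketch only, the `backup` stanza of a `PGDGroup` might combine both fields
+as follows; the bucket path, secret names, and schedule shown here are placeholders, not defaults:
+
+```yaml
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        destinationPath: "s3://backups/pgdgroup-example"
+        s3Credentials:
+          accessKeyId:
+            name: aws-creds
+            key: ACCESS_KEY_ID
+          secretAccessKey:
+            name: aws-creds
+            key: ACCESS_SECRET_KEY
+    cron:
+      suspend: false
+      immediate: true
+      schedule: "0 0 0 * * *"
+      backupOwnerReference: self
+```
+
+Note that the schedule uses the six-field cron format (with a leading seconds field)
+described for `ScheduledBackupSpec` later in this page.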
+
+
+BackupStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+BackupStatus contains the current status of the pgd backup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `clusterName` [Required]<br/>`string` | No description provided. |
+| `scheduledBackupName` [Required]<br/>`string` | No description provided. |
+| `scheduledBackupHash` [Required]<br/>`string` | No description provided. |
+
+
+
+
+CNPStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+CNPStatus contains any relevant status for the operator about CNP
+
+
+| Field | Description |
+| ----- | ----------- |
+| `dataInstances` [Required]<br/>`int32` | No description provided. |
+| `witnessInstances` [Required]<br/>`int32` | No description provided. |
+| `firstRecoverabilityPointsByMethod` [Required]<br/>`map[string]RecoverabilityPointsByMethod` | The recoverability points by method, keyed per CNP clusterName. |
+| `firstRecoverabilityPoints` [Required]<br/>`map[string]string` | The recoverability points, keyed per CNP clusterName, as a date in RFC3339 format. |
+| `superUserSecretIsPresent` [Required]<br/>`bool` | No description provided. |
+| `applicationUserSecretIsPresent` [Required]<br/>`bool` | No description provided. |
+| `podDisruptionBudgetIsPresent` [Required]<br/>`bool` | No description provided. |
+
+
+
+
+CertManagerTemplate
+
+**Appears in:**
+
+- [ClientCertConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ClientCertConfiguration)
+
+- [ServerCertConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ServerCertConfiguration)
+
+CertManagerTemplate contains the data to generate a certificate request
+
+
+| Field | Description |
+| ----- | ----------- |
+| `spec` [Required]<br/>`CertificateSpec` | The Certificate object specification. |
+| `metadata` [Required]<br/>`Metadata` | The label and annotations metadata. |
+
+
+
+
+ClientCertConfiguration
+
+**Appears in:**
+
+- [TLSConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-TLSConfiguration)
+
+ClientCertConfiguration contains the information to generate the certificate for the streaming_replica user
+
+
+| Field | Description |
+| ----- | ----------- |
+| `caCertSecret` [Required]<br/>`string` | CACertSecret is the secret of the CA to be injected into the CloudNativePG configuration. |
+| `certManager` [Required]<br/>`CertManagerTemplate` | The cert-manager template used to generate the certificates. |
+| `preProvisioned` [Required]<br/>`ClientPreProvisionedCertificates` | PreProvisioned contains how to fetch the pre-generated client certificates. |
+
+
+
+
+ClientPreProvisionedCertificates
+
+**Appears in:**
+
+- [ClientCertConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ClientCertConfiguration)
+
+ClientPreProvisionedCertificates instruct how to fetch the pre-generated client certificates
+
+
+| Field | Description |
+| ----- | ----------- |
+| `streamingReplica` [Required]<br/>`PreProvisionedCertificate` | StreamingReplica is the pre-generated certificate for the 'streaming_replica' user. |
+
+
+
+
+CnpBaseConfiguration
+
+**Appears in:**
+
+- [CnpConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-CnpConfiguration)
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+CnpBaseConfiguration contains the configuration parameters that can be applied to both CNP Witness and Data nodes
+
+
+| Field | Description |
+| ----- | ----------- |
+| `startDelay` [Required]<br/>`int32` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 3600). |
+| `stopDelay` [Required]<br/>`int32` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shut down (default 180). |
+| `smartShutdownTimeout`<br/>`int32` | The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. Make sure you reserve enough time for the operator to request a fast shutdown of Postgres (that is: `stopDelay` - `smartShutdownTimeout`). |
+| `storage` [Required]<br/>`StorageConfiguration` | Configuration of the storage of the instances. |
+| `walStorage` [Required]<br/>`StorageConfiguration` | Configuration of the WAL storage for the instances. |
+| `clusterMaxStartDelay` [Required]<br/>`int32` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 300). |
+| `affinity`<br/>`AffinityConfiguration` | Affinity/Anti-affinity rules for Pods. |
+| `resources`<br/>`ResourceRequirements` | Resources requirements of every generated Pod. Please refer to <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information. |
+| `postgresql`<br/>`PostgresConfiguration` | Configuration of the PostgreSQL server. |
+| `monitoring` [Required]<br/>`MonitoringConfiguration` | The configuration of the monitoring infrastructure of this cluster. |
+| `logLevel` [Required]<br/>`string` | The instances' log level, one of the following values: error, warning, info (default), debug, trace. |
+| `serviceAccountTemplate` [Required]<br/>`ServiceAccountTemplate` | The service account template to be passed to CNP. |
+| `otel` [Required]<br/>`OTELConfiguration` | OpenTelemetry configuration. |
+| `postInitSQL` [Required]<br/>`[]string` | List of SQL queries to be executed as a superuser immediately after a node has been created. To be used with extreme care (by default empty). |
+| `postInitTemplateSQL` [Required]<br/>`[]string` | List of SQL queries to be executed as a superuser in the `template1` database after a node has been created. To be used with extreme care (by default empty). |
+| `seccompProfile` [Required]<br/>`SeccompProfile` | The SeccompProfile applied to every Pod and Container. Defaults to `RuntimeDefault`. |
+| `metadata` [Required]<br/>`InheritedMetadata` | Metadata applied exclusively to the generated Cluster resources. Useful for applying AppArmor profiles. |
+| `managed` [Required]<br/>`ManagedConfiguration` | The configuration that is used by the portions of PostgreSQL that are managed by the CNP instance manager. |
+
+
+
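+As an illustrative sketch, a few of these fields as they could appear in the `cnp`
+section of a `PGDGroup`; the storage class, sizes, resource values, and parameters below
+are placeholders chosen for the example, not recommendations:
+
+```yaml
+spec:
+  cnp:
+    storage:
+      size: 10Gi
+      storageClass: standard
+    walStorage:
+      size: 5Gi
+    resources:
+      requests:
+        cpu: "1"
+        memory: 1Gi
+      limits:
+        cpu: "2"
+        memory: 2Gi
+    postgresql:
+      parameters:
+        log_checkpoints: "on"
+    logLevel: info
+```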
+
+CnpConfiguration
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+CnpConfiguration contains the configurations of the data nodes that will be injected
+into the resulting clusters composing the PGD group
+
+
+| Field | Description |
+| ----- | ----------- |
+| `CnpBaseConfiguration`<br/>(Members of `CnpBaseConfiguration` are embedded into this type.) | No description provided. |
+| `enableSuperuserAccess`<br/>`bool` | When this option is enabled, the CNP operator will create or use the secret defined in the SuperuserSecret to allow superuser (postgres) access to the database. When this option is disabled on a running Group, the operator will ignore the content of the secret and set the password of the `postgres` user to `NULL`. Enabled by default. |
+| `superuserSecret`<br/>`LocalObjectReference` | The secret containing the superuser password. A new secret will be created with a randomly generated password if not defined. This field is only allowed in the CNP Instances configuration. A Witness Node will always use the same SuperuserSecret as the other instances. |
+
+
+
+
+ConnectionString
+
+(Alias of `map[string]string`)
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+ConnectionString represent the parameters to connect to a
+PostgreSQL cluster
+
+ConnectivityConfiguration
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+ConnectivityConfiguration describes how to generate the services and certificates for the PGDGroup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `dns` [Required]<br/>`RootDNSConfiguration` | Describes how the FQDN for the resources should be generated. |
+| `tls` [Required]<br/>`TLSConfiguration` | The configuration of the TLS infrastructure. |
+| `nodeServiceTemplate` [Required]<br/>`ServiceTemplate` | Instructs how to generate the service for each node. |
+| `groupServiceTemplate` [Required]<br/>`ServiceTemplate` | Instructs how to generate the service for the PGDGroup. |
+| `proxyServiceTemplate` [Required]<br/>`ServiceTemplate` | Instructs how to generate the service pointing to the PGD Proxy. |
+
+
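+A sketch of a `connectivity` stanza follows. It assumes cert-manager Issuers named
+`server-ca-issuer` and `client-ca-issuer` already exist in the namespace; the domain,
+secret names, and the TLS mode value are illustrative only:
+
+```yaml
+spec:
+  connectivity:
+    dns:
+      domain: "alpha.mydomain.com"
+    tls:
+      mode: verify-ca
+      serverCert:
+        caCertSecret: server-ca-key-pair
+        certManager:
+          spec:
+            issuerRef:
+              name: server-ca-issuer
+              kind: Issuer
+              group: cert-manager.io
+      clientCert:
+        caCertSecret: client-ca-key-pair
+        certManager:
+          spec:
+            issuerRef:
+              name: client-ca-issuer
+              kind: Issuer
+              group: cert-manager.io
+```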
+
+
+ConnectivityStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+ConnectivityStatus contains any relevant status for the operator about Connectivity
+
+
+| Field | Description |
+| ----- | ----------- |
+| `replicationTLSCertificate` [Required]<br/>`ReplicationCertificateStatus` | ReplicationTLSCertificate is the name of the replication TLS certificate, if we have it. |
+| `nodeTLSCertificates` [Required]<br/>`[]NodeCertificateStatus` | NodeTLSCertificates are the names of the certificates that have been created for the PGD nodes. |
+| `unusedCertificates` [Required]<br/>`[]string` | UnusedCertificates are the names of the certificates that we don't use anymore for the PGD nodes. |
+| `nodesWithoutCertificates` [Required]<br/>`[]string` | NodesWithoutCertificates are the names of the nodes that do not have a server certificate. |
+| `nodesNeedingServiceReconciliation` [Required]<br/>`[]string` | NodesNeedingServiceReconciliation are the names of the nodes that do not have a server certificate. |
+| `configurationHash` [Required]<br/>`string` | ConfigurationHash is the hash code of the connectivity configuration, used to check if we had a change in the configuration or not. |
+
+
+
+
+DNSConfiguration
+
+**Appears in:**
+
+- [RootDNSConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-RootDNSConfiguration)
+
+DNSConfiguration describes how the FQDN for the resources should be generated
+
+
+| Field | Description |
+| ----- | ----------- |
+| `domain` [Required]<br/>`string` | Contains the domain name used by all services in the PGDGroup. It is the responsibility of the user to ensure that the value specified here matches the rendered `nodeServiceTemplate` and `groupServiceTemplate`. |
+| `hostSuffix` [Required]<br/>`string` | Contains an optional suffix to add to all the service names in the PGDGroup. The meaning of this setting is to allow the user to easily mark all the services created in a location for routing purposes (i.e., add a generic rule to CoreDNS to rewrite some service suffixes as local). |
+
+
+
+
+DiscoveryJobConfig
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+DiscoveryJobConfig contains a series of fields that configure the discovery job
+
+
+| Field | Description |
+| ----- | ----------- |
+| `delay` [Required]<br/>`int` | Delay is the amount of time to sleep between retries, measured in seconds. |
+| `retries` [Required]<br/>`int` | Retries is how many times the operation should be retried. |
+| `timeout` [Required]<br/>`int` | Timeout is the amount of time given to the operation to succeed, measured in seconds. |
+
+
+
+
+InheritedMetadata
+
+**Appears in:**
+
+- [CnpBaseConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-CnpBaseConfiguration)
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+InheritedMetadata contains metadata to be inherited by all resources related to a Cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `labels` [Required]<br/>`map[string]string` | No description provided. |
+| `annotations` [Required]<br/>`map[string]string` | No description provided. |
+
+
+
+
+Metadata
+
+**Appears in:**
+
+- [CertManagerTemplate](#pgd-k8s-enterprisedb-io-v1beta1-CertManagerTemplate)
+
+- [ServiceTemplate](#pgd-k8s-enterprisedb-io-v1beta1-ServiceTemplate)
+
+Metadata is a structure similar to the metav1.ObjectMeta, but still
+parseable by controller-gen to create a suitable CRD for the user.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `labels`<br/>`map[string]string` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: <http://kubernetes.io/docs/user-guide/labels> |
+| `annotations`<br/>`map[string]string` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: <http://kubernetes.io/docs/user-guide/annotations> |
+
+
+
+
+NodeCertificateStatus
+
+**Appears in:**
+
+- [ConnectivityStatus](#pgd-k8s-enterprisedb-io-v1beta1-ConnectivityStatus)
+
+NodeCertificateStatus encapsulate the status of the server certificate
+of a CNP node
+
+
+| Field | Description |
+| ----- | ----------- |
+| `ReplicationCertificateStatus`<br/>(Members of `ReplicationCertificateStatus` are embedded into this type.) | No description provided. |
+| `nodeName` [Required]<br/>`string` | NodeName is the name of the CNP cluster using this certificate. |
+
+
+
+
+NodeKindName
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [NodeSummary](#pgd-k8s-enterprisedb-io-v1beta1-NodeSummary)
+
+NodeKindName is a type containing the potential values of node_kind_name from bdr.node_summary
+
+NodeSummary
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+NodeSummary shows relevant info from bdr.node_summary
+
+
+| Field | Description |
+| ----- | ----------- |
+| `node_name` [Required]<br/>`string` | Name of the node. |
+| `node_group_name` [Required]<br/>`string` | NodeGroupName is the name of the joined group. |
+| `peer_state_name` [Required]<br/>`string` | Consistent state of the node in human-readable form. |
+| `peer_target_state_name` [Required]<br/>`string` | State which the node is trying to reach (during join or promotion). |
+| `node_kind_name` [Required]<br/>`NodeKindName` | The kind of node: witness or data. |
+
+
+
+
+NodesExtensionsStatus
+
+(Alias of `[]github.com/EnterpriseDB/pg4k-pgd/api/v1beta1.NodeExtensionStatus`)
+
+NodesExtensionsStatus contains a list of NodeExtensionStatus entries
+
+OTELConfiguration
+
+**Appears in:**
+
+- [CnpBaseConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-CnpBaseConfiguration)
+
+OTELConfiguration is the configuration for external openTelemetry
+
+
+| Field | Description |
+| ----- | ----------- |
+| `metricsURL` [Required]<br/>`string` | The OpenTelemetry HTTP endpoint URL to accept metrics data. |
+| `traceURL` [Required]<br/>`string` | The OpenTelemetry HTTP endpoint URL to accept trace data. |
+| `traceEnable` [Required]<br/>`bool` | Whether to push trace data to the OpenTelemetry `traceURL`. |
+| `tls` [Required]<br/>`OTELTLSConfiguration` | TLSConfiguration provides the TLS certificate configuration when `metricsURL` and `traceURL` are using HTTPS. |
+
+
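+For illustration only, an `otel` block under the `cnp` section could look like the
+following; the collector service name, namespace, and endpoints are placeholders
+(port 4318 and the `/v1/metrics` and `/v1/traces` paths are the usual OTLP/HTTP defaults):
+
+```yaml
+spec:
+  cnp:
+    otel:
+      metricsURL: "http://otel-collector.observability:4318/v1/metrics"
+      traceURL: "http://otel-collector.observability:4318/v1/traces"
+      traceEnable: true
+```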
+
+
+OTELTLSConfiguration
+
+**Appears in:**
+
+- [OTELConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-OTELConfiguration)
+
+OTELTLSConfiguration contains the certificate configuration for TLS connections to openTelemetry
+
+
+| Field | Description |
+| ----- | ----------- |
+| `caBundleSecretRef` [Required]<br/>`SecretKeySelector` | CABundleSecretRef is a reference to a secret field containing the CA bundle to verify the openTelemetry server certificate. |
+| `clientCertSecret` [Required]<br/>`LocalObjectReference` | ClientCertSecret is the name of the secret containing the client certificate used to connect to openTelemetry. It must contain both the standard "tls.crt" and "tls.key" files, encoded in PEM format. |
+
+
+
+
+PGDGroupCleanupSpec
+
+**Appears in:**
+
+- [PGDGroupCleanup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupCleanup)
+
+PGDGroupCleanupSpec defines the desired state of PGDGroupCleanup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `executor` [Required]<br/>`string` | No description provided. |
+| `target` [Required]<br/>`string` | No description provided. |
+| `force` [Required]<br/>`bool` | Force will force the removal of the PGDGroup even if the target PGDGroup nodes are not parted. |
+
+
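+As a sketch, a `PGDGroupCleanup` manifest might look like the following. It assumes
+`executor` and `target` name existing PGDGroups in the same namespace (here the
+hypothetical `region-a` and `region-b`):
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroupCleanup
+metadata:
+  name: region-b-cleanup
+spec:
+  executor: region-a
+  target: region-b
+  force: false
+```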
+
+
+PGDGroupCleanupStatus
+
+**Appears in:**
+
+- [PGDGroupCleanup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupCleanup)
+
+PGDGroupCleanupStatus defines the observed state of PGDGroupCleanup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `phase` [Required]<br/>`github.com/EnterpriseDB/pg4k-pgd/pkg/resources.OperatorPhaseCleanup` | No description provided. |
+
+
+
+
+PGDGroupSpec
+
+**Appears in:**
+
+- [PGDGroup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroup)
+
+PGDGroupSpec defines the desired state of PGDGroup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `imageName` [Required]<br/>`string` | Name of the container image, supporting both tags (`<image>:<tag>`) and digests for deterministic and repeatable deployments (`<image>:<tag>@sha256:<digestValue>`). |
+| `imagePullPolicy`<br/>`PullPolicy` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: <https://kubernetes.io/docs/concepts/containers/images#updating-images> |
+| `imagePullSecrets` [Required]<br/>`[]LocalObjectReference` | The list of pull secrets to be used to pull the operator and/or the operand images. |
+| `inheritedMetadata` [Required]<br/>`InheritedMetadata` | Metadata that will be inherited by all objects related to the pgdGroup. |
+| `instances` [Required]<br/>`int32` | Number of instances required in the cluster. |
+| `proxyInstances` [Required]<br/>`int32` | Number of proxy instances required in the cluster. |
+| `witnessInstances` [Required]<br/>`int32` | Number of witness instances required in the cluster. |
+| `backup` [Required]<br/>`Backup` | The configuration to be used for backups in the CNP instances. |
+| `restore` [Required]<br/>`Restore` | The configuration to restore this PGD group from an Object Store service. |
+| `cnp` [Required]<br/>`CnpConfiguration` | Instances configuration that will be injected into the CNP clusters that compose the PGD Group. |
+| `witness` [Required]<br/>`CnpBaseConfiguration` | WitnessInstances configuration that will be injected into the WitnessInstances CNP clusters. If not defined, it will default to the Instances configuration. |
+| `pgd` [Required]<br/>`PgdConfiguration` | Pgd contains instructions to bootstrap this cluster. |
+| `pgdProxy` [Required]<br/>`PGDProxyConfiguration` | PGDProxy contains instructions to configure PGD Proxy. |
+| `connectivity` [Required]<br/>`ConnectivityConfiguration` | Configures the connectivity of the PGDGroup, like the services and certificates that will be used. |
+| `failingFinalizerTimeLimitSeconds` [Required]<br/>`int32` | The amount of seconds that the operator will wait in case of a failing finalizer. A finalizer is considered failing when the operator cannot reach any nodes of the PGDGroup. |
+
+
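+The following condensed sketch ties several of these fields together in a single
+`PGDGroup`. All names, counts, and sizes are placeholders, and the `backup`,
+`connectivity`, and `pgdProxy` stanzas are elided (see the dedicated examples on this page):
+
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: region-a
+spec:
+  instances: 2
+  proxyInstances: 2
+  witnessInstances: 1
+  cnp:
+    storage:
+      size: 1Gi
+  pgd:
+    parentGroup:
+      name: world
+      create: true
+```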
+
+
+PGDGroupStatus
+
+**Appears in:**
+
+- [PGDGroup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroup)
+
+PGDGroupStatus defines the observed state of PGDGroup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `latestGeneratedNode` [Required]<br/>`int32` | ID of the latest generated node (used to avoid node name clashing). |
+| `phase` [Required]<br/>`github.com/EnterpriseDB/pg4k-pgd/pkg/resources.OperatorPhase` | The initialization phase of this cluster. |
+| `phaseDetails` [Required]<br/>`string` | The details of the current phase. |
+| `phaseTroubleshootHints` [Required]<br/>`string` | PhaseTroubleshootHints general troubleshooting indications for the given phase. |
+| `phaseType` [Required]<br/>`github.com/EnterpriseDB/pg4k-pgd/pkg/resources.PhaseType` | PhaseType describes the phase category. |
+| `conditions` [Required]<br/>`[]Condition` | Conditions for PGDGroup object. |
+| `nodes` [Required]<br/>`[]NodeSummary` | The list of summaries for the nodes in the group. |
+| `backup` [Required]<br/>`BackupStatus` | The node that is taking backups of this PGDGroup. |
+| `restore` [Required]<br/>`RestoreStatus` | The status of the restore process. |
+| `PGD` [Required]<br/>`PGDStatus` | Last known status of PGD. |
+| `CNP` [Required]<br/>`CNPStatus` | Last known status of CNP. |
+| `PGDProxy` [Required]<br/>`PGDProxyStatus` | Last known status of PGDProxy. |
+| `connectivity` [Required]<br/>`ConnectivityStatus` | Last known status of Connectivity. |
+| `pause` [Required]<br/>`PauseStatus` | Last known status of Pause. |
+
+
+
+
+PGDNodeGroupEntry
+
+**Appears in:**
+
+- [PGDStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDStatus)
+
+PGDNodeGroupEntry shows information about the node groups available
+in the PGD configuration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name is the name of the node group. |
+| `enableProxyRouting` [Required]<br/>`bool` | EnableProxyRouting is true if the node group allows running PGD Proxies. |
+| `enableRaft` [Required]<br/>`bool` | EnableRaft is true if the node group has a subgroup raft instance. |
+| `routeWriterMaxLag` [Required]<br/>`int64` | RouteWriterMaxLag is the maximum lag in bytes of the new write candidate to be selected as write leader; if no candidate passes this, there will be no writer selected automatically. |
+| `routeReaderMaxLag` [Required]<br/>`int64` | RouteReaderMaxLag is the maximum lag in bytes for a node to be considered a viable read-only node. |
+| `routeWriterWaitFlush` [Required]<br/>`bool` | RouteWriterWaitFlush determines whether to wait for replication queue flush before switching to a new leader when using `bdr.routing_leadership_transfer()`. |
+
+
+
+
+PGDNodeGroupSettings
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+PGDNodeGroupSettings contains the settings of the PGD Group
+
+
+| Field | Description |
+| ----- | ----------- |
+| `routeWriterMaxLag` [Required]<br/>`int64` | RouteWriterMaxLag is the maximum lag in bytes of the new write candidate to be selected as write leader; if no candidate passes this, there will be no writer selected automatically. Defaults to -1. |
+| `routeReaderMaxLag` [Required]<br/>`int64` | RouteReaderMaxLag is the maximum lag in bytes for a node to be considered a viable read-only node. Defaults to -1. |
+| `routeWriterWaitFlush` [Required]<br/>`bool` | RouteWriterWaitFlush determines whether to wait for replication queue flush before switching to a new leader when using `bdr.routing_leadership_transfer()`. Defaults to false. |
+
+
+
+
+PGDProxyConfiguration
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+PGDProxyConfiguration defines the configuration of PGD Proxy
+
+
+| Field | Description |
+| ----- | ----------- |
+| `imageName` [Required]<br/>`string` | Name of the PGDProxy container image. |
+| `logLevel` [Required]<br/>`string` | The PGD Proxy log level, one of the following values: error, warning, info (default), debug, trace. |
+| `logEncoder` [Required]<br/>`string` | The format of the log output. |
+| `proxyAffinity` [Required]<br/>`Affinity` | ProxyAffinity/Anti-affinity rules for pods. |
+| `proxyNodeSelector` [Required]<br/>`map[string]string` | ProxyNodeSelector rules for pods. |
+| `proxyTolerations` [Required]<br/>`[]Toleration` | ProxyTolerations rules for pods. |
+| `proxyResources`<br/>`ResourceRequirements` | Defines the resources assigned to the proxy. If not defined, default requests and limits values are used. |
+
+
+
+
+PGDProxyEntry
+
+**Appears in:**
+
+- [PGDStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDStatus)
+
+PGDProxyEntry shows information about the proxies available
+in the PGD configuration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name is the name of the proxy. |
+| `fallbackGroupNames` [Required]<br/>`[]string` | FallbackGroupNames are the names of the fallback groups configured for this proxy. |
+| `parentGroupName` [Required]<br/>`string` | ParentGroupName is the parent PGD group of this proxy. |
+| `maxClientConn` [Required]<br/>`int` | MaxClientConn is the maximum number of connections the proxy will accept. |
+| `maxServerConn` [Required]<br/>`int` | MaxServerConn is the maximum number of connections the proxy will make to the Postgres node. |
+| `serverConnTimeout` [Required]<br/>`int64` | ServerConnTimeout is the connection timeout for server connections, in seconds. |
+| `serverConnKeepalive` [Required]<br/>`int64` | ServerConnKeepalive is the keepalive interval for server connections, in seconds. |
+| `fallbackGroupTimeout` [Required]<br/>`int64` | FallbackGroupTimeout is the interval after which the routing falls back to one of the fallback_groups. |
+| `consensusGracePeriod` [Required]<br/>`int64` | ConsensusGracePeriod is the duration in seconds for which the proxy continues to route even upon loss of a Raft leader. |
+
+
+
+
+PGDProxySettings
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+PGDProxySettings contains the settings of the proxy
+
+
+| Field | Description |
+| ----- | ----------- |
+| `fallbackGroups` [Required]<br/>`[]string` | FallbackGroups is the list of groups the proxy should forward connections to when all the data nodes of this PGD group are not available. |
+| `maxClientConn` [Required]<br/>`int` | MaxClientConn is the maximum number of connections the proxy will accept. Defaults to 32767. |
+| `maxServerConn` [Required]<br/>`int` | MaxServerConn is the maximum number of connections the proxy will make to the Postgres node. Defaults to 32767. |
+| `serverConnTimeout` [Required]<br/>`int64` | ServerConnTimeout is the connection timeout for server connections, in seconds. Defaults to 2. |
+| `serverConnKeepalive` [Required]<br/>`int64` | ServerConnKeepalive is the keepalive interval for server connections, in seconds. Defaults to 10. |
+| `fallbackGroupTimeout` [Required]<br/>`int64` | FallbackGroupTimeout is the interval after which the routing falls back to one of the fallback_groups. Defaults to 60. |
+| `consensusGracePeriod` [Required]<br/>`int64` | ConsensusGracePeriod is the duration in seconds for which the proxy continues to route even upon loss of a Raft leader. If set to 0s, the proxy stops routing immediately. Defaults to 6. |
+
+
+
+
+PGDProxyStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+PGDProxyStatus any relevant status for the operator about PGDProxy
+
+
+| Field | Description |
+| ----- | ----------- |
+| `proxyInstances` [Required]<br/>`int32` | No description provided. |
+| `writeLead` [Required]<br/>`string` | WriteLead is a reserved field for the operator and is not intended for external usage. It will be removed in future versions. |
+| `proxyHash` [Required]<br/>`string` | ProxyHash contains the hash we use to detect if we need to reconcile the proxies. |
+
+
+
+
+PGDStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+PGDStatus any relevant status for the operator about PGD
+
+
+| Field | Description |
+| ----- | ----------- |
+| `raftConsensusLastChangedStatus` [Required]<br/>`github.com/EnterpriseDB/pg4k-pgd/pkg/resources.PGDRaftStatus` | RaftConsensusLastChangedStatus indicates the latest reported status from `bdr.monitor_group_raft`. |
+| `raftConsensusLastChangedMessage` [Required]<br/>`string` | RaftConsensusLastChangedMessage indicates the latest reported message from `bdr.monitor_group_raft`. |
+| `raftConsensusLastChangedTimestamp` [Required]<br/>`string` | RaftConsensusLastChangedTimestamp indicates when the status and message were first reported. |
+| `registeredProxies` [Required]<br/>`[]PGDProxyEntry` | RegisteredProxies is the status of the registered proxies. |
+| `nodeGroup` [Required]<br/>`PGDNodeGroupEntry` | NodeGroup is the status of the node group associated with the PGDGroup. |
+
+
+
+
+ParentGroupConfiguration
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+ParentGroupConfiguration contains the topology configuration
+of PGD
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name of the parent group. |
+| `create` [Required]<br/>`bool` | Create is true when the operator should create the parent group if it doesn't exist. |
+
+
+
+
+PauseStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+PauseStatus contains the information of group hibernating
+
+
+| Field | Description |
+| ----- | ----------- |
+| `active` [Required]<br/>`bool` | Active indicates the PGDGroup is either:<br/>- in process of pausing<br/>- already paused<br/>- in process of resuming |
+| `instances` [Required]<br/>`int32` | Instances is the number of paused PGD instances. |
+| `lastStartedTime` [Required]<br/>`Time` | LastStartedTime is the last time the PGDGroup started pausing. |
+| `lastCompletedTime` [Required]<br/>`Time` | LastCompletedTime is the last time the PGDGroup completed pausing. |
+| `lastResumeStartedTime` [Required]<br/>`Time` | LastResumeStartedTime is the last time the PGDGroup started resuming. |
+| `lastResumeCompletedTime` [Required]<br/>`Time` | LastResumeCompletedTime is the last time the PGDGroup completed resuming. |
+
+
+
+
+PgdConfiguration
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+PgdConfiguration is the configuration of the PGD group structure
+
+
+| Field | Description |
+| ----- | ----------- |
+| `parentGroup` [Required]<br/>`ParentGroupConfiguration` | ParentGroup configures the topology of the PGD group. |
+| `discovery` [Required]<br/>`[]ConnectionString` | The parameters we will use to connect to a node belonging to the parent PGD group. Even if provided, the following parameters will be overridden with default values: `application_name`, `sslmode`, `dbname` and `user`. The following parameters should not be provided nor used, as they are not even overridden with defaults: `sslkey`, `sslcert`, `sslrootcert`. |
+| `discoveryJob` [Required]<br/>`DiscoveryJobConfig` | DiscoveryJob is the configuration of the PGD Discovery job. |
+| `databaseName` [Required]<br/>`string` | Name of the database used by the application. Default: `app`. |
+| `ownerName` [Required]<br/>`string` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the database key. |
+| `ownerCredentialsSecret` [Required]<br/>`LocalObjectReference` | Name of the secret containing the initial credentials for the owner of the user database. If empty, a new secret will be created from scratch. |
+| `proxySettings` [Required]<br/>`PGDProxySettings` | Configuration for the proxy. |
+| `nodeGroupSettings` [Required]<br/>`PGDNodeGroupSettings` | Configuration for the PGD Group. |
+| `globalRouting` [Required]<br/>`bool` | GlobalRouting is true when global routing is enabled; in this case the proxies will be created in the parent group. |
+| `mutations` [Required]<br/>`SQLMutations` | List of SQL mutations to apply to the node group. |
+
+
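+As an illustrative sketch, a `pgd` stanza could combine these fields as follows; the
+parent group name `world`, the fallback group `region-b`, and the discovery job timings
+are placeholders, not defaults:
+
+```yaml
+spec:
+  pgd:
+    parentGroup:
+      name: world
+      create: true
+    databaseName: app
+    proxySettings:
+      fallbackGroups:
+        - region-b
+    discoveryJob:
+      delay: 10
+      retries: 30
+      timeout: 300
+```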
+
+
+PreProvisionedCertificate
+
+**Appears in:**
+
+- [ClientPreProvisionedCertificates](#pgd-k8s-enterprisedb-io-v1beta1-ClientPreProvisionedCertificates)
+
+PreProvisionedCertificate contains the data needed to supply a pre-generated certificate
+
+
+| Field | Description |
+| ----- | ----------- |
+| `secretRef` [Required]<br/>`string` | SecretRef is a name pointing to a secret that contains a `tls.crt` and `tls.key`. |
+
+
+
+
+RecoverabilityPointsByMethod
+
+(Alias of `map[github.com/EnterpriseDB/cloud-native-postgres/api/v1.BackupMethod]k8s.io/apimachinery/pkg/apis/meta/v1.Time`)
+
+**Appears in:**
+
+- [CNPStatus](#pgd-k8s-enterprisedb-io-v1beta1-CNPStatus)
+
+RecoverabilityPointsByMethod contains the first recoverability points for a given backup method
+
+ReplicationCertificateStatus
+
+**Appears in:**
+
+- [ConnectivityStatus](#pgd-k8s-enterprisedb-io-v1beta1-ConnectivityStatus)
+
+- [NodeCertificateStatus](#pgd-k8s-enterprisedb-io-v1beta1-NodeCertificateStatus)
+
+ReplicationCertificateStatus encapsulate the certificate status
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name is the name of the certificate. |
+| `hash` [Required]<br/>`string` | Hash is the hash of the configuration for which it has been generated. |
+| `isReady` [Required]<br/>`bool` | Ready is true when the certificate is ready. |
+| `preProvisioned` [Required]<br/>`bool` | PreProvisioned is true if the certificate is preProvisioned. |
+
+
+
+
+Restore
+
+**Appears in:**
+
+- [PGDGroupSpec](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupSpec)
+
+Restore configures the restore of a PGD group from an object store
+
+
+| Field | Description |
+| ----- | ----------- |
+| `volumeSnapshots`<br/>`VolumeSnapshotsConfiguration` | The configuration for volumeSnapshot restore. |
+| `barmanObjectStore` [Required]<br/>`BarmanObjectStoreConfiguration` | The configuration for the barman-cloud tool suite. |
+| `recoveryTarget` [Required]<br/>`RecoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a RecoveryTarget object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: <https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET> |
+| `serverNames` [Required]<br/>`[]string` | The list of server names to be used as a recovery origin. One of these servers will be elected as the seeding one when evaluating the recovery target. This option is only used when restoring from `barmanObjectStore`. |
+
+
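+A sketch of a `restore` stanza performing a PITR from an object store follows; the
+bucket path, credential secret, and target timestamp are placeholders, and the same
+structure is discussed at length in the Recovery page:
+
+```yaml
+spec:
+  restore:
+    serverNames:
+      - pgdgroup-backup-1
+    barmanObjectStore:
+      destinationPath: "s3://backups/pgdgroup-backup"
+      s3Credentials:
+        accessKeyId:
+          name: aws-creds
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: aws-creds
+          key: ACCESS_SECRET_KEY
+      wal:
+        maxParallel: 8
+    recoveryTarget:
+      targetTime: "2023-08-01 10:00:00.00000+00"
+```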
+
+
+RestoreStatus
+
+**Appears in:**
+
+- [PGDGroupStatus](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupStatus)
+
+RestoreStatus contains the current status of the restore
+process
+
+
+| Field | Description |
+| ----- | ----------- |
+| `serverName` [Required]<br/>`string` | The name of the server to be restored. |
+| `VolumeSnapshots` [Required]<br/>`[]VolumeSnapshotRestoreStatus` | The selected volumeSnapshots to restore. |
+
+
+
+
+RootDNSConfiguration
+
+**Appears in:**
+
+- [ConnectivityConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ConnectivityConfiguration)
+
+RootDNSConfiguration describes how the FQDN for the resources should be generated
+
+
+| Field | Description |
+| ----- | ----------- |
+| `DNSConfiguration`<br/>(Members of `DNSConfiguration` are embedded into this type.) | No description provided. |
+| `additional` [Required]<br/>`[]DNSConfiguration` | AdditionalDNSConfigurations adds more possible FQDNs for the resources. |
+
+
+
+
+SQLMutation
+
+SQLMutation is a series of SQL statements to apply atomically
+
+
+| Field | Description |
+| ----- | ----------- |
+| `isApplied` [Required]<br/>`[]string` | List of boolean-returning SQL queries. If any of them returns false, the mutation will be applied. |
+| `exec` [Required]<br/>`[]string` | List of SQL queries to be executed to apply this mutation. |
+| `type`<br/>`SQLMutationType` | Type determines when the SQLMutation occurs.<br/>- `always`: reconcile the mutation at each reconciliation cycle<br/>- `beforeSubgroupRaft`: executed only before the subgroupRaft is enabled<br/>If not specified, the Type defaults to `always`. |
+
+
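+For illustration only, an entry in `spec.pgd.mutations` might use an `isApplied` check
+that returns true once the change is in place, so the `exec` statements run only when
+needed (the extension shown is an arbitrary example):
+
+```yaml
+spec:
+  pgd:
+    mutations:
+      - type: always
+        isApplied:
+          - "SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements')"
+        exec:
+          - "CREATE EXTENSION IF NOT EXISTS pg_stat_statements"
+```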
+
+
+SQLMutationType
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [SQLMutation](#pgd-k8s-enterprisedb-io-v1beta1-SQLMutation)
+
+SQLMutationType a supported type of SQL Mutation
+
+SQLMutations
+
+(Alias of `[]github.com/EnterpriseDB/pg4k-pgd/api/v1beta1.SQLMutation`)
+
+**Appears in:**
+
+- [PgdConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-PgdConfiguration)
+
+SQLMutations A list of SQLMutation
+
+ScheduledBackupSpec
+
+**Appears in:**
+
+- [Backup](#pgd-k8s-enterprisedb-io-v1beta1-Backup)
+
+ScheduledBackupSpec defines the desired state of ScheduledBackup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `suspend` [Required]<br/>`bool` | Whether this backup is suspended or not. |
+| `immediate` [Required]<br/>`bool` | Whether the first backup has to start immediately after creation or not. |
+| `schedule` [Required]<br/>`string` | The schedule does not follow the same format used in Kubernetes CronJobs, as it includes an additional second specifier; see <https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format>. |
+| `backupOwnerReference` [Required]<br/>`string` | Indicates which ownerReference should be put inside the created backup resources.<br/>- none: no owner reference for created backup objects (same behavior as before the field was introduced)<br/>- self: sets the ScheduledBackup object as owner of the backup<br/>- cluster: sets the cluster as owner of the backup |
+| `target` [Required]<br/>`BackupTarget` | The policy to decide which instance should perform this backup. If empty, it defaults to `cluster.spec.backup.target`. Available options are the empty string, `primary` and `prefer-standby`. `primary` to have backups run always on primary instances, `prefer-standby` to have backups run preferably on the most updated standby, if available. |
+| `method`<br/>`BackupMethod` | The backup method to be used, possible options are `barmanObjectStore` and `volumeSnapshot`. Defaults to `barmanObjectStore`. |
+| `online`<br/>`bool` | Whether the default type of backup with volume snapshots is online/hot (`true`, default) or offline/cold (`false`). Overrides the default setting specified in the cluster field `.spec.backup.volumeSnapshot.online`. |
+| `onlineConfiguration`<br/>`OnlineConfiguration` | Configuration parameters to control the online/hot backup with volume snapshots. Overrides the default settings specified in the cluster `.backup.volumeSnapshot.onlineConfiguration` stanza. |
+
+
+
+
+ServerCertConfiguration
+
+**Appears in:**
+
+- [TLSConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-TLSConfiguration)
+
+ServerCertConfiguration contains the information to generate the certificates for the nodes
+
+
+| Field | Description |
+| ----- | ----------- |
+| `caCertSecret` [Required]<br/>`string` | CACertSecret is the secret of the CA to be injected into the CloudNativePG configuration. |
+| `certManager` [Required]<br/>`CertManagerTemplate` | The cert-manager template used to generate the certificates. |
+
+
+
+
+ServiceTemplate
+
+**Appears in:**
+
+- [ConnectivityConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ConnectivityConfiguration)
+
+ServiceTemplate is a structure that allows the user to set a template for the Service generation.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `metadata`<br/>`Metadata` | Standard object's metadata. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata> |
+| `spec`<br/>`ServiceSpec` | Specification of the desired behavior of the service. More info: <https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status> |
+| `updateStrategy`<br/>`ServiceUpdateStrategy` | UpdateStrategy indicates how to update the services generated by this template. |
+
+
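+As a sketch, a `nodeServiceTemplate` exposing each node through a LoadBalancer service
+might look like the following; the annotation is an arbitrary illustration of
+per-provider metadata, not a requirement:
+
+```yaml
+spec:
+  connectivity:
+    nodeServiceTemplate:
+      metadata:
+        annotations:
+          service.beta.kubernetes.io/aws-load-balancer-type: external
+      spec:
+        type: LoadBalancer
+      updateStrategy: patch
+```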
+
+
+ServiceUpdateStrategy
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ServiceTemplate](#pgd-k8s-enterprisedb-io-v1beta1-ServiceTemplate)
+
+ServiceUpdateStrategy defines the type for updating LoadBalancers. Allowed values are "patch" and "replace".
+
+TLSConfiguration
+
+**Appears in:**
+
+- [ConnectivityConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-ConnectivityConfiguration)
+
+TLSConfiguration is the configuration of the TLS infrastructure used
+by PGD to connect to the nodes
+
+
+| Field | Description |
+| ----- | ----------- |
+| `mode` [Required]<br/>`TLSMode` | No description provided. |
+| `serverCert` [Required]<br/>`ServerCertConfiguration` | The configuration for the server certificates. |
+| `clientCert` [Required]<br/>`ClientCertConfiguration` | The configuration for the client certificates. |
+
+
+
+
+TLSMode
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [TLSConfiguration](#pgd-k8s-enterprisedb-io-v1beta1-TLSConfiguration)
+
+TLSMode describes which mode should be used for the node to node communications
+
+VolumeSnapshotRestoreStatus
+
+**Appears in:**
+
+- [RestoreStatus](#pgd-k8s-enterprisedb-io-v1beta1-RestoreStatus)
+
+VolumeSnapshotRestoreStatus the volumeSnapshot to restore
+
+
+
+VolumeSnapshotsConfiguration
+
+**Appears in:**
+
+- [Restore](#pgd-k8s-enterprisedb-io-v1beta1-Restore)
+
+VolumeSnapshotsConfiguration contains the configuration for the volumeSnapshots restore
+
+
+| Field | Description |
+| ----- | ----------- |
+| `selector` [Required]<br/>`LabelSelector` | Label selector used to select the volumeSnapshot to restore. |
+
+
+
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
index 5c07733f271..0bc9675cdc9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
@@ -9,26 +9,26 @@ container image registries under `docker.enterprisedb.com`.
!!! Important
Access to the private registries requires an account with EDB and is
- reserved for EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
- Credentials are run through your EDB account.
- For trials, see [Trials](#trials).
+ reserved to EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
+ Credentials will be funneled through your EDB account.
+ For trials, please refer to the ["Trials"](#trials) section below.
## Which repository to choose?
-EDB Postgres Distributed for Kubernetes is available as part of the Extreme
-High Availability Add-On on top of either the EDB Enterprise Plan or EDB
-Standard Plan.
+EDB Postgres Distributed for Kubernetes is available as part of the "Extreme
+High Availability Add-On" on top of either the "EDB Enterprise Plan" or "EDB
+Standard Plan".
Depending on your subscription plan, EDB Postgres Distributed for Kubernetes
-is in one of the following repositories.
+will be in one of the following repositories, as described in the table below:
| Plan | Repository |
| --------------------- | -------------------- |
| EDB Standard Plan | `k8s_standard_pgd` |
| EDB EnterpriseDB Plan | `k8s_enterprise_pgd` |
-Use the name of the repository as the username when you
-log in to the EDB container registry, for example, through `docker login` or a
+The name of the repository shall be used as the *Username* when you try to
+log in to the EDB container registry, for example through `docker login` or a
[`kubernetes.io/dockerconfigjson` pull secret](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types).
!!! Important
@@ -38,24 +38,25 @@ log in to the EDB container registry, for example, through `docker login` or a
## How to retrieve the token
-In the [repos page in EDB](https://www.enterprisedb.com/repos-downloads),
-is an EDB Repos 2.0 section where a repo token appears obscured.
+In the ["repos" page in EDB](https://www.enterprisedb.com/repos-downloads),
+you'll find an *EDB Repos 2.0* section where a `Repo Token` is shown obscured.
![EDB Repo Portal](images/edb-repo-portal.png)
-Next to the repo token is a **Copy Token** button to copy the token and an eye icon
-for looking at the content of the token.
+Next to the "Repo Token" you'll find a button to copy the token, and an eye icon
+in case you want to look at the content of the token.
-Use the repo token as the password when you log in to the EDB
+The "Repo Token" shall be used as the *Password* when you try to login to EDB
container registry.
### Example with `docker login`
-You can log in using Docker from your terminal. We suggest that you
-copy the repo token using **Copy Token**. The `docker` command prompts you for a username and a password.
+You should be able to log in via Docker from your terminal. We suggest you
+copy the Repo Token using the `Copy Token` button. The `docker` command below
+will prompt you for a username and a password.
-The username is the repo you're trying to access,
-and the password is the token you just copied:
+As explained above, the username should be the repo you are trying to access
+while the password is the token you just copied.
```sh
$ docker login docker.enterprisedb.com
@@ -66,37 +67,37 @@ Login Succeeded
## Trials
-If you're a trialist or a preview user, use `k8s_enterprise_pgd` as the name
-of the repository, and follow the instructions in
-[How to retrieve the token](#how-to-retrieve-the-token) for the token.
+If you are a trialist or a preview user, use `k8s_enterprise_pgd` as the name
+of the repository and follow the instructions in
+["How to retrieve the token"](#how-to-retrieve-the-token) for the token.
## Operand images
EDB Postgres Distributed for Kubernetes is an operator that supports running
-EDB Postgres Distributed (PGD) version 5 on three PostgreSQL distributions:
+Postgres Distributed (PGD) version 5 on three PostgreSQL distributions:
- PostgreSQL
-- EDB Postgres Advanced Server
+- EDB Postgres Advanced
- EDB Postgres Extended
!!! Important
- See [Choosing a Postgres distribution](/pgd/latest/choosing_server/)
- in the PGD documentation for details and a comparison of PGD on the
+ Please refer to ["Choosing a Postgres distribution"](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+ from the PGD documentation for details and a comparison of PGD on the
different supported PostgreSQL distributions.
Due to the immutable application container adoption in EDB operators, the
-operator expects for the container images to include all the binaries required
+operator expects that the container images include all the binaries required
to run the requested version of PGD on top of the required distribution and
version of Postgres.
-These images follow the requirements and the conventions described in
-[Container image requirements](/postgres_for_kubernetes/latest/container_images/)
-in the EDB Postgres for Kubernetes documentation, adding the `bdr5`
+These images follow the requirements and the conventions described in the
+["Container image requirements"](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/container_images/)
+page of the EDB Postgres for Kubernetes documentation, adding the `bdr5`
extension.
-The table shows the image name prefix for each Postgres distribution.
+In the table below you can find the image name prefix for each Postgres distribution:
| Postgres distribution | Versions | Image name | Repositories |
| --------------------- | -------- | --------------------------- | ---------------------------------------- |
| EDB Postgres Extended | 15, 14 | `edb-postgres-extended-pgd` | `k8s_standard_pgd`, `k8s_enterprise_pgd` |
-| EDB Postgres Advanced | 15, 14 | `edb-postgres-advanced-pgd` | `k8s_enterprise_pgd` |
\ No newline at end of file
+| EDB Postgres Advanced | 15, 14 | `edb-postgres-advanced-pgd` | `k8s_enterprise_pgd` |
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index bdd094e8917..0df0cf3505a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -1,48 +1,49 @@
---
-title: 'Quick start'
+title: 'Quickstart'
originalFilePath: 'src/quickstart.md'
---
-You can test an EDB Postgres Distributed (PGD) cluster on your
-laptop or computer using EDB Postgres Distributed for Kubernetes
+This section describes how to test an EDB Postgres Distributed (PGD) cluster on your
+laptop/computer using EDB Postgres Distributed for Kubernetes (PG4K-PGD)
on a single local
Kubernetes cluster built with [Kind](https://kind.sigs.k8s.io/).
!!! Warning
- These instructions are only for demonstration,
- testing, and practice purposes and must not be used in production.
+ The instructions contained in this section are for demonstration,
+ testing, and practice purposes only and must not be used in production.
-This quick start shows you how to start an EDB Postgres Distributed
-cluster on your local Kubernetes installation so you can experiment with it.
+By following the instructions on this page you should be able to start an EDB Postgres Distributed
+cluster on your local Kubernetes installation and experiment with it.
!!! Important
- To connect to the Kubernetes cluster, make sure that you have `kubectl` installed on your machine.
- See the Kubernetes documentation
- on [installing `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+ Make sure that you have `kubectl` installed on your machine in order
+ to connect to the Kubernetes cluster. Please follow the Kubernetes documentation
+ on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
-## Part 1 - Set up the local Kubernetes playground
+## Part 1 - Set up the local Kubernetes playground
-Install Kind, a tool for running local Kubernetes
-clusters using Docker container nodes. (Kind stands for Kubernetes IN Docker.)
-If you already have access to a Kubernetes cluster, you can skip to Part 2.
+This section is about installing Kind, a tool for running local Kubernetes
+clusters using Docker container "nodes" (indeed, Kind stands for "Kubernetes
+IN Docker").
+If you already have access to a Kubernetes cluster, you may skip to the next
+section.
-Install Kind on your environment following the instructions in [Kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start).
-Then, create a Kubernetes cluster:
+Install `kind` on your environment following the instructions in the [Kind Quickstart](https://kind.sigs.k8s.io/docs/user/quick-start),
+then create a Kubernetes cluster with:
```sh
kind create cluster --name pgd
```
-
+
## Part 2 - Install EDB Postgres Distributed for Kubernetes
-After you have a Kubernetes installation up and running on your laptop, you
-can install EDB Postgres Distributed for Kubernetes.
+Now that you have a Kubernetes installation up and running on your laptop, you
+can proceed with the installation of EDB Postgres Distributed for Kubernetes.
-See [Installation](installation_upgrade.md) for details.
+Please refer to the ["Installation"](installation_upgrade.md) section and then
+proceed with the deployment of a PGD cluster.
## Part 3 - Deploy a PGD cluster
@@ -50,15 +51,15 @@ As with any other deployment in Kubernetes, to deploy a PGD cluster you need to
apply a configuration file that defines your desired `PGDGroup` resources that
make up a PGD cluster.
-Some sample files are included in the EDB Postgres Distributed for Kubernetes repository. The
+Some sample files are included in the PG4K-PGD repository. The
[flexible_3regions.yaml](../samples/flexible_3regions.yaml) manifest
-contains the definition of a PGD cluster with two data groups and a global
-witness node spread across three regions. Each data group consists of two data nodes
+contains the definition of a PGD cluster with 2 Data Groups and a global
+witness node spread across 3 regions. Each Data Group consists of 2 data nodes
and a local witness node.
-!!! Note
- For more details about the available options, see
- the [API reference](api_reference.md).
+!!! Note "There's more"
+ For more detailed information about the available options, please refer
+ to the ["API Reference" section](pg4k-pgd.v1beta1.md).
You can deploy the `flexible-3-regions` example by saving it first and running:
@@ -66,20 +67,20 @@ You can deploy the `flexible-3-regions` example by saving it first and running:
kubectl apply -f flexible_3regions.yaml
```
-You can check that the pods are being created using the `get pods` command:
+You can check that the pods are being created with the `get pods` command:
```sh
kubectl get pods
```
-The pods are being created as part of PGD nodes. As described in the
-[Architecture](architecture.md), they're implemented on top
-of EDB Postgres for Kubernetes clusters.
+The pods are being created as part of PGD nodes, and as described in the
+[architecture document](architecture.md), they are implemented on top
+of PG4K Clusters.
-You can list the clusters then, which shows the PGD nodes:
+We can then list the clusters, which will show us the PGD nodes:
```sh
-$ kubectl get clusters
+$ kubectl get clusters
NAME AGE INSTANCES READY STATUS PRIMARY
region-a-1 2m50s 1 1 Cluster in healthy state region-a-1-1
region-a-2 118s 1 1 Cluster in healthy state region-a-2-1
@@ -89,7 +90,7 @@ region-a-3 91s 1 1 Cluster in healthy state region-a-3-1
```
Ultimately, the PGD nodes are created as part of the PGD groups
-that make up your PGD cluster.
+that make up our PGD cluster.
```sh
$ kubectl get pgdgroups
@@ -99,4 +100,4 @@ region-b 2 1 PGDGroup - Healthy 4m50s
region-c 0 1 PGDGroup - Healthy 4m50s
```
-Notice how the region-c group is only a witness node.
\ No newline at end of file
+Notice how the region-c group is only a witness node.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
index e2b8444a5de..20b381082af 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
@@ -3,35 +3,36 @@ title: 'Recovery'
originalFilePath: 'src/recovery.md'
---
-In EDB Postgres Distributed for Kubernetes, recovery is available as a way
-to bootstrap a new PGD group starting from an available physical backup of a PGD node.
-The recovery can't be performed in place on an existing PGD group.
-EDB Postgres Distributed for Kubernetes also supports point-in-time recovery (PITR), which allows you to restore a PGD group up to
+In EDB Postgres Distributed for Kubernetes (PG4K-PGD), recovery is available as a way
+to bootstrap a new PGD Group starting from an available physical backup of a PGD Node.
+The recovery cannot be performed "in-place" on an existing PGD Group.
+PG4K-PGD also supports Point In Time Recovery, which allows you to restore a PGDGroup up to
any point in time, from the first available backup in your catalog to the last archived
-WAL. Having a WAL archive is mandatory in this case.
+WAL (having a WAL archive is mandatory in this case).
## Prerequisite
-Before recovering from a backup:
+Before recovering from a Backup, take care to apply the following considerations:
- Make sure that the PostgreSQL configuration (`.spec.cnp.postgresql.parameters`) of the
- recovered cluster is compatible with the original one from a physical replication standpoint.
+ recovered cluster is compatible, from a physical replication standpoint, with the original one.
-- When recovering in a newly created namespace, first set up a cert-manager CA issuer before deploying the recovered PGD group.
+- When recovering in a newly created namespace, remember to first set up a cert-manager CA Issuer before deploying the recovered PGDGroup.
-For more information, see [EDB Postgres for Kubernetes recovery - Additional considerations](/postgres_for_kubernetes/latest/bootstrap/#additional-considerations) in the EDB Postgres for Kubernetes documentation.
+For further information, refer to the [PG4K Recovery - Additional considerations](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#additional-considerations) documentation section.
## Recovery from an object store
-You can recover from a PGD node backup created by Barman Cloud and stored on supported object storage.
+You can recover from a PGD Node backup created by Barman Cloud and stored on supported object storage.
-For example, given a PGD group` named `pgdgroup-example` with three instances with backups available, your object storage contains a directory for each node:
+For example, given a PGDGroup named `pgdgroup-example` with 3 instances and with backups available, your object storage
+should contain a directory for each node:
`pgdgroup-example-1`, `pgdgroup-example-2`, `pgdgroup-example-3`
-This example defines a full recovery from the object store.
-The operator transparently selects the latest backup between the defined `serverNames` and
-replays up to the last available WAL.
+The following example will define a full recovery from the object store.
+The operator will transparently select the latest backup between the defined `serverNames`, and
+replay up to the last available WAL.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -62,25 +63,25 @@ spec:
!!! Important
Make sure to correctly configure the WAL section according to the source cluster.
- In the example, since the `pgdgroup-example` PGD group uses `compression`
- and `encryption`, make sure to set the proper parameters also in the PGD group
+ In the above example, since the `pgdgroup-example` PGDGroup uses `compression`
+ and `encryption`, make sure to set the proper parameters also in the PGDGroup
that's being created by the `restore`.
!!! Note
- The example takes advantage of the parallel WAL restore feature,
- dedicating up to eight jobs to concurrently fetch the required WAL files from the archive.
+ In the above example we are taking advantage of the parallel WAL restore feature,
+ dedicating up to 8 jobs to concurrently fetch the required WAL files from the archive.
This feature can appreciably reduce the recovery time. Make sure that you plan ahead
for this scenario and tune the value of this parameter for your environment.
- It makes a difference when you need it.
+    It will certainly make a difference when you need it.
-## PITR from an object store
+## Point in time recovery (PITR) from an object store
-Instead of replaying all the WALs up to the latest one, after extracting a base backup, you can ask PostgreSQL to stop replaying
-WALs at any point in time.
-PostgreSQL uses this technique to achieve PITR.
-(The presence of a WAL archive is mandatory.)
+Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL to stop replaying
+WALs at any given point in time, after having extracted a base backup.
+PostgreSQL uses this technique to achieve point-in-time recovery (PITR).
+The presence of a WAL archive is mandatory.
-This example defines a time-base target for the recovery:
+The following example will define a time-based target for the recovery:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -112,19 +113,19 @@ spec:
```
!!! Important
- PITR requires you to specify a `targetTime` recovery target by using the options described
- in [Recovery targets](#recovery-targets). When you use `targetTime` or `targetLSN`, the operator
- selects the closest backup that was completed before that target. Otherwise, it
+ PITR requires you to specify a `targetTime` recovery target, by using the options described
+ in the "Recovery targets" section below. When you use `targetTime` or `targetLSN`, the operator
+ automatically selects the closest backup that was completed before that target. Otherwise, it
selects the last available backup in chronological order between the specified `serverNames`.
## Recovery from an object store specifying a `backupID`
The `.spec.restore.recoveryTarget.backupID` option allows you to specify a base backup from
-which to start the recovery process. By default, this value is empty.
-If you assign a value to it, the operator uses that backup as the base for the recovery. The value must be in the form of a Barman backup ID.
+which to initiate the recovery process. By default, this value is empty.
+If you assign a value to it (in the form of a Barman backup ID), the operator will use that backup as the base for the recovery.
-This example recovers a new PGD group from a specific backupID of the
-`pgdgroup-backup-1` PGD node:
+The following example recovers a new PGDGroup from a specific backupID of the
+`pgdgroup-backup-1` PGD Node:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -154,16 +155,18 @@ spec:
```
!!! Important
- When a `backupID` is specified, make sure to define only the related PGD node
+ When a `backupID` is specified, make sure to only define the related PGD Node
in the `serverNames` option, and avoid defining the other ones.
!!! Note
Defining a specific `backupID` is especially needed when using one of the
following recovery targets: `targetName`, `targetXID`, and `targetImmediate`.
- In such cases, it's important to specify `backupID`, unless
- the last available backup in the catalog is okay.
+ In such cases, it is important to specify `backupID`, unless you are OK with
+ the last available backup in the catalog.
## Recovery targets
-Beyond PITR are other recovery target criteria you can use.
-For more information on all the available recovery targets, see [EDB Postgres for Kubernetes recovery targets](/postgres_for_kubernetes/latest/bootstrap/#point-in-time-recovery-pitr) in the EDB Postgres for Kubernetes documentation.
\ No newline at end of file
+Beyond PITR, there are other recovery target criteria you can use.
+For more information on all the available recovery targets, you can
+refer to the [PG4K Recovery targets](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#point-in-time-recovery-pitr)
+documentation.
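
For orientation only, the following minimal sketch combines the `serverNames` and
`recoveryTarget` options discussed on this page; the group name and the Barman
backup ID are hypothetical placeholders, and the complete examples above remain
the reference:

```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: pgdgroup-restored
spec:
  instances: 3
  restore:
    serverNames:
      - pgdgroup-backup-1
    recoveryTarget:
      # hypothetical Barman backup ID; list only the related PGD Node in serverNames
      backupID: "20240115T120000"
      # stop replaying WALs as soon as a consistent state is reached
      targetImmediate: true
```
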
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
index 7f76443619f..c792c26aacb 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
@@ -5,8 +5,28 @@ originalFilePath: 'src/release_notes.md'
History of user-visible changes for EDB Postgres Distributed for Kubernetes.
-## Version 0.7.1
+## Version 1.0.0
-** release date:\*** 8 September 2023
+**Release date:** 15 February 2024
-Internal release for testing purposes
+This is the first major stable release of EDB Postgres Distributed for
+Kubernetes, a Kubernetes operator to deploy and manage
+EDB Postgres Distributed clusters.
+
+The operator implements the `PGDGroup` custom resource
+in the API group `pgd.k8s.enterprisedb.io`.
+This resource can be used to create and manage EDB Postgres Distributed clusters
+inside Kubernetes with capabilities including:
+
+- Deployment of EDB Postgres Distributed clusters with versions 5 and later
+- Additional self-healing capability on top of that of Postgres Distributed,
+ such as recovery and restart of failed PGD nodes
+- Definition of the services to connect applications to the
+ write leader of each PGD group
+- Implementation of Raft subgroups
+- Support for Local Persistent Volumes with PVC templates
+- Reuse of Persistent Volumes storage in Pods
+- TLS connections and client certificate authentication
+- Continuous backup to an S3 compatible object store
+- Pause and resume a PGD cluster, saving computational resources by temporarily
+ removing database pods while keeping the database PVCs.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
index 7e54016add4..a0cc3b93770 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
@@ -4,16 +4,16 @@ originalFilePath: 'src/samples.md'
---
!!! Important
- The available examples are for demonstration and
+    The examples available in this section are for demonstration and
experimentation purposes only.
-These examples are configuration files for setting up
-your EDB Postgres Distributed cluster in a Kubernetes environment.
+In this section, you can find some examples of configuration files to set up
+your EDB Postgres Distributed Cluster in a Kubernetes environment.
Flexible 3 regions
: [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml):
- a PGD cluster with two data groups and a global witness node spread across three
- regions, where each data groups consists of two data nodes and a local witness
+  a PGD cluster with 2 Data Groups and a global witness node spread across 3
+  regions, where each Data Group consists of 2 data nodes and a local witness
node.
-For a list of available options, see the [API reference](api_reference.md).
\ No newline at end of file
+For a list of available options, please refer to the ["API Reference" page](pg4k-pgd.v1beta1.md).
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
index c7a69c946d3..9b6b296253c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
@@ -3,30 +3,31 @@ title: 'Security'
originalFilePath: 'src/security.md'
---
-Security for EDB Postgres Distributed for Kubernetes is
-analyzed at three layers: code, container, and cluster.
+This section contains information about security for EDB Postgres Distributed for Kubernetes,
+which is analyzed at 3 different layers: Code, Container and Cluster.
!!! Warning
- In addition to security practices described here, you must
- perform regular InfoSec duties on your Kubernetes cluster.
- Familiarize yourself with [Overview of Cloud Native Security](https://kubernetes.io/docs/concepts/security/overview/)
- in the Kubernetes documentation.
+    The information contained in this page does not exempt you from
+    performing regular InfoSec duties on your Kubernetes cluster. Please
+    familiarize yourself with the ["Overview of Cloud Native Security"](https://kubernetes.io/docs/concepts/security/overview/)
+    page from the Kubernetes documentation.
!!! Seealso "About the 4C's Security Model"
- See [The 4C's Security Model in Kubernetes](https://www.enterprisedb.com/blog/4cs-security-model-kubernetes)
- blog article for a better understanding and context of the approach EDB
- takes with security in EDB Postgres Distributed for Kubernetes.
+ Please refer to ["The 4C's Security Model in Kubernetes"](https://www.enterprisedb.com/blog/4cs-security-model-kubernetes)
+ blog article to get a better understanding and context of the approach EDB
+ has taken with security in EDB Postgres Distributed for Kubernetes.
## Code
-Source code of EDB Postgres Distributed for Kubernetes is systematically scanned for static analysis purposes,
-including security problems. EDB uses a popular open-source linter for Go called
+Source code of EDB Postgres Distributed for Kubernetes is *systematically scanned* for static analysis purposes,
+including **security problems**, using a popular open-source linter for Go called
[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline.
-GolangCI-Lint can run several linters on the same source code.
+GolangCI-Lint can run several *linters* on the same source code.
-One of these is [Golang Security Checker](https://github.com/securego/gosec), or `gosec`.
-`gosec` is a linter that scans the abstract syntactic tree of the source against a set of rules aimed at discovering well-known vulnerabilities, threats, and weaknesses hidden in
-the code. These threads include hard-coded credentials, integer overflows, SQL injections, and others.
+One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`,
+a linter that scans the abstract syntactic tree of the source against a set of rules aimed at
+the discovery of well-known vulnerabilities, threats, and weaknesses hidden in
+the code, such as hard-coded credentials, integer overflows, and SQL injections, to name a few.
!!! Important
A failure in the static code analysis phase of the CI/CD pipeline is a blocker
@@ -35,104 +36,104 @@ the code. These threads include hard-coded credentials, integer overflows, SQL i
## Container
-Every container image that's part of EDB Postgres Distributed for Kubernetes is built by way of CI/CD pipelines following every commit.
-Such images include not only those of the operator but also of the operands, specifically every supported PostgreSQL version.
-In the pipelines, images are scanned with:
+Every container image that is part of EDB Postgres Distributed for Kubernetes is automatically built via CI/CD pipelines following every commit.
+Such images include not only the operator's, but also the operands' - specifically every supported PostgreSQL version.
+Within the pipelines, images are scanned with:
-- [Dockle](https://github.com/goodwithtech/dockle) for best practices in terms
+- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
of the container build process
-- [Clair](https://github.com/quay/clair) for vulnerabilities found in both the
+- [Clair](https://github.com/quay/clair): for vulnerabilities found in both the
underlying operating system and libraries and applications that they run
!!! Important
- All operand images are rebuilt once a day by our pipelines in case
- of security updates at the base image and package level, providing patch level updates
+ All operand images are automatically rebuilt once a day by our pipelines in case
+ of security updates at the base image and package level, providing **patch level updates**
for the container images that EDB distributes.
-The following guidelines and frameworks were taken into account for container-level security:
+The following guidelines and frameworks have been taken into account for container-level security:
-- The [Container Image Creation and Deployment Guide](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
+- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
-- The [CIS Benchmark for Docker](https://www.cisecurity.org/benchmark/docker/),
+- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
developed by the Center for Internet Security (CIS)
-!!! Seealso "About the container-level security"
- See the [Security and Containers in EDB Postgres Distributed for Kubernetes](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
- blog article for more information about the approach that EDB takes on
+!!! Seealso "About the Container level security"
+ Please refer to ["Security and Containers in EDB Postgres Distributed for Kubernetes"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
+ blog article for more information about the approach that EDB has taken on
security at the container level in EDB Postgres Distributed for Kubernetes.
## Cluster
Security at the cluster level takes into account all Kubernetes components that
-form both the control plane and the nodes as well as the applications that run in
-the cluster, including PostgreSQL.
+form both the control plane and the nodes, as well as the applications that run in
+the cluster (PostgreSQL included).
-### Role-based access control (RBAC)
+### Role Based Access Control (RBAC)
The operator interacts with the Kubernetes API server with a dedicated service
-account called pgd-operator-controller-manager. In Kubernetes this account is installed
-by default in the `pgd-operator-system` namespace. A cluster role
-binds between this service account and the pgd-operator-controller-manager
-cluster role that defines the set of rules, resources, and verbs granted to the operator.
+account called `pgd-operator-controller-manager`. In Kubernetes this is installed
+by default in the `pgd-operator-system` namespace, with a cluster role
+binding between this service account and the `pgd-operator-controller-manager`
+cluster role which defines the set of rules/resources/verbs granted to the operator.
-RedHat OpenShift directly manages the operator RBAC entities by way of [Operator
+Red Hat OpenShift directly manages the operator RBAC entities via [Operator
Lifecycle
-Manager (OLM)](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html). OLM
-allows you to grant permissions only where they're required,
+Manager](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html),
+allowing the user to grant permissions only where they are required,
implementing the principle of least privilege.
!!! Important
- These permissions are exclusively reserved for the operator's service
- account to interact with the Kubernetes API server. They aren't directly
+ The above permissions are exclusively reserved for the operator's service
+ account to interact with the Kubernetes API server. They are not directly
accessible by the users of the operator that interact only with `PGDGroup`
and `PGDGroupCleanup` resources.
-The following are some examples and, most importantly, the reasons why
+Below we provide some examples and, most importantly, the reasons why
EDB Postgres Distributed for Kubernetes requires full or partial management of standard Kubernetes
namespaced resources.
`jobs`
-: The operator needs to handle jobs to manage different `PGDGroup` phases.
+: The operator needs to handle jobs to manage different `PGDGroup` phases.
`poddisruptionbudgets`
-: The operator uses pod disruption budgets to make sure enough PGD nodes
+: The operator uses pod disruption budgets to make sure enough PGD Nodes
are kept active during maintenance operations.
`pods`
-: The operator needs to manage PGD nodes as a `Cluster` resource.
+: The operator needs to manage PGD Nodes (as a `Cluster` resource).
`secrets`
: Unless you provide certificates and passwords to your data nodes,
the operator adopts the "convention over configuration" paradigm by
- self-provisioning random-generated passwords and TLS certificates and by
+  self-provisioning randomly generated passwords and TLS certificates, and by
storing them in secrets.
`serviceaccounts`
: The operator needs to create a service account to
- enable the `PGDGroup` recovery job to retrieve the backup objects from
+ enable the PGDGroup recovery job to retrieve the backup objects from
the object store where they reside.
`services`
: The operator needs to control network access to the PGD cluster
- from applications and properly manage
+ from applications, and properly manage
failover/switchover operations in an automated way.
`statefulsets`
-: The operator needs to manage PGD proxies.
+: The operator needs to manage PGD Proxies.
`validatingwebhookconfigurations` and `mutatingwebhookconfigurations`
: The operator injects its self-signed webhook CA into both webhook
configurations, which are needed to validate and mutate all the resources it
- manages. For more details, see the
+ manages. For more details, please see the
[Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/).
To see all the permissions required by the operator, you can run `kubectl
describe clusterrole pgd-operator-manager-role`.
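
For example, assuming the default installation namespace mentioned above, you can
review the operator's service account and the rules bound to it with:

```sh
# service account used by the operator (default installation)
kubectl get serviceaccount pgd-operator-controller-manager -n pgd-operator-system

# rules, resources, and verbs granted to the operator
kubectl describe clusterrole pgd-operator-manager-role
```
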
-EDB Postgres Distributed for Kubernetes internally manages the PGD nodes using the `Cluster` resource as defined by EDB Postgres
-for Kubernetes. See the
-[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/security/)
-for the list of permissions used by the EDB Postgres for Kubernetes operator service account.
+PG4K-PGD internally manages the PGD nodes using the `Cluster` resource as defined by EDB Postgres
+for Kubernetes (PG4K). We refer you to the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
+for the list of permissions used by the PG4K operator service account.
### Calls to the API server made by the instance manager
@@ -144,72 +145,53 @@ a dedicated `ServiceAccount` created by the operator that shares the same
PostgreSQL `Cluster` resource name.
!!! Important
- The operand can access only a specific and limited subset of resources
- through the API server. A service account is the recommended way to access the API server from within a pod. See the
- [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/) for details.
+ The operand can only access a specific and limited subset of resources
+ through the API server. A service account is the
+ [recommended way to access the API server from within a Pod](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/).
-See the
+We refer you to the
[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
-for more information on the instance manager.
+for additional depth on the instance manager.
-### Pod security policies
+### Pod Security Policies
-A [pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
+A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
is the Kubernetes way to define security rules and specifications that a pod needs to meet
to run in a cluster.
-For InfoSec reasons, every Kubernetes platform must implement them.
+For InfoSec reasons, every Kubernetes platform should implement them.
-EDB Postgres Distributed for Kubernetes doesn't require privileged mode for containers execution.
-The PostgreSQL containers run as the postgres system user. No component requires running as root.
+EDB Postgres Distributed for Kubernetes does not require *privileged* mode for container execution.
+The PostgreSQL containers run as the `postgres` system user. No component whatsoever requires running as `root`.
-Likewise, volumes access doesn't require privileged mode or root privileges.
-Proper permissions must be assigned by the Kubernetes platform or administrators.
-The PostgreSQL containers run with a read-only root filesystem, that is, no writable layer.
+Likewise, volume access does not require *privileged* mode or `root` privileges either.
+Proper permissions must be assigned by the Kubernetes platform and/or administrators.
+The PostgreSQL containers run with a read-only root filesystem (i.e. no writable layer).
The operator explicitly sets the required security contexts.
-On Red Hat OpenShift, Cloud Native PostgreSQL runs in the `restricted` security context constraint,
+On Red Hat OpenShift, Cloud Native PostgreSQL runs in the `restricted` security context constraint,
the most restrictive one. The goal is to limit the execution of a pod to a namespace allocated UID
and SELinux context.
!!! Seealso "Security Context Constraints in OpenShift"
- For more information on security context constraints (SCC) in
- OpenShift, see the
- [Managing SCC in OpenShift](https://www.openshift.com/blog/managing-sccs-in-openshift)
+ For further information on Security Context Constraints (SCC) in
+ OpenShift, please refer to the
+ ["Managing SCC in OpenShift"](https://www.openshift.com/blog/managing-sccs-in-openshift)
article.
-!!! Warning "Security context constraints and namespaces"
- As stated in the [Openshift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html#role-based-access-to-ssc_configuring-internal-oauth),
- SCCs aren't applied in the default namespaces (`default`, `kube-system`,
- `kube-public`, `openshift-node`, `openshift-infra`, `openshift`). Don't use them
- to run pods. CNP clusters deployed in those namespaces
+!!! Warning "Security Context Constraints and namespaces"
+    As stated in the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html#role-based-access-to-ssc_configuring-internal-oauth),
+ SCCs are not applied in the default namespaces (`default`, `kube-system`,
+ `kube-public`, `openshift-node`, `openshift-infra`, `openshift`) and those
+ should not be used to run pods. CNP clusters deployed in those namespaces
will be unable to start due to missing SCCs.
-
-#### Exposed ports
+#### Exposed Ports
-EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager, and operand
-levels, as shown in the table.
+EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager and operand
+levels, as listed in the table below:
| System | Port number | Exposing | Name | Certificates | Authentication |
| :--------------- | :---------- | :------------------ | :--------------- | :----------- | :------------- |
@@ -221,29 +203,29 @@ levels, as shown in the table.
### PGD
-The current implementation of EDB Postgres Distributed for Kubernetes creates
-passwords for the postgres superuser and the database owner.
+The current implementation of EDB Postgres Distributed for Kubernetes automatically creates
+passwords for the `postgres` superuser and the database owner.
-As far as encryption of passwords is concerned, EDB Postgres Distributed for Kubernetes follows
-the default behavior of PostgreSQL: starting with PostgreSQL 14,
-`password_encryption` is by default set to `scram-sha-256`. On earlier
-versions, it's set to `md5`.
+As far as encryption of passwords is concerned, EDB Postgres Distributed for Kubernetes follows
+the default behavior of PostgreSQL: starting from PostgreSQL 14,
+`password_encryption` is by default set to `scram-sha-256`, while on earlier
+versions it is set to `md5`.
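
If you want to double-check the effective setting on one of your data nodes, you
can, for example, run the following from a `psql` session (the output shown
assumes the PostgreSQL 14 and later default):

```sql
SHOW password_encryption;
 password_encryption
---------------------
 scram-sha-256
```
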
!!! Important
- See [Connection DSNs and SSL](/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
- in the PGD documentation for details.
+ Please refer to the ["Connection DSNs and SSL"](https://www.enterprisedb.com/docs/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
+ section in the PGD documentation for details.
-You can disable management of the postgres user password using secrets by setting
+You can disable management of the `postgres` user password via secrets by setting
`enableSuperuserAccess` to `false` in the `cnp` section of the spec.
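
For example, a minimal sketch of a PGDGroup with superuser access disabled; the
group name and instance count are illustrative, and `enableSuperuserAccess` sits
in the `cnp` section as described above:

```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: pgdgroup-example
spec:
  instances: 3
  cnp:
    enableSuperuserAccess: false
```
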
!!! Note
The operator supports toggling the `enableSuperuserAccess` option. When you
- disable it on a running cluster, the operator ignores the content of the secret.
- Remove it (if previously generated by the operator) and set the password of the
- postgres user to `NULL`, in effect disabling remote access through password authentication.
+ disable it on a running cluster, the operator will ignore the content of the secret,
+ remove it (if previously generated by the operator) and set the password of the
+ `postgres` user to `NULL` (de facto disabling remote access through password authentication).
### Storage
EDB Postgres Distributed for Kubernetes delegates encryption at rest to the underlying storage class. For
data protection in production environments, we highly recommend that you choose
-a storage class that supports encryption at rest.
\ No newline at end of file
+a storage class that supports encryption at rest.
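
For instance, assuming your platform provides an encrypting storage class, here
hypothetically named `encrypted-gp3`, you can reference it from the `cnp.storage`
stanza, which follows the EDB Postgres for Kubernetes storage configuration:

```yaml
spec:
  instances: 3
  cnp:
    storage:
      size: 1Gi
      # hypothetical storage class providing encryption at rest
      storageClass: encrypted-gp3
```
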
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
index 47d71bcf954..3f652fe5436 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
@@ -1,17 +1,17 @@
---
-title: 'Client TLS/SSL connections'
+title: 'Client TLS/SSL Connections'
originalFilePath: 'src/ssl_connections.md'
---
!!! Seealso "Certificates"
- See [Certificates](certificates.md)
- for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
+ Please refer to the ["Certificates"](certificates.md)
+ page for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
-The EDB Postgres Distributed for Kubernetes operator was designed to work with TLS/SSL for both encryption in transit and
-authentication on server and client sides. PGD nodes are created as cluster
-resources using the EDB Postgres for Kubernetes operator. This
-includes deploying a certification
-authority (CA) to create and sign TLS client certificates.
+The EDB Postgres Distributed for Kubernetes operator has been designed to work with TLS/SSL for both encryption in transit and
+authentication, on server and client sides. PGD nodes are created as Cluster
+resources using the EDB Postgres for Kubernetes (PG4K) operator, and this
+includes the deployment of a Certification
+Authority (CA) to create and sign TLS client certificates.
-See the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
-for more information on issuers and certificates.
\ No newline at end of file
+Please refer to the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
+for further information on issuers and certificates.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/supported_versions.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/supported_versions.mdx
new file mode 100644
index 00000000000..4e18e4c8e42
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/supported_versions.mdx
@@ -0,0 +1,22 @@
+---
+title: 'Supported versions'
+originalFilePath: 'src/supported_versions.md'
+---
+
+*This page lists the status for currently supported
+releases of EDB Postgres Distributed for Kubernetes*.
+
+## Support status of EDB Postgres Distributed for Kubernetes releases
+
+| Version | Currently Supported | Release Date | End of Life | Supported Kubernetes Versions | Supported OpenShift Versions | Supported Postgres versions |
+| ------- | ------------------- | -------------- | ----------- | ----------------------------- | ---------------------------- | --------------------------- |
+| 1.0 | Yes | April 24, 2024 | - | 1.26 -> 1.29 | 4.12 -> 4.14 | 12 -> 16 |
+
+The Postgres (operand) versions are limited to those supported by
+[EDB Postgres Distributed (PGD)](https://www.enterprisedb.com/docs/pgd/latest/).
+
+!!! Important
+ Please be aware that this page is informative only.
+ The ["Platform Compatibility"](https://www.enterprisedb.com/product-compatibility#cnp) page
+ from the EDB website contains the official list of supported software and
+ Kubernetes distributions.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
new file mode 100644
index 00000000000..5c99a8aa65a
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
@@ -0,0 +1,119 @@
+---
+title: 'Transparent Data Encryption (TDE)'
+originalFilePath: 'src/tde.md'
+---
+
+!!! Important
+ TDE is available *only* for operands that support it:
+ EPAS versions 15 and newer, Postgres Extended versions 15 and newer.
+
+Transparent Data Encryption, or TDE, is a technology used by several database
+vendors to **encrypt data at rest**, i.e. database files on disk.
+TDE does not, however, encrypt data in use.
+
+TDE is included in EDB Postgres Advanced Server (EPAS) or EDB Postgres
+Extended, starting with version 15, and it is supported by EDB Postgres
+Distributed for Kubernetes.
+
+!!! Important
+    Before you proceed, please take some time to familiarize yourself with the
+ [TDE feature in the EPAS documentation](https://www.enterprisedb.com/docs/tde/latest/).
+
+With TDE activated, both WAL files and files for tables will be encrypted.
+Data encryption/decryption is entirely transparent to the user, as it is
+managed by the database without requiring any application changes or updated
+client drivers.
+
+The support for TDE on EDB Postgres Distributed for Kubernetes relies on the
+implementation from EDB Postgres for Kubernetes (PG4K). Please refer to
+[the PG4K documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/tde/)
+for the full context.
+
+We now show how to use TDE with a passphrase stored in a Kubernetes Secret,
+which will be used to encrypt the EPAS binary key.
+
+!!! Seealso "EPAS documentation"
+ Please refer to [the EPAS documentation](https://www.enterprisedb.com/docs/tde/latest/key_stores/)
+ for details on the EPAS encryption key.
+
+TDE on EDB Postgres Distributed for Kubernetes relies on the PG4K
+implementation.
+To activate TDE on a cluster, we use the `epas` section of the manifest,
+which is within the `cnp` section used for PG4K-level directives such as
+storage.
+Use the `tde` stanza to enable TDE, and set the name of the Kubernetes secret
+holding the TDE encryption key.
+
+The following YAML portion contains both a secret holding a passphrase
+(base-64 encoded), and the `epas` section activating TDE with the passphrase.
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: tde-key
+data:
+ key: bG9zcG9sbGl0b3NkaWNlbnBpb3Bpb3Bpb2N1YW5kb3RpZW5lbmhhbWJyZWN1YW5kb3RpZW5lbmZyaW8=
+
+---
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+[…]
+spec:
+ instances: 3
+[…]
+ cnp:
+ postgresql:
+ epas:
+ tde:
+ enabled: true
+ secretKeyRef:
+ name: tde-key
+ key: key
+ storage:
+ size: 1Gi
+```
+
+Again, please refer to [the PG4K documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/tde/)
+for additional depth, including how to create the encryption secret and
+additional ways of using TDE.
+
+As shown in the [TDE feature documentation](https://www.enterprisedb.com/docs/tde/latest/),
+the information will be encrypted at rest.
+
+For example, open a `psql` terminal into one of your data nodes.
+
+```sh
+kubectl exec -ti <data-node-pod> -- psql app
+```
+
+and create a new table including a text column.
+
+```sql
+create table foo(bar int, baz varchar);
+insert into foo(bar, baz) values (1, 'hello'), (2, 'goodbye');
+```
+
+And then verify the location where the newly defined table is stored on disk:
+
+```sql
+select pg_relation_filepath('foo');
+ pg_relation_filepath
+----------------------
+ base/16385/16387
+```
+
+You can open a terminal on the same data node:
+
+```sh
+kubectl exec -ti <data-node-pod> -- bash
+```
+
+and verify the file has been encrypted.
+
+```sh
+cd $PGDATA/base/16385
+hexdump -C 16387 | grep hello
+hexdump -C 16387 | grep goodbye
+```
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
index 831bcd9cf1d..538d13427b7 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
@@ -3,18 +3,19 @@ title: 'Use cases'
originalFilePath: 'src/use_cases.md'
---
-EDB Postgres Distributed for Kubernetes was designed to work with applications
-that reside in the same Kubernetes cluster for a full cloud native
+EDB Postgres Distributed for Kubernetes has been designed to work with applications
+that reside in the same Kubernetes cluster, for a full cloud native
experience.
However, it might happen that, while the database can be hosted
-inside a Kubernetes cluster, applications can't be containerized
-at the same time and need to run in a traditional environment such
+inside a Kubernetes cluster, applications cannot be containerized
+at the same time and need to run in a *traditional environment* such
as a VM.
-The following is a summary of the basic considerations. See the
-[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/use_cases/)
-for more detail.
+We reproduce here a summary of the basic considerations, and refer
+you to the
+[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/use_cases/)
+for further depth.
## Case 1: Applications inside Kubernetes
@@ -23,21 +24,21 @@ namespace inside a Kubernetes cluster.
![Application and Database inside Kubernetes](./images/apps-in-k8s.png)
-The application, normally stateless, is managed as a standard deployment,
-with multiple replicas spread over different Kubernetes nodes and internally
-exposed through a ClusterIP service.
+The application, normally stateless, is managed as a standard `Deployment`,
+with multiple replicas spread over different Kubernetes nodes, and internally
+exposed through a `ClusterIP` service.
-The service is exposed externally to the end user through an Ingress and the
-provider's load balancer facility by way of HTTPS.
+The service is exposed externally to the end user through an `Ingress` and the
+provider's load balancer facility, via HTTPS.
## Case 2: Applications outside Kubernetes
-Another possible use case is to manage your PGD database inside
-Kubernetes while having your applications outside of it, for example, in a
-virtualized environment. In this case, PGD is represented by an IP
-address or host name and a TCP port, corresponding to the defined Ingress
+Another possible use case is to manage your Postgres Distributed database inside
+Kubernetes, while having your applications outside of it (for example in a
+virtualized environment). In this case, Postgres Distributed is represented by an IP
+address (or host name) and a TCP port, corresponding to the defined Ingress
resource in Kubernetes.
-The application can still benefit from a TLS connection to PGD.
+The application can still benefit from a TLS connection to Postgres Distributed.
-![Application outside Kubernetes](./images/apps-outside-k8s.png)
\ No newline at end of file
+![Application outside Kubernetes](./images/apps-outside-k8s.png)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
index 4c971e5752a..7ec726a1e7f 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
@@ -1,34 +1,34 @@
---
-title: 'Managing EDB Postgres Distributed (PGD) databases'
+title: 'Managing EDB Postgres Distributed databases'
originalFilePath: 'src/using_pgd.md'
---
As described in the [architecture document](architecture.md),
EDB Postgres Distributed for Kubernetes is an operator created to deploy
-PGD databases.
+Postgres Distributed (PGD) databases.
It provides an alternative over deployment with TPA, and by leveraging the
Kubernetes ecosystem, it can offer self-healing and declarative control.
-The operator is also responsible of the backup and restore operations.
-See [Backup](backup.md).
+The operator is also responsible for the backup and restore operations
+(see the [backup](backup.md) document).
-However, many of the operations and control of PGD clusters aren't
+However, many of the operations and control of PGD clusters are not
managed by the operator.
-The pods created by EDB Postgres Distributed for Kubernetes come with the
-[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed. You can use
-this tool, for example, to execute a switchover.
+The pods created by EDB Postgres Distributed for Kubernetes come with
+[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed, and
+this is the tool that can be used, for example, to execute a switchover.
## PGD CLI
!!! Warning
- Don't use the PGD CLI to create and delete resources. For example,
- avoid the `create-proxy` and `delete-proxy` commands.
+ The PGD CLI should not be used to create/delete resources. For example,
+ the `create-proxy`, `delete-proxy` commands should be avoided.
Provisioning of resources is under the control of the operator, and manual
- creation and deletion isn't supported.
+ creation/deletion is not supported.
-As an example, execute a switchover command.
+As an example, let's execute a switchover command.
-We recommend that you use the PGD CLI from proxy pods. To find them,
-get a pod listing for your cluster:
+It is advisable to use the PGD CLI from proxy pods. Let's find them.
+You can get a pod listing for your cluster:
```shell
kubectl get pods -n my-namespace
@@ -41,14 +41,14 @@ location-a-proxy-0 1/1 Running 0 2h
location-a-proxy-1 1/1 Running 0 2h
```
-The proxy nodes have `proxy` in the name. Choose one, and get a command
+The proxy nodes have `proxy` in the name. Let's choose one and get a command
prompt in it:
```shell
kubectl exec -n my-namespace -ti location-a-proxy-0 -- bash
```
-You now have a bash session open with the proxy pod. The `pgd` command
+You should now have a bash session open with the proxy pod. The `pgd` command
is available:
```shell
@@ -91,37 +91,37 @@ location-a-3 1403922770 location-a data ACTIVE ACTIVE Up 3
## Accessing the database
-In [Use cases](use_cases.md) is a discussion on using the
-database within the Kubernetes cluster versus from outside. In
-[Connectivity](connectivity.md), you can find a discussion on services,
+In the [use cases document](use_cases.md) you can find a discussion on using the
+database within the Kubernetes cluster vs. from outside, and in the
+[connectivity document](connectivity.md), you can find a discussion on services,
which is relevant for accessing the database from applications.
-However you implement your system, your applications must use the proxy
-service to connect to reap the benefits of PGD and
+However you implement your system, your applications should use the proxy
+service to connect, in order to reap the benefits of Postgres Distributed, and
of the increased self-healing capabilities added by the EDB Postgres Distributed
for Kubernetes operator.
!!! Important
- As per the EDB Postgres for Kubernetes defaults, data nodes are
- created with a database called `app` and owned by a user named `app`, in
- contrast to the `bdrdb` database described in the EDB Postgres
- Distributed documentation. You can configure these values
- in the `cnp` section of the manifest.
- For reference, see [Bootstrap](/postgres_for_kubernetes/latest/bootstrap/) in the EDB Postgres for Kubernetes
- documentation.
+ Note that, as per the EDB Postgres for Kubernetes defaults, data nodes are
+ created with a database called `app`, owned by a user named `app`, in
+ contrast to the `bdrdb` database you'll find in the EDB Postgres
+ Distributed documentation. This
+ is configurable by the user, in the `cnp` section of the manifest.
+ See the [EDB Postgres for Kubernetes bootstrapping document](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/)
+ for reference.
-You might, however, want access to your PGD data nodes for administrative tasks,
-using the psql CLI.
+You may, however, want access to your PGD data nodes for administrative tasks,
+using the `psql` CLI.
-You can get a pod listing
-for your PGD cluster and `kubectl exec` into a data node:
+As we did in the previous section on using the PGD CLI, we can get a pod listing
+for our PGD cluster, and `kubectl exec` into a data node:
```shell
kubectl exec -n my-namespace -ti location-a-1-1 -- psql
```
-In the familiar territory of psql, remember that the default
-created database is named `app` (see previous warning).
+In the familiar territory of `psql`, you should remember that the default
+created database is named `app` (see warning above).
```terminal
postgres=# \c app
@@ -139,10 +139,10 @@ peer_target_state_name | ACTIVE
<- snipped ->
```
-For your applications, use the non-privileged role (`app`
+For your applications, of course, you should use the non-privileged role (`app`
by default).
-You need the user credentials, which are stored in a Kubernetes secret:
+You will need the user credentials, which are stored in a Kubernetes secret:
```shell
kubectl get secrets
@@ -152,7 +152,7 @@ NAME TYPE DATA AGE
location-a-app kubernetes.io/basic-auth 2 2h
```
-This secret contains the username and password needed for the Postgres DSN,
+This secret contains the username and password needed for the postgres DSN,
encoded in base64:
```shell
@@ -168,4 +168,4 @@ metadata:
labels:
<- snipped ->
-```
\ No newline at end of file
+```
From 3c227ec44aa6552741073ca4219bee22c7b7c395 Mon Sep 17 00:00:00 2001
From: Betsy Gitelman <93718720+ebgitelman@users.noreply.github.com>
Date: Tue, 12 Sep 2023 18:30:44 -0400
Subject: [PATCH 08/39] Second set of edits on pgd for kubernetes
Next section of PGD for Kubernetes edit
more edits to pgd for kubernetes doc
First read
Final first read edits to PG4K-PGD
Second read of EDB PGD for Kubernetes - first batch
rest of edits to EDB PGD for Kubernetes
---
.../1/architecture.mdx | 95 ++++----
.../1/backup.mdx | 60 ++---
.../1/before_you_start.mdx | 21 +-
.../1/certificates.mdx | 4 +-
.../1/connectivity.mdx | 184 ++++++++--------
.../1/index.mdx | 54 ++---
.../1/installation_upgrade.mdx | 37 ++--
.../1/openshift.mdx | 206 +++++++++---------
.../1/private_registries.mdx | 61 +++---
.../1/quickstart.mdx | 65 +++---
.../1/recovery.mdx | 77 ++++---
.../1/samples.mdx | 12 +-
.../1/security.mdx | 181 ++++++++-------
.../1/ssl_connections.mdx | 20 +-
.../1/use_cases.mdx | 35 ++-
.../1/using_pgd.mdx | 74 +++----
16 files changed, 583 insertions(+), 603 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index e8ac4330e00..5aababc156c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -3,10 +3,10 @@ title: 'Architecture'
originalFilePath: 'src/architecture.md'
---
-This section covers the main architectural aspects you need to consider
-when deploying EDB Postgres Distributed in Kubernetes (PG4K-PGD).
+Consider these main architectural aspects
+when deploying EDB Postgres Distributed in Kubernetes.
-PG4K-PGD is a
+EDB Postgres Distributed for Kubernetes is a
[Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
designed to deploy and manage EDB Postgres Distributed clusters
running in private, public, hybrid, or multi-cloud environments.
@@ -17,21 +17,23 @@ running in private, public, hybrid, or multi-cloud environments.
is a multi-master implementation of Postgres designed for high performance and
availability.
PGD generally requires deployment using
-[*Trusted Postgres Architect*, (TPA)](https://www.enterprisedb.com/docs/pgd/latest/tpa/),
-a tool that uses [Ansible](https://www.ansible.com) for provisioning and
-deployment of PGD clusters.
+[Trusted Postgres Architect (TPA)](/pgd/latest/tpa/),
+a tool that uses [Ansible](https://www.ansible.com) to provision and
+deploy PGD clusters.
-PG4K-PGD offers a different way of deploying PGD clusters, leveraging containers
-and Kubernetes, with the added advantages that the resulting architecture is
-self-healing and robust, managed through declarative configuration, and that it
-takes advantage of the vast and growing Kubernetes ecosystem.
+EDB Postgres Distributed for Kubernetes offers a different way of deploying PGD clusters, leveraging containers
+and Kubernetes. The advantages are that the resulting architecture:
+
+- Is self-healing and robust.
+- Is managed through declarative configuration.
+- Takes advantage of the vast and growing Kubernetes ecosystem.
## Relationship with EDB Postgres for Kubernetes
-A PGD cluster consists of one or more *PGD Groups*, each having one or more *PGD
-Nodes*. A PGD node is a Postgres database. PG4K-PGD internally
+A PGD cluster consists of one or more *PGD groups*, each having one or more *PGD
+nodes*. A PGD node is a Postgres database. EDB Postgres Distributed for Kubernetes internally
manages each PGD node using the `Cluster` resource as defined by EDB Postgres
-for Kubernetes (PG4K), specifically a `Cluster` with a single instance (i.e. no
+for Kubernetes, specifically a cluster with a single instance (that is, no
replicas).
The single PostgreSQL instance created by each `Cluster` can be configured
@@ -39,15 +41,15 @@ declaratively via the
[`.spec.cnp` section](pg4k-pgd.v1beta1.md#pgd-k8s-enterprisedb-io-v1beta1-CnpConfiguration)
of the PGD Group spec.
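
As a minimal, illustrative sketch (the group name and parameter value are
placeholders), PostgreSQL settings and storage for the nodes of a group can be
declared as follows:

```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: pgdgroup-example
spec:
  instances: 3
  cnp:
    postgresql:
      parameters:
        max_connections: "200"  # illustrative tuning value
    storage:
      size: 1Gi
```
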
-In PG4K-PGD, as in PG4K, the underlying database implementation is responsible
-for data replication. However, it is important to note that *failover* and
-*switchover* work differently, entailing Raft election and the nomination of new
-write leaders. PG4K only handles the deployment and healing of data nodes.
+In EDB Postgres Distributed for Kubernetes, as in EDB Postgres for Kubernetes, the underlying database implementation is responsible
+for data replication. However, it's important to note that failover and
+switchover work differently, entailing Raft election and nominating new
+write leaders. EDB Postgres for Kubernetes handles only the deployment and healing of data nodes.
-## Managing PGD using PG4K-PGD
+## Managing PGD using EDB Postgres Distributed for Kubernetes
-The PG4K-PGD operator can manage the complete lifecycle of PGD clusters. As
-such, in addition to PGD Nodes (represented as single-instance `Clusters`), it
+The EDB Postgres Distributed for Kubernetes operator can manage the complete lifecycle of PGD clusters. As
+such, in addition to PGD nodes (represented as single-instance `Clusters`), it
needs to manage other objects associated with PGD.
PGD relies on the Raft algorithm for distributed consensus to manage node
@@ -57,10 +59,10 @@ or performing distributed DDL.
These considerations force additional actors in PGD above database nodes.
-PG4K-PGD manages the following:
+EDB Postgres Distributed for Kubernetes manages the following:
-- Data nodes: as mentioned previously, a node is a database, and is managed
- via PG4K, creating a `Cluster` with a single instance.
+- Data nodes. A node is a database and is managed
+ by EDB Postgres for Kubernetes, creating a `Cluster` with a single instance.
- [Witness nodes](https://www.enterprisedb.com/docs/pgd/latest/nodes/#witness-nodes)
are basic database instances that do not participate in data
replication; their function is to guarantee that consensus is possible in
@@ -73,9 +75,9 @@ PG4K-PGD manages the following:
### Proxies and routing
PGD groups assume full mesh connectivity of PGD nodes. Each node must be able to
-connect to every other node, using the appropriate connection string (a
-`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
-will take care of replicating data after it's committed to one node.
+connect to every other node using the appropriate connection string (a
+`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
+takes care of replicating data after it's committed to one node.
For performance, it is often recommendable to send write operations mostly to a
single node, the *write leader*. Raft is used to identify which node is the
@@ -83,9 +85,9 @@ write leader, and to hold metadata about the PGD nodes. PGD Proxies are used to
transparently route writes to write leaders, and to quickly pivot to the new
write leader in case of switchover or failover.
-It is possible to configure *Raft subgroups*, each of which can maintain a
-separate write leader. In PG4K-PGD, a PGD Group containing a PGD Proxy
-automatically comprises a Raft subgroup.
+It's possible to configure *Raft subgroups*, each of which can maintain a
+separate write leader. In EDB Postgres Distributed for Kubernetes, a PGD group containing a PGD proxy
+comprises a Raft subgroup.
There are two kinds of routing available with PGD Proxies:
@@ -94,7 +96,7 @@ There are two kinds of routing available with PGD Proxies:
- Local routing uses subgroups to maintain separate write leaders. Local
routing is often used to achieve geographical separation of writes.
-In PG4K-PGD, local routing is used by default, and a configuration option is
+In EDB Postgres Distributed for Kubernetes, local routing is used by default, and a configuration option is
available to select global routing.
You can find more information in the
@@ -107,13 +109,12 @@ distributed multi-master capabilities and to offer high availability.
The Always On architectures are built from either one group in a single location
or two groups in two separate locations.
-Please refer to the
-[PGD architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
-for further information.
+See [Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation
+for more information.
## Deploying PGD on Kubernetes
-PG4K-PGD leverages Kubernetes to deploy and manage PGD clusters. As such, some
+EDB Postgres Distributed for Kubernetes leverages Kubernetes to deploy and manage PGD clusters. As such, some
adaptations are necessary to translate PGD into the Kubernetes ecosystem.
### Images and operands
@@ -146,7 +147,7 @@ for a **Kubernetes cluster** is three (3), in order to make the control plane
resilient to the failure of a single zone. This means that each data center is
active at any time and can run workloads simultaneously.
-PG4K-PGD can be installed within a
+EDB Postgres Distributed for Kubernetes can be installed in a
[single Kubernetes cluster](#single-kubernetes-cluster)
or across
[multiple Kubernetes clusters](#multiple-kubernetes-clusters).
@@ -154,9 +155,9 @@ or across
### Single Kubernetes cluster
A multi-availability-zone Kubernetes architecture is typical of Kubernetes
-services managed by Cloud Providers. Such an architecture enables the PG4K-PGD
-and the PG4K operators to schedule workloads and nodes across availability
-zones, considering all zones active:
+services managed by cloud providers. Such an architecture enables the EDB Postgres Distributed for Kubernetes
+and the EDB Postgres for Kubernetes operators to schedule workloads and nodes across availability
+zones, considering all zones active.
![Kubernetes cluster spanning over 3 independent data centers](./images/k8s-architecture-3-az.png)
@@ -164,21 +165,21 @@ PGD clusters can be deployed in a single Kubernetes cluster and take advantage
of Kubernetes availability zones to enable High Availability architectures,
including the Always On recommended architectures.
-The *Always On Single Location* architecture shown in the
-[PGD Architecture document](https://www.enterprisedb.com/docs/pgd/latest/architectures/):
-![Always On Single Region](./images/always_on_1x3_updated.png)
+You can realize the *Always On Single Location* architecture shown in
+[Choosing your architecture](/pgd/latest/architectures/) in the PGD documentation on
+a single Kubernetes cluster with three availability zones.
-can be realized on single kubernetes cluster with 3 availability zones.
+![Always On Single Region](./images/always_on_1x3_updated.png)
-The PG4K-PGD operator can control the *scheduling* of pods (i.e. which pods go
-to which data center) using affinity, tolerations and node selectors, as is the
-case with PG4K. Individual scheduling controls are available for proxies as well
+The EDB Postgres Distributed for Kubernetes operator can control the scheduling of pods (that is, which pods go
+to which data center) using affinity, tolerations, and node selectors, as is the
+case with EDB Postgres for Kubernetes. Individual scheduling controls are available for proxies as well
as nodes.
Please refer to the
[Kubernetes documentation on scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/),
-as well as the [PG4K documents](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/scheduling/)
-for further information.
+and [Scheduling](/postgres_for_kubernetes/latest/scheduling/) in the EDB Postgres for Kubernetes documentation
+for more information.
### Multiple Kubernetes clusters
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index add53edb914..f3c55ce0197 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -3,29 +3,29 @@ title: 'Backup on object stores'
originalFilePath: 'src/backup.md'
---
-EDB Postgres Distributed for Kubernetes (PG4K-PGD) supports *online/hot backup* of
+EDB Postgres Distributed for Kubernetes supports *online/hot backup* of
PGD clusters through physical backup and WAL archiving on an object store.
This means that the database is always up (no downtime required) and that
-Point In Time Recovery is available.
+point-in-time recovery (PITR) is available.
## Common object stores
-Multiple object store are supported, such as `AWS S3`, `Microsoft Azure Blob Storage`,
-`Google Cloud Storage`, `MinIO Gateway`, or any S3 compatible provider.
-Given that PG4K-PGD configures the connection with object stores by relying on
-EDB Postgres for Kubernetes (PG4K), please refer to the [PG4K Cloud provider support](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
-documentation for additional depth.
+Multiple object stores are supported, such as AWS S3, Microsoft Azure Blob Storage,
+Google Cloud Storage, MinIO Gateway, or any S3-compatible provider.
+Given that EDB Postgres Distributed for Kubernetes configures the connection with object stores by relying on
+EDB Postgres for Kubernetes, see the [EDB Postgres for Kubernetes cloud provider support](/postgres_for_kubernetes/latest/backup_recovery/#cloud-provider-support)
+documentation for more information.
!!! Important
- In the PG4K documentation you'll find the Cloud Provider configuration section
- available at `spec.backup.barmanObjectStore`. Note that in PG4K-PGD examples, the object store section is found at a
+ The EDB Postgres for Kubernetes documentation's Cloud Provider configuration section is
+ available at `spec.backup.barmanObjectStore`. In EDB Postgres Distributed for Kubernetes examples, the object store section is at a
different path: `spec.backup.configuration.barmanObjectStore`.
## WAL archive
-WAL archiving is the process that sends `WAL files` to the object storage, and it's essential to
-execute *online/hot backups*, or Point in Time recovery (PITR).
-In PG4K-PGD, each PGD Node will be set up to archive WAL files in the object store independently.
+WAL archiving is the process that sends WAL files to the object storage, and it's essential to
+execute online/hot backups or PITR.
+In EDB Postgres Distributed for Kubernetes, each PGD node is set up to archive WAL files in the object store independently.
The WAL archive is defined in the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza,
and is enabled as soon as a destination path and cloud credentials are set.
@@ -47,13 +47,13 @@ spec:
maxParallel: 8
```
-For further information, refer to the [PG4K WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
+For more information, see the [EDB Postgres for Kubernetes WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
## Scheduled backups
-Scheduled backups are the recommended way to configure your backup strategy in PG4K-PGD.
-When the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator will select one of the
-PGD data nodes as the elected "Backup Node", for which it will automatically create a `Scheduled Backup` resource.
+Scheduled backups are the recommended way to configure your backup strategy in EDB Postgres Distributed for Kubernetes.
+When the `PGDGroup` `spec.backup.configuration.barmanObjectStore` stanza is configured, the operator selects one of the
+PGD data nodes as the elected backup node, for which it creates a `ScheduledBackup` resource.
The `.spec.backup.cron.schedule` field allows you to define a cron schedule specification, expressed
in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
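+For illustration, a minimal sketch of the backup stanza with a schedule might look like the following. The group name, destination path, and schedule are placeholder values, and the six-field format includes a seconds field:
+```yaml
+apiVersion: pgd.k8s.enterprisedb.io/v1beta1
+kind: PGDGroup
+metadata:
+  name: my-pgd-group
+spec:
+  backup:
+    cron:
+      schedule: "0 0 0 * * *"  # seconds, minutes, hours, day of month, month, day of week
+    configuration:
+      barmanObjectStore:
+        destinationPath: "s3://backups/"
+        # credentials and other object store options go here, as in the WAL archive example
+```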
@@ -85,8 +85,8 @@ in the created backup resources. The choices are:
- *cluster:* sets the cluster as owner of the backup
!!! Note
- The `PG4K` ScheduledBackup object contains an additional option named `cluster` to specify the
- Cluster to be backed up. This option is currently not supported by `PG4K-PGD`, and will be
+ The EDB Postgres for Kubernetes `ScheduledBackup` object contains the `cluster` option to specify the
+ cluster to back up. This option is currently not supported by EDB Postgres Distributed for Kubernetes and is
ignored if specified.
In case an elected "Backup node" is deleted, the operator will transparently elect a new "Backup Node"
@@ -94,12 +94,12 @@ and reconcile the Scheduled Backup resource accordingly.
## Retention policies
-PG4K-PGD can manage the automated deletion of backup files from the backup
-object store, using **retention policies** based on the recovery window.
-This process will also take care of removing unused WAL files and WALs associated with backups
+EDB Postgres Distributed for Kubernetes can manage the automated deletion of backup files from the backup
+object store using retention policies based on the recovery window.
+This process also takes care of removing unused WAL files and WALs associated with backups
that are scheduled for deletion.
-You can define your backups with a retention policy of 30 days as follows:
+You can define your backups with a retention policy of 30 days:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -111,7 +111,7 @@ spec:
retentionPolicy: "30d"
```
-For further information, refer to the [PG4K Retention policies](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) documentation.
+For more information, see the [EDB Postgres for Kubernetes retention policies](/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) in the EDB Postgres for Kubernetes documentation.
!!! Important
Currently, the retention policy will only be applied for the elected "Backup Node"
@@ -125,20 +125,20 @@ For further information, refer to the [PG4K Retention policies](https://www.ente
## Compression algorithms
Backups and WAL files are uncompressed by default. However, multiple compression algorithms are
-supported. For more information, refer to the [PG4K Compression algorithms](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
+supported. For more information, see the [EDB Postgres for Kubernetes compression algorithms](/postgres_for_kubernetes/latest/backup_recovery/#compression-algorithms) documentation.
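+As a sketch, assuming the `data.compression` and `wal.compression` options from EDB Postgres for Kubernetes apply unchanged under the object store stanza:
+```yaml
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        data:
+          compression: gzip   # compress base backups
+        wal:
+          compression: gzip   # compress archived WAL files
+```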
## Tagging of backup objects
-It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files and history files.
-For more information, refer to the [PG4K document on Tagging of backup objects](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
+It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files, and history files.
+For more information, see the EDB Postgres for Kubernetes documentation about [tagging of backup objects](/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
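+For example, a minimal sketch of key-value tags in the object store stanza, assuming the `tags` and `historyTags` fields from EDB Postgres for Kubernetes apply unchanged (keys and values are placeholders):
+```yaml
+spec:
+  backup:
+    configuration:
+      barmanObjectStore:
+        tags:
+          environment: production   # applied to base backups and WAL files
+        historyTags:
+          environment: production   # applied to history files
+```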
## On-demand backups of a PGD Node
-A PGD Node is represented as single-instance PG4K `Cluster` object.
+A PGD node is represented as a single-instance EDB Postgres for Kubernetes `Cluster` object.
As such, in case of need, it's possible to request an on-demand backup
-of a specific PGD Node by creating a PG4K `Backup` resource.
-In order to do that, you can directly refer to the [PG4K On-demand backups](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) documentation.
+of a specific PGD node by creating an EDB Postgres for Kubernetes `Backup` resource.
+To do that, see [EDB Postgres for Kubernetes on-demand backups](/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) in the EDB Postgres for Kubernetes documentation.
!!! Hint
- You can retrieve the list of PG4K Clusters that make up your PGDGroup
+ You can retrieve the list of EDB Postgres for Kubernetes clusters that make up your PGD group
by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
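+For example, a minimal `Backup` manifest might look like the following sketch, where `my-pgd-group-1` and `my-namespace` stand in for one of the cluster names returned by that command and its namespace:
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: on-demand-backup
+  namespace: my-namespace
+spec:
+  cluster:
+    name: my-pgd-group-1   # the EDB Postgres for Kubernetes Cluster backing the PGD node
+```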
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 92feac82401..4f5dc2192df 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -19,9 +19,8 @@ specific to Kubernetes and PGD.
[Service](https://kubernetes.io/docs/concepts/services-networking/service/)
: A *service* is an abstraction that exposes as a network service an
- application that runs on a group of pods and standardizes important features
- such as service discovery across applications, load balancing, failover, and so
- on.
+ application that runs on a group of pods and standardizes important features,
+ such as service discovery across applications, load balancing, and failover.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
: A *secret* is an object that is designed to store small amounts of sensitive
@@ -34,7 +33,7 @@ specific to Kubernetes and PGD.
[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
: A *persistent volume* (PV) is a resource in a Kubernetes cluster that
- represents storage that has been either manually provisioned by an
+ represents storage that was either manually provisioned by an
administrator or dynamically provisioned by a *storage class* controller. A PV
is associated with a pod using a *persistent volume claim* and its lifecycle is
independent of any pod that uses it. Normally, a PV is a network volume,
@@ -75,13 +74,13 @@ specific to Kubernetes and PGD.
[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/)
: `kubectl` is the command-line tool used to manage a Kubernetes cluster.
-EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. Please refer to the
-["Supported releases"](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) page for details.
+EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported by the community. See
+[Supported releases](https://www.enterprisedb.com/resources/platform-compatibility#pgdk8s) for details.
## PGD terminology
-Please refer to the
-[PGD terminology page for further information](https://www.enterprisedb.com/docs/pgd/latest/terminology/).
+For more information, see
+[Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/) in the PGD documentation.
[Node](https://www.enterprisedb.com/docs/pgd/latest/terminology/#node)
: A PGD database instance.
@@ -103,12 +102,12 @@ Region
round-trip network latency.
Zone
-: An *availability zone* in the Cloud (also known as *zone*) is an area in a
+: An *availability zone* in the cloud (also known as a *zone*) is an area in a
region where resources can be deployed. Usually, an availability zone
corresponds to a data center or an isolated building of the same data center.
## What to do next
-Now that you have familiarized with the terminology, you can decide to
-[test EDB Postgres Distributed for Kubernetes (PG4K-PGD) on your laptop using a local cluster](quickstart.md) before
+Now that you're familiar with the terminology, you can
+[test EDB Postgres Distributed for Kubernetes on your laptop using a local cluster](quickstart.md) before
deploying the operator in your selected cloud environment.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
index 559a9399ee2..ed8486ffbe8 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -3,8 +3,8 @@ title: 'Certificates'
originalFilePath: 'src/certificates.md'
---
-EDB Postgres Distributed for Kubernetes has been designed to natively support TLS certificates.
-In order to set up a PGD cluster, each PGD node require:
+EDB Postgres Distributed for Kubernetes was designed to natively support TLS certificates.
+To set up a PGD cluster, each PGD node requires:
- a server Certification Authority (CA) certificate
- a server TLS certificate signed by the server Certification Authority
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index bc2b8492c54..0c112a9338e 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -3,96 +3,94 @@ title: 'Connectivity'
originalFilePath: 'src/connectivity.md'
---
-This section provides information about secure network communications within a
-PGD Cluster, covering the following topics:
+Information about secure network communications in a
+PGD cluster includes:
-- [services](#services)
-- [domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
+- [Services](#services)
+- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-\!!! Notice
- Although the above topics might seem unrelated to each other, they all
+!!! Notice
+ Although these topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
## Services
-Resources in a PGD Cluster are accessible through Kubernetes services.
-Every PGDGroup manages several of them, namely:
+Resources in a PGD cluster are accessible through Kubernetes services.
+Every PGD group manages several of them, namely:
-- one service per node, used for internal communications (*node service*)
-- a *group service*, to reach any node in the group, used primarily by PG4K-PGD
+- One service per node, used for internal communications (*node service*)
+- A *group service* to reach any node in the group, used primarily by EDB Postgres Distributed for Kubernetes
to discover a new group in the cluster
-- a *proxy service*, to enable applications to reach the write leader of the
- group, transparently using PGD proxy
+- A *proxy service* to enable applications to reach the write leader of the
+ group transparently using PGD Proxy
-For an example using these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
+For an example that uses these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application).
![Basic architecture of an EDB Postgres Distributed for Kubernetes PGD group](./images/pg4k-pgd-basic-architecture.png)
Each service is generated from a customizable template in the `.spec.connectivity`
section of the manifest.
-All services must be reachable using their fully qualified domain name (FQDN)
-from all the PGD nodes in all the Kubernetes clusters (see below in this
-section).
+All services must be reachable using their FQDN
+from all the PGD nodes in all the Kubernetes clusters. See [Domain names resolution](#domain-names-resolution).
-PG4K-PGD provides a service templating framework that gives you the
-availability to easily customize services at the following 3 levels:
+EDB Postgres Distributed for Kubernetes provides a service templating framework that gives you the
+ability to customize services at the following three levels:
Node Service Template
-: Each PGD node is reachable using a service which can be configured in the
+: Each PGD node is reachable using a service that can be configured in the
`.spec.connectivity.nodeServiceTemplate` section.
Group Service Template
-: Each PGD group has a group service that is a single entry point for the
+: Each PGD group has a group service that's a single entry point for the
whole group and that can be configured in the
`.spec.connectivity.groupServiceTemplate` section.
Proxy Service Template
: Each PGD group has a proxy service to reach the group write leader through
- the PGD proxy, and can be configured in the `.spec.connectivity.proxyServiceTemplate`
+ the PGD proxy and can be configured in the `.spec.connectivity.proxyServiceTemplate`
section. This is the entry-point service for the applications.
-You can use templates to create a LoadBalancer service, and/or to add arbitrary
-annotations and labels to a service in order to integrate with other components
-available in the Kubernetes system (i.e. to create external DNS names or tweak
+You can use templates to create a LoadBalancer service or to add arbitrary
+annotations and labels to a service to integrate with other components
+available in the Kubernetes system (for example, to create external DNS names or tweak
the generated load balancer).
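+For example, the following sketch turns the proxy service into a LoadBalancer and adds an annotation. The annotation key and hostname are placeholders, and the template is assumed to follow the usual Kubernetes Service `metadata`/`spec` shape:
+```yaml
+spec:
+  connectivity:
+    proxyServiceTemplate:
+      metadata:
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: my-group.my-domain.com
+      spec:
+        type: LoadBalancer
+```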
## Domain names resolution
-PG4K-PGD ensures that all resources in a PGD Group have a fully qualified
-domain name (FQDN) by adopting a convention that uses the PGD Group name as a prefix
+EDB Postgres Distributed for Kubernetes ensures that all resources in a PGD group have an FQDN by adopting a convention that uses the PGD group name as a prefix
for all of them.
-As a result, it expects that you define the domain name of the PGD Group. This
-can be done through the `.spec.connectivity.dns` section which controls how the
-FQDN for the resources are generated, with two fields:
+As a result, it expects you to define the domain name of the PGD group. You
+can do this through the `.spec.connectivity.dns` section, which controls how the
+FQDNs for the resources are generated, with two fields:
-- `domain`: domain name to be used by all the objects in the PGD group (mandatory);
-- `hostSuffix`: suffix to be added to each service in the PGD group (optional).
+- `domain` — Domain name for all the objects in the PGD group to use (mandatory).
+- `hostSuffix` — Suffix to add to each service in the PGD group (optional).
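+For example, with placeholder values:
+```yaml
+spec:
+  connectivity:
+    dns:
+      domain: my-domain.com   # mandatory
+      hostSuffix: -dc1        # optional
+```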
-## TLS Configuration
+## TLS configuration
-PG4K-PGD requires that resources in a PGD Cluster communicate over a secure
+EDB Postgres Distributed for Kubernetes requires that resources in a PGD cluster communicate over a secure
connection. It relies on PostgreSQL's native support for [SSL connections](https://www.postgresql.org/docs/current/libpq-ssl.html)
to encrypt client/server communications using TLS protocols for increased
security.
-Currently, PG4K-PGD requires that [cert-manager](https://cert-manager.io/) is installed.
-Cert-manager has been chosen as the tool to provision dynamic certificates,
-given that it is widely recognized as the de facto standard in a Kubernetes
+Currently, EDB Postgres Distributed for Kubernetes requires that [cert-manager](https://cert-manager.io/) is installed.
+Cert-manager was chosen as the tool to provision dynamic certificates
+given that it's widely recognized as the standard in a Kubernetes
environment.
The `spec.connectivity.tls` section describes how the communication between the
-nodes should happen:
+nodes happens:
- `mode` is an enumeration describing how the server certificates are verified
during PGD group nodes communication. It accepts the following values, as
- documented in ["SSL Support"](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
- from the PostgreSQL documentation:
+ documented in [SSL Support](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS)
+ in the PostgreSQL documentation:
- `verify-full`
- `verify-ca`
@@ -100,59 +98,59 @@ nodes should happen:
- `serverCert` defines the server certificates used by the PGD group nodes to
accept requests.
- The clients validate this certificate depending on the passed TLS mode;
- refer to the previous point for the accepted values.
+  The clients validate this certificate depending on the TLS `mode` described earlier.
-- `clientCert` defines the `streaming_replica` user certificate that will
- be used by the nodes to authenticate each other.
+- `clientCert` defines the `streaming_replica` user certificate
+ used by the nodes to authenticate each other.
-### Server TLS Configuration
+### Server TLS configuration
-The server certificate configuration is specified in `.spec.connectivity.tls.serverCert.certManager`
-section of the PGDGroup custom resource.
+The server certificate configuration is specified in the `.spec.connectivity.tls.serverCert.certManager`
+section of the `PGDGroup` custom resource.
-The following assumptions have been made for this section to work:
+For this configuration to work, the following assumptions must hold:
- An issuer `.spec.connectivity.tls.serverCert.certManager.issuerRef` is available
for the domain `.spec.connectivity.dns.domain` and any other domain used by
- `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
-- There is a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`
+ `.spec.connectivity.tls.serverCert.certManager.altDnsNames`.
+- There's a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.serverCert.caCertSecret`.
-The `.spec.connectivity.tls.serverCert.certManager` is used to create a per node
-cert-manager certificate request
-The resulting certificate will be used by the underlying Postgres instance
+The `.spec.connectivity.tls.serverCert.certManager` section is used to create a per-node
+cert-manager certificate request.
+The resulting certificate is used by the underlying Postgres instance
to terminate TLS connections.
-The operator will add the following altDnsNames to the certificate:
+The operator adds the following altDnsNames to the certificate:
- `$node$hostSuffix.$domain`
- `$groupName$hostSuffix.$domain`
!!! Important
- It's your responsibility to add in `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
- any name required from the underlying networking architecture
- (e.g., load balancers used by the user to reach the nodes).
+ It's your responsibility to add to `.spec.connectivity.tls.serverCert.certManager.altDnsNames`
+ any name required from the underlying networking architecture,
+ for example, load balancers used by the user to reach the nodes.
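+Putting these pieces together, a sketch of the server certificate configuration might look like the following. The issuer name, secret name, and extra DNS name are placeholders:
+```yaml
+spec:
+  connectivity:
+    tls:
+      mode: verify-ca
+      serverCert:
+        caCertSecret: server-ca-key-pair
+        certManager:
+          issuerRef:
+            name: server-ca-issuer
+            kind: Issuer
+            group: cert-manager.io
+          altDnsNames:
+            - my-group-lb.example.com   # any extra name used to reach the nodes
+```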
-### Client TLS Configuration
+### Client TLS configuration
The operator requires client certificates to be dynamically provisioned
-via cert-manager (recommended approach) or pre-provisioned via secrets.
+using cert-manager (the recommended approach) or pre-provisioned using secrets.
-#### Dynamic provisioning via Cert-manager
+#### Dynamic provisioning via cert-manager
-The client certificates configuration is managed by `.spec.connectivity.tls.clientCert.certManager`
-section of the PGDGroup custom resource.
-The following assumptions have been made for this section to work:
+The client certificates configuration is managed by the `.spec.connectivity.tls.clientCert.certManager`
+section of the `PGDGroup` custom resource.
+For this configuration to work, the following assumptions must hold:
- An issuer `.spec.connectivity.tls.clientCert.certManager.issuerRef` is available
- and will sign a certificate with the common name `streaming_replica`
-- There is a secret containing the public certificate of the CA
- used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`
+ and signs a certificate with the common name `streaming_replica`.
+- There's a secret containing the public certificate of the CA
+ used by the issuer `.spec.connectivity.tls.clientCert.caCertSecret`.
-The operator will use the configuration under `.spec.connectivity.tls.clientCert.certManager`
+The operator uses the configuration under `.spec.connectivity.tls.clientCert.certManager`
to create a certificate request per the `streaming_replica` Postgres user.
-The resulting certificate will be used to secure communication between the nodes.
+The resulting certificate is used to secure communication between the nodes.
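+A corresponding sketch for the client certificate section follows; again, the issuer and secret names are placeholders:
+```yaml
+spec:
+  connectivity:
+    tls:
+      clientCert:
+        caCertSecret: client-ca-key-pair
+        certManager:
+          issuerRef:
+            name: client-ca-issuer
+            kind: Issuer
+            group: cert-manager.io
+```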
#### Pre-provisioned certificates via secrets
@@ -160,65 +158,65 @@ Alternatively, you can specify a secret containing the pre-provisioned
client certificate for the streaming replication user through the
`.spec.connectivity.tls.clientCert.preProvisioned.streamingReplica.secretRef` option.
The certificate lifecycle in this case is managed entirely by a third party,
-either manually or automated, by simply updating the content of the secret.
+either manually or automatically, by updating the content of the secret.
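+As a sketch, assuming `secretRef` takes a standard secret name reference, the pre-provisioned variant might look like this (the secret name is a placeholder):
+```yaml
+spec:
+  connectivity:
+    tls:
+      clientCert:
+        preProvisioned:
+          streamingReplica:
+            secretRef:
+              name: streaming-replica-client-cert   # secret managed outside the operator
+```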
## Connecting to a PGD cluster from an application
-Connecting to a PGD Group from an application running inside the same Kubernetes cluster
-or from outside the cluster is a simple procedure. In both cases, you will connect to
-the proxy service of the PGD Group as the `app` user. The proxy service is a LoadBalancer
-service that will route the connection to the write leader of the PGD Group.
+Connecting to a PGD group from an application running inside the same Kubernetes cluster
+or from outside the cluster is a simple procedure. In both cases, you connect to
+the proxy service of the PGD group as the `app` user. The proxy service is a LoadBalancer
+service that routes the connection to the write leader of the PGD group.
### Connecting from inside the cluster
When connecting from inside the cluster, you can use the proxy service name to connect
-to the PGD Group. The proxy service name is composed of the PGD Group name and the (optional)
-host suffix defined in the `.spec.connectivity.dns` section of the PGDGroup custom resource.
+to the PGD group. The proxy service name is composed of the PGD group name and the optional
+host suffix defined in the `.spec.connectivity.dns` section of the `PGDGroup` custom resource.
-For example, if the PGD Group name is `my-group` and the host suffix is `.my-domain.com`,
-the proxy service name will be `my-group.my-domain.com`.
+For example, if the PGD group name is `my-group`, and the host suffix is `.my-domain.com`,
+the proxy service name is `my-group.my-domain.com`.
-Before connecting you will need to get the password for the app user from the app user
-secret. The naming format of the secret is `my-group-app` for a PGD Group named `my-group`.
+Before connecting, you need to get the password for the app user from the app user
+secret. The naming format of the secret is `my-group-app` for a PGD group named `my-group`.
-You can get the username and password from the secret with the following commands:
+You can get the username and password from the secret using the following commands:
```sh
kubectl get secret my-group-app -o jsonpath='{.data.username}' | base64 --decode
kubectl get secret my-group-app -o jsonpath='{.data.password}' | base64 --decode
```
-With this you now have all the pieces for a connection string to the PGD Group:
+With this, you have all the pieces for a connection string to the PGD group:
```text
postgresql://<app-user>:<app-password>@<proxy-service>:5432/<database>
```
-or for a `psql` invocation:
+Or, for a `psql` invocation:
```sh
psql -U <app-user> -h <proxy-service> <database>
```
-where `app-user` and `app-password` are the values you got from the secret,
+Where `app-user` and `app-password` are the values you got from the secret,
and `database` is the name of the database you want to connect
-to (the default is `app` for the app user.)
+to. (The default is `app` for the app user.)
### Connecting from outside the Kubernetes cluster
When connecting from outside the Kubernetes cluster, in the general case,
-the [*Ingress*](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [*Load Balancer*](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) will be necessary.
-Check your cloud provider or local installation for more information about the
-behavior of them in your environment.
+the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resource or a [load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) is necessary.
+Check your cloud provider or local installation for more information about their
+behavior in your environment.
-Ingresses and Load Balancers require a Pod selector to forward connection to
-the PGD proxies. When configuring them, we suggest to use the following labels:
+Ingresses and load balancers require a pod selector to forward connections to
+the PGD proxies. When configuring them, we suggest using the following labels, as in the sketch after this list:
-- `k8s.pgd.enterprisedb.io/group`: set the the PGD group name
-- `k8s.pgd.enterprisedb.io/workloadType`: set to `pgd-proxy`
+- `k8s.pgd.enterprisedb.io/group` — Set to the PGD group name.
+- `k8s.pgd.enterprisedb.io/workloadType` — Set to `pgd-proxy`.
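+For example, a minimal `Service` of type LoadBalancer that targets the proxies of a PGD group named `my-group` might look like this sketch (the service name is a placeholder):
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-group-external
+spec:
+  type: LoadBalancer
+  selector:
+    k8s.pgd.enterprisedb.io/group: my-group
+    k8s.pgd.enterprisedb.io/workloadType: pgd-proxy
+  ports:
+    - name: postgres
+      port: 5432
+      targetPort: 5432
+```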
If using Kind or other solutions for local development, the easiest way to
-access the PGD Group from outside is to use port forwarding
+access the PGD group from outside is to use port forwarding
to the proxy service. You can use the following command to forward port 5432 on your
local machine to the proxy service:
@@ -226,4 +224,4 @@ local machine to the proxy service:
kubectl port-forward svc/my-group.my-domain.com 5432:5432
```
-where `my-group.my-domain.com` is the proxy service name from the previous example.
+Where `my-group.my-domain.com` is the proxy service name from the previous example.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index f7af2b1cd49..ac041c7b3b7 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -33,8 +33,8 @@ directoryDefaults:
---
-**EDB Postgres Distributed for Kubernetes** (`pg4k-pgd`, or PG4K-PGD) is an
-operator designed to manage **EDB Postgres Distributed** v5 workloads on
+EDB Postgres Distributed for Kubernetes (`pg4k-pgd`) is an
+operator designed to manage EDB Postgres Distributed (PGD) workloads on
Kubernetes, with traffic routed by PGD Proxy.
The main custom resource that the operator provides is called `PGDGroup`.
@@ -44,54 +44,54 @@ Architectures can also be deployed across different Kubernetes clusters.
## Before you start
EDB Postgres Distributed for Kubernetes provides you with a way to deploy
-EDB Postgres Distributed in a Kubernetes environment. As a result, it
-is fundamental that you have read the
-["EDB Postgres Distributed" documentation](https://www.enterprisedb.com/docs/pgd/latest/).
+EDB Postgres Distributed in a Kubernetes environment. Therefore, we recommend
+reading the
+[EDB Postgres Distributed documentation](/pgd/latest/).
-The following chapters are very important to start working with EDB Postgres
-Distributed for Kubernetes:
+To start working with EDB Postgres
+Distributed for Kubernetes, read the following in the PGD documentation:
-- [Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/)
-- [Overview](https://www.enterprisedb.com/docs/pgd/latest/overview/)
-- [Architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
-- [Choosing a Postgres distribution](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+- [Terminology](/pgd/latest/terminology/)
+- [PGD overview](/pgd/latest/overview/)
+- [Choosing your architecture](/pgd/latest/architectures/)
+- [Choosing a Postgres distribution](/pgd/latest/choosing_server/)
-For advanced usage and maximum customization, it is also important to familiarize with
-["EDB Postgres for Kubernetes" (PG4K) documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/),
-as described in the ["Architecture" section](architecture.md#relationship-with-edb-postgres-for-kubernetes).
+For advanced usage and maximum customization, it's also important to be familiar with the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/),
+as described in [Architecture](architecture.md#relationship-with-edb-postgres-for-kubernetes).
## Supported Kubernetes distributions
EDB Postgres Distributed for Kubernetes is available for:
-- Kubernetes version 1.23 or higher through a Helm Chart
-- Red Hat OpenShift version 4.10 or higher through the Red Hat OpenShift
- Certified Operator only
+- Kubernetes version 1.23 or later through a Helm chart
+- Red Hat OpenShift version 4.10 or later only through the Red Hat OpenShift
+ certified operator
## Requirements
EDB Postgres Distributed for Kubernetes requires that the Kubernetes/OpenShift
-clusters hosting the distributed PGD cluster have been prepared by you to cater for:
+clusters hosting the distributed PGD cluster are prepared by you to provide:
-- the Public Key Infrastructure (PKI) encompassing all the Kubernetes clusters
- the PGD Global Group is spread across, as mTLS is required to authenticate
- and authorize all nodes in the mesh topology and guarantee encrypted communication
+- The public key infrastructure (PKI) encompassing all the Kubernetes clusters
+ the PGD global group is spread across. mTLS is required to authenticate
+ and authorize all nodes in the mesh topology and guarantee encrypted communication.
- Networking infrastructure across all Kubernetes clusters involved in the
- PGD Global Group to ensure that each node can communicate with each other
+  PGD global group to ensure that the nodes can communicate with each other
-EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or higher.
+EDB Postgres Distributed for Kubernetes also requires Cert Manager 1.10 or later.
!!! Seealso "About connectivity"
- Please refer to the ["Connectivity" section](connectivity.md) for more information.
+ See [Connectivity](connectivity.md) for more information.
## API reference
For a list of resources provided by EDB Postgres Distributed for Kubernetes,
-please refer to the [API reference](pg4k-pgd.v1beta1.md).
+see the [API reference](pg4k-pgd.v1beta1.md).
## Trademarks
-*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/)
+[Postgres, PostgreSQL, and the Slonik logo](https://www.postgresql.org/about/policies/trademarks/)
are trademarks or registered trademarks of the PostgreSQL Community Association
-of Canada, and used with their permission.*
+of Canada, and used with their permission.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
index d50d4b7993b..6b34d067b3c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
@@ -8,11 +8,11 @@ originalFilePath: 'src/installation_upgrade.md'
EDB Postgres Distributed for Kubernetes can be installed using the provided
[Helm chart](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
-If you don't have [Helm](https://helm.sh) installed yet, please follow the
-[official instructions](https://helm.sh/docs/intro/quickstart/) to install it
+If you don't have [Helm](https://helm.sh) installed yet, follow these
+[instructions](https://helm.sh/docs/intro/quickstart/) to install it
in your system.
-Assuming you have Helm installed, the first step is to add the repository:
+After Helm is installed, add the repository:
```console
helm repo add edb \
@@ -20,16 +20,16 @@ helm repo add edb \
```
!!! Important
- You'll need access to the private EDB repository where both the operator
+ You need access to the private EDB repository where both the operator
and operand images are stored. Access requires a valid
[EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
- Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
+ See [Accessing EDB private image registries](private_registries.md) for details.
Given that the container images for both the operator and the selected operand
are in EDB's private registry, you need your credentials to enable `helm` to
retrieve them.
-Make sure to replace your repo and token in the command below:
+Make sure to replace your repo and token in the following command:
```console
helm upgrade --dependency-update \
@@ -43,14 +43,14 @@ helm upgrade --dependency-update \
In particular:
-- set `@@REPOSITORY@@` to the name of the repository, as explained in ["Which repository to
- choose?"](private_registries.md#which-repository-to-choose)
-- set `@@TOKEN@@` to the repository token for your EDB account, as explained in
- ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
+- Set `@@REPOSITORY@@` to the name of the repository, as explained in [Which repository to
+ choose?](private_registries.md#which-repository-to-choose).
+- Set `@@TOKEN@@` to the repository token for your EDB account, as explained in
+ [How to retrieve the token](private_registries.md#how-to-retrieve-the-token).
-Please remember to create a cert issuer before you start deploying PGD clusters.
-The helm chart will already suggest that you do this, but in case you miss it,
-you may run, for example:
+Be sure to create a cert issuer before you start deploying PGD clusters.
+The Helm chart prompts you to do this, but in case you miss it,
+you can, for example, run:
```sh
kubectl apply -f \
@@ -58,17 +58,16 @@ kubectl apply -f \
```
!!! Info
- For further details on the Helm chart, please refer to the
+ For more details on the Helm chart, see the
[Helm chart repo documentation](https://github.com/EnterpriseDB/edb-postgres-for-kubernetes-charts).
With the operators and a self-signed cert issuer deployed, you can start
-creating PGD clusters. Please refer to the
-["Quickstart"](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
-
+creating PGD clusters. See the
+[Quick start](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
## Red Hat OpenShift
-If you are trying to install EDB Postgres Distributed for Kubernetes on Red Hat OpenShift,
-please refer to the ["Red Hat OpenShift section"](openshift.md) which contains
+If you're trying to install EDB Postgres Distributed for Kubernetes on Red Hat OpenShift,
+see [Red Hat OpenShift](openshift.md), which contains
information on the certified operator maintained by EDB.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
index 25abcb596f9..1e656a723b5 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/openshift.mdx
@@ -4,26 +4,26 @@ originalFilePath: 'src/openshift.md'
---
EDB Postgres Distributed for Kubernetes is a certified operator that can be
-installed on OpenShift via the web interface.
+installed on OpenShift using a web interface.
## Ensuring access to EDB private registry
!!! Important
- You'll need access to the private EDB repository where both the operator
+ You need access to the private EDB repository where both the operator
and operand images are stored. Access requires a valid
[EDB subscription plan](https://www.enterprisedb.com/products/plans-comparison).
- Please refer to ["Accessing EDB private image registries"](private_registries.md) for further details.
+ See [Accessing EDB private image registries](private_registries.md) for details.
-The OpenShift install will use pull secrets in order to access the
+The OpenShift install uses pull secrets to access the
operand and operator images, which are held in a private repository.
-Once you have credentials to the private repo, you will need to create
-two pull secrets in the `openshift-operators` namespace, named:
+Once you have credentials to the private repo, you need to create
+two pull secrets in the `openshift-operators` namespace:
-- `pgd-operator-pull-secret`, for the EDB Postgres Distributed for Kubernetes operator images
-- `postgresql-operator-pull-secret`, for the EDB Postgres for Kubernetes operator images
+- `pgd-operator-pull-secret` for the EDB Postgres Distributed for Kubernetes operator images
+- `postgresql-operator-pull-secret` for the EDB Postgres for Kubernetes operator images
-You can create each secret via the `oc create` command, as follows:
+You can create each secret using the `oc create` command:
```sh
oc create secret docker-registry pgd-operator-pull-secret \
@@ -37,68 +37,62 @@ oc create secret docker-registry postgresql-operator-pull-secret \
--docker-password="@@TOKEN@@"
```
-where:
+Where:
-- `@@REPOSITORY@@` is the name of the repository, as explained in ["Which repository to
- choose?"](private_registries.md#which-repository-to-choose)
+- `@@REPOSITORY@@` is the name of the repository, as explained in [Which repository to
+ choose?](private_registries.md#which-repository-to-choose).
- `@@TOKEN@@` is the repository token for your EDB account, as explained in
- ["How to retrieve the token"](private_registries.md#how-to-retrieve-the-token)
+ [How to retrieve the token](private_registries.md#how-to-retrieve-the-token).
## Installing the operator
The EDB Postgres Distributed for Kubernetes operator can be found in the Red
Hat OperatorHub directly from your OpenShift dashboard.
-1. Navigate in the web console to the `Operators -> OperatorHub` page:
+1. From the hamburger menu, select **Operators > OperatorHub**.
- ![Menu OperatorHub](./images/openshift/operatorhub_1.png)
-
-2. Use the search box to restrict the listing, e.g. using `EDB` or `pgd`:
+2. In the web console, use the search box to filter the listing. For example, enter `EDB` or `pgd`:
![Install OperatorHub](./images/openshift/find-pgd-openshift.png)
-3. Read the information about the Operator and select `Install`
+3. Read the information about the operator and select **Install**.
-4. The following `Operator installation` page expects you to choose:
+4. On the Operator Installation page, select:
- - the installation mode ([cluster-wide](#cluster-wide-installation) is the
- only mode at the moment)
- - the update channel (at the moment `preview`)
- - the approval strategy, following the availability on the market place of
+ - The installation mode. [Cluster-wide](#cluster-wide-installation) is currently the
+ only mode.
+ - The update channel (currently **preview**).
+ - The approval strategy, following the availability on the marketplace of
a new release of the operator, certified by Red Hat:
- - `Automatic`: OLM automatically upgrades the running operator with the
- new version
- - `Manual`: OpenShift waits for human intervention, by requiring an
- approval in the `Installed Operators` section
+ - **Automatic**: OLM upgrades the running operator with the
+ new version.
+ - **Manual**: OpenShift waits for human intervention by requiring an
+ approval in the **Installed Operators** section.
### Cluster-wide installation
-With cluster-wide installation, you are asking OpenShift to install the
-Operator in the default `openshift-operators` namespace and to make it
+With cluster-wide installation, you're asking OpenShift to install the
+operator in the default `openshift-operators` namespace and to make it
available to all the projects in the cluster.
-
This is the default and normally recommended approach to install EDB Postgres
Distributed for Kubernetes.
-From the web console, select `All namespaces on the cluster (default)` as
-`Installation mode`:
-
-![Install all namespaces](./images/openshift/all-namespaces.png)
+From the web console, for **Installation mode**, select **All namespaces on the cluster (default)**.
-On installation, the operator will be visible in all namespaces. In case there
+On installation, the operator is visible in all namespaces. If there
were problems during installation, check the logs in any pods in the
-`openshift-operators` project on the `Workloads → Pods` page,
+`openshift-operators` project on the **Workloads > Pods** page
as you would with any other OpenShift operator.
!!! Important "Beware"
- By choosing the cluster-wide installation you cannot easily move to a
- single project installation at a later time.
+    By choosing the cluster-wide installation, you can't easily move to a
+ single-project installation later.
## Creating a PGD cluster
-After the installation from OpenShift, you should find the operator deployment
-in the `openshift-operators` namespace. Notice the cert-manager operator will
-also get installed, as will the EDB Postgres for Kubernetes operator
+After the installation from OpenShift, the operator deployment
+is in the `openshift-operators` namespace. Notice the cert-manager operator was
+also installed, as was the EDB Postgres for Kubernetes operator
(`postgresql-operator-controller-manager`).
```sh
@@ -110,14 +104,14 @@ postgresql-operator-controller-manager-1-20-0 1/1 1 1
…
```
-Checking that the `pgd-operator-controller-manager` deployment is READY, we can
+After checking that the `pgd-operator-controller-manager` deployment is READY, you can
start creating PGD clusters. The EDB Postgres Distributed for Kubernetes
repository contains some useful sample files.
-Remember to deploy your PGD clusters on a dedicated namespace/project. The
+You must deploy your PGD clusters in a dedicated namespace/project. The
default namespace is reserved.
-First then, you should create a new namespace, and deploy a
+First, create a new namespace, and deploy a
[self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
in it:
@@ -127,9 +121,7 @@ oc apply -n my-namespace -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-### Using PGD in a single Openshift Cluster in a single region
-
-Please see the following section for [multi-cluster and multi-region](#using-pgd-in-multiple-openshift-clusters-in-multiple-regions) deployments.
+### Using PGD in a single OpenShift cluster in a single region
Now you can deploy a PGD cluster, for example a flexible 3-region, which
contains two data groups and a witness group. You can find the YAML manifest
@@ -139,7 +131,7 @@ in the file [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml).
oc apply -f flexible_3regions.yaml -n my-namespace
```
-You should start seeing your PGD groups come up:
+Your PGD groups start to come up:
```sh
$ oc get pgdgroups -n my-namespace
@@ -149,40 +141,40 @@ region-b 2 1 PGDGroup - Healthy
region-c 0 1 PGDGroup - Healthy 23m
```
-### Using PGD in multiple Openshift Clusters in multiple regions
+### Using PGD in multiple OpenShift clusters in multiple regions
-In order to deploy PGD in multiple Openshift Clusters in multiple regions you must first establish a way for the
-PGD Groups to communicate with each other. The recommended way of achieving this with multiple Openshift clusters is to use
+To deploy PGD in multiple OpenShift clusters in multiple regions, you must first establish a way for the
+PGD groups to communicate with each other. The recommended way of achieving this with multiple OpenShift clusters is to use
[Submariner](https://submariner.io/getting-started/quickstart/openshift/). Configuring the connectivity is outside the
-scope of this document, but once you have established connectivity between the Openshift Clusters you can deploy
-PGD Groups synced with one another.
+scope of this documentation. However, once you've established connectivity between the OpenShift clusters, you can deploy
+PGD groups synced with one another.
!!! Warning
- This example assumes you are deploying three PGD Groups, one in each Openshift
- Cluster, and that you have established connectivity between the Openshift Clusters using Submariner.
+ This example assumes you're deploying three PGD groups, one in each OpenShift
+ cluster, and that you established connectivity between the OpenShift clusters using Submariner.
-Similar to the [single cluster example](#using-pgd-in-a-single-openshift-cluster-in-a-single-region), we will create
+Similar to the [single-cluster example](#using-pgd-in-a-single-openshift-cluster-in-a-single-region), this example creates
two data PGD groups and one witness group. In contrast to that example,
-each group will live in a different Openshift Cluster.
+each group lives in a different OpenShift cluster.
-In addition to basic connectivity between the Openshift Clusters, you will need to ensure that each Openshift Cluster
-contains a certificate authority that is trusted by the other Openshift Clusters. This is required for the PGD Groups
+In addition to basic connectivity between the OpenShift clusters, you need to ensure that each OpenShift cluster
+contains a certificate authority that's trusted by the other OpenShift clusters. This condition is required for the PGD groups
to communicate with each other.
-The Openshift clusters can all use
+The OpenShift clusters can all use
the same certificate authority, or each cluster can have its own certificate
-authority. Either way, it needs to be ensured that each Openshift cluster's
-certificates trust the other Openshift clusters' certificate authorities.
-
-For illustration, we are going to demo using a self-signed certificate
-that has a single certificate authority used for all certificates on all our Openshift clusters.
-
-In this demo we will refer to the Openshift clusters as `Openshift Cluster A`, `Openshift Cluster B`, and
-`Openshift Cluster C` . In Openshift, an installation of the PG4K-PGD-Operator from OperatorHub will include an
-installation of the *cert-manager* operator; creating and managing certificates with cert-manager is
-recommended. We create a namespace to hold `Openshift Cluster A`, and in it
-we will also create the needed objects for a self-signed certificate. Assuming
-that the PGD operator and the cert-manager are installed, we create a [self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
+authority. Either way, you need to ensure that each OpenShift cluster's
+certificates trust the other OpenShift clusters' certificate authorities.
+
+This example uses a self-signed certificate
+that has a single certificate authority used for all certificates on all the OpenShift clusters.
+
+The example refers to the OpenShift clusters as `OpenShift Cluster A`, `OpenShift Cluster B`, and
+`OpenShift Cluster C`. In OpenShift, an installation of the EDB Postgres Distributed for Kubernetes operator from OperatorHub includes an
+installation of the cert-manager operator. We recommend creating and managing certificates with cert-manager.
+
+1. In `OpenShift Cluster A`, create a namespace, and in it create the needed objects for a self-signed certificate. Assuming
+that the PGD operator and cert-manager are installed, create a [self-signed certificate `Issuer`](https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml)
in that namespace.
```sh
@@ -191,21 +183,21 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-After a few moments, cert-manager should have created the Issuers and Certificates. Additionally, there should now be
+1. After a few moments, cert-manager creates the issuers and certificates. There are also now
two secrets in the `pgd-group` namespace: `server-ca-key-pair` and `client-ca-key-pair`. These secrets contain
-the certificates and private keys for the server and client certificate authorities. We will need to copy these secrets
-to the other Openshift Clusters **before applying** the `issuer-selfsigned.yaml` manifest. We can use the
-`oc get secret` command to get the contents of the secrets.
+the certificates and private keys for the server and client certificate authorities. You need to copy these secrets
+to the other OpenShift clusters before applying the `issuer-selfsigned.yaml` manifest. You can use the
+`oc get secret` command to get the contents of the secrets:
```sh
oc get secret server-ca-key-pair -n pgd-group -o yaml > server-ca-key-pair.yaml
oc get secret client-ca-key-pair -n pgd-group -o yaml > client-ca-key-pair.yaml
```
-After removing the content specific to `Openshift Cluster A`
-from the above secrets (such as uid, resourceVersion and timestamp,) we can switch our
-context to `Openshift Cluster B`; we create the namespace, create our
-secrets in it, and only then apply the `issuer-selfsigned.yaml` file.
+1. After removing the content specific to `OpenShift Cluster A`
+from these secrets (such as uid, resourceVersion, and timestamp), you can switch
+context to `OpenShift Cluster B`. Then create the namespace, create the
+secrets in it, and only then apply the `issuer-selfsigned.yaml` file:
```sh
oc create ns pgd-group
@@ -215,8 +207,8 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-Finally, we can switch our context to `Openshift Cluster C`, and repeat
-the same process we followed for Cluster B.
+1. You can switch context to `OpenShift Cluster C` and repeat
+the same process you followed for Cluster B:
```sh
oc create ns pgd-group
@@ -226,8 +218,7 @@ oc apply -n pgd-group -f \
https://raw.githubusercontent.com/EnterpriseDB/edb-postgres-for-kubernetes-charts/main/hack/samples/issuer-selfsigned.yaml
```
-Now, back on `Openshift Cluster A`, we can create our first PGD Group, called `region-a`. The YAML manifest for the PGD Group is as
-follows:
+1. On `OpenShift Cluster A`, you can create your first PGD group, called `region-a`. The YAML manifest for the PGD group is:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -276,20 +267,20 @@ spec:
group: cert-manager.io
```
-!!! Important
- Please note that the format of the hostnames in the `discovery` section differs from the single cluster
- example. This is because we are using Submariner to connect the Openshift Clusters, and Submariner uses the
- `..svc.clusterset.local` domain to route traffic between the Openshift Clusters. `region-a-group` is the
- name of the service that will be created for the PGD Group named `region-a`.
+ !!! Important
+ The format of the hostnames in the `discovery` section differs from the single-cluster
+ example. That's because Submariner is being used to connect the OpenShift clusters, and Submariner uses the
+ `..svc.clusterset.local` domain to route traffic between the OpenShift clusters. `region-a-group` is the
+ name of the service to be created for the PGD group named `region-a`.
-Let's apply the `region-a` PGD Group YAML:
+1. Apply the `region-a` PGD group YAML:
```sh
oc apply -f region-a.yaml -n pgd-group
```
-We can now switch our context to `Openshift Cluster B` and create our second PGD Group. The YAML for the PGD Group in Cluster B
-is as follows, the only difference is the `metadata.name`:
+1. You can now switch context to `OpenShift Cluster B` and create the second PGD group. The YAML for the PGD group in Cluster B
+is as follows. The only difference is the `metadata.name`.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -333,14 +324,14 @@ spec:
group: cert-manager.io
```
-Apply the `region-b` PGD Group YAML:
+1. Apply the `region-b` PGD group YAML:
```sh
oc apply -f region-b.yaml -n pgd-group
```
-And finally, we can switch our context to `Openshift Cluster C` and create our third PGD Group. The YAML for the PGD
-Group is as follows:
+1. You can switch context to `OpenShift Cluster C` and create the third PGD group. The YAML for the PGD
+group is:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -384,29 +375,30 @@ spec:
group: cert-manager.io
```
-Apply the `region-c` PGD Group YAML:
+1. Apply the `region-c` PGD group YAML:
```sh
oc apply -f region-c.yaml -n pgd-group
```
-Now we can switch our context back to `Openshift Cluster A` and check the status of our PGD Group there.
+Now you can switch context back to `OpenShift Cluster A` and check the status of the PGD group there:
```sh
oc get pgdgroup region-a -n pgd-group
```
-We should expect to find the PGD group in phase
+The PGD group is in the phase
`PGD - Waiting for node discovery`.
-After creating the PGD Groups in each Openshift Cluster, which will in turn create the services for each node, you will
-need to expose the services to the other Openshift Clusters. This can be done in various ways.
-Since we are using
-Submariner, we will do it using the
+After creating the PGD groups in each OpenShift cluster, which in turn creates the services for each node, you
+need to expose the services to the other OpenShift clusters. You can do this in various ways.
+
+If you're using
+Submariner, you can do it using the
[`subctl`](https://submariner.io/operations/deployment/subctl/)
-command. We need to run the `subctl export service` command
-for each service in our
-`pgd-group` namespace that has a `-group` or `-node` suffix. We can accomplish this by running the following bash
+command. Run the `subctl export service` command
+for each service in the
+`pgd-group` namespace that has a `-group` or `-node` suffix. You can do this by running the following bash
`for` loop on each cluster:
```sh
@@ -415,7 +407,7 @@ for service in $(oc get svc -n pgd-group --no-headers -o custom-columns="NAME:.m
done
```
-After a few minutes the status should show that the PGD Group is healthy. Once each PGD Group is healthy, you can write
-to the `app` database in either of the two data nodes, `region-a` or `region-b`, and the data will be replicated to the
+After a few minutes, the status shows that the PGD group is healthy. Once each PGD group is healthy, you can write
+to the `app` database in either of the two data nodes: `region-a` or `region-b`. The data is replicated to the
other data node.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
index 0bc9675cdc9..f1ed8778cbd 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/private_registries.mdx
@@ -9,26 +9,26 @@ container image registries under `docker.enterprisedb.com`.
!!! Important
Access to the private registries requires an account with EDB and is
- reserved to EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
- Credentials will be funneled through your EDB account.
- For trials, please refer to the ["Trials"](#trials) section below.
+ reserved for EDB customers with a valid [subscription plan](https://www.enterprisedb.com/products/plans-comparison#selfmanagedenterpriseplan).
+    Credentials are provided through your EDB account.
+ For trials, see [Trials](#trials).
## Which repository to choose?
-EDB Postgres Distributed for Kubernetes is available as part of the "Extreme
-High Availability Add-On" on top of either the "EDB Enterprise Plan" or "EDB
-Standard Plan".
+EDB Postgres Distributed for Kubernetes is available as part of the Extreme
+High Availability Add-On on top of either the EDB Enterprise Plan or EDB
+Standard Plan.
Depending on your subscription plan, EDB Postgres Distributed for Kubernetes
-will be in one of the following repositories, as described in the table below:
+is in one of the following repositories.
| Plan | Repository |
| --------------------- | -------------------- |
| EDB Standard Plan | `k8s_standard_pgd` |
| EDB EnterpriseDB Plan | `k8s_enterprise_pgd` |
-The name of the repository shall be used as the *Username* when you try to
-login to the EDB container registry, for example through `docker login` or a
+Use the name of the repository as the username when you
+log in to the EDB container registry, for example, through `docker login` or a
[`kubernetes.io/dockerconfigjson` pull secret](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types).
!!! Important
@@ -38,25 +38,24 @@ login to the EDB container registry, for example through `docker login` or a
## How to retrieve the token
-In the ["repos" page in EDB](https://www.enterprisedb.com/repos-downloads),
-you'll find an *EDB Repos 2.0* section where a `Repo Token` is shown obscured.
+The [repos page in EDB](https://www.enterprisedb.com/repos-downloads)
+has an EDB Repos 2.0 section where a repo token appears obscured.
![EDB Repo Portal](images/edb-repo-portal.png)
-Next to the "Repo Token" you'll find a button to copy the token, and an eye icon
-in case you want to look at the content of the token.
+Next to the repo token is a **Copy Token** button to copy the token and an eye icon
+for looking at the content of the token.
-The "Repo Token" shall be used as the *Password* when you try to login to EDB
+Use the repo token as the password when you log in to the EDB
container registry.
### Example with `docker login`
-You should be able to logon via Docker from your terminal. We suggest you
-copy the Repo Token using the `Copy Token` button. The `docker` command below
-will prompt you for a username and a password.
+You can log in using Docker from your terminal. We suggest that you
+copy the repo token using **Copy Token**. The `docker` command prompts you for a username and a password.
-As explained above, the username should be the repo you are trying to access
-while the password is the token you just copied.
+The username is the repo you're trying to access,
+and the password is the token you just copied:
```sh
$ docker login docker.enterprisedb.com
@@ -67,35 +66,35 @@ Login Succeeded
## Trials
-If you are a trialist or a preview user, use `k8s_enterprise_pgd` as the name
-of the repository and follow the instructions in
-["How to retrieve the token"](#how-to-retrieve-the-token) for the token.
+If you're a trialist or a preview user, use `k8s_enterprise_pgd` as the name
+of the repository, and follow the instructions in
+[How to retrieve the token](#how-to-retrieve-the-token) for the token.
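Whether you're on a paid plan or a trial, the same repository name and token can be used to create the `kubernetes.io/dockerconfigjson` pull secret mentioned earlier. A minimal sketch, assuming the `k8s_enterprise_pgd` repository and placeholder secret and namespace names:

```sh
# Create a pull secret for the EDB container registry.
# Replace <repo-token> with the token copied from the EDB repos page.
kubectl create secret docker-registry edb-pull-secret \
  --docker-server=docker.enterprisedb.com \
  --docker-username=k8s_enterprise_pgd \
  --docker-password=<repo-token> \
  -n pgd-operators
```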
## Operand images
EDB Postgres Distributed for Kubernetes is an operator that supports running
-Postgres Distributed (PGD) version 5 on three PostgreSQL distributions:
+EDB Postgres Distributed (PGD) version 5 on three PostgreSQL distributions:
- PostgreSQL
-- EDB Postgres Advanced
+- EDB Postgres Advanced Server
- EDB Postgres Extended
!!! Important
- Please refer to ["Choosing a Postgres distribution"](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
- from the PGD documentation for details and a comparison of PGD on the
+ See [Choosing a Postgres distribution](/pgd/latest/choosing_server/)
+ in the PGD documentation for details and a comparison of PGD on the
different supported PostgreSQL distributions.
Due to the immutable application container adoption in EDB operators, the
-operator expects that the container images include all the binaries required
+operator expects the container images to include all the binaries required
to run the requested version of PGD on top of the required distribution and
version of Postgres.
-These images follow the requirements and the conventions described in the
-["Container image requirements"](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/container_images/)
-page of the EDB Postgres for Kubernetes documentation, adding the `bdr5`
+These images follow the requirements and the conventions described in
+[Container image requirements](/postgres_for_kubernetes/latest/container_images/)
+in the EDB Postgres for Kubernetes documentation, adding the `bdr5`
extension.
-In the table below you can find the image name prefix for each Postgres distribution:
+The table shows the image name prefix for each Postgres distribution.
| Postgres distribution | Versions | Image name | Repositories |
| --------------------- | -------- | --------------------------- | ---------------------------------------- |
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index 0df0cf3505a..03b9b3a5b39 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -1,35 +1,33 @@
---
-title: 'Quickstart'
+title: 'Quick start'
originalFilePath: 'src/quickstart.md'
---
-This section describes how to test an EDB Postgres Distributed (PGD) cluster on your
-laptop/computer using EDB Postgres Distributed for Kubernetes (PG4K-PGD)
+You can test an EDB Postgres Distributed (PGD) cluster on your
+laptop or computer using EDB Postgres Distributed for Kubernetes
on a single local
Kubernetes cluster built with [Kind](https://kind.sigs.k8s.io/).
!!! Warning
- The instructions contained in this section are for demonstration,
- testing, and practice purposes only and must not be used in production.
+ These instructions are only for demonstration,
+ testing, and practice purposes and must not be used in production.
-By following the instructions on this page you should be able to start an EDB Postgres Distributed
-cluster on your local Kubernetes installation and experiment with it.
+This quick start shows you how to start an EDB Postgres Distributed
+cluster on your local Kubernetes installation so you can experiment with it.
!!! Important
- Make sure that you have `kubectl` installed on your machine in order
- to connect to the Kubernetes cluster. Please follow the Kubernetes documentation
- on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+ To connect to the Kubernetes cluster, make sure that you have `kubectl` installed on your machine.
+ See the Kubernetes documentation
+ on [installing `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
-## Part 1 - Setup the local Kubernetes playground
+## Part 1 - Set up the local Kubernetes playground
-This section is about installing Kind, a tool for running local Kubernetes
-clusters using Docker container "nodes" (Kind stands for "Kubernetes IN Docker"
-indeed).
-If you already have access to a Kubernetes cluster, you may skip to the next
-section.
+Install Kind, a tool for running local Kubernetes
+clusters using Docker container nodes. (Kind stands for Kubernetes IN Docker.)
+If you already have access to a Kubernetes cluster, you can skip to Part 2.
-Install `kind` on your environment following the instructions in the [Kind Quickstart](https://kind.sigs.k8s.io/docs/user/quick-start),
-then create a Kubernetes cluster with:
+Install Kind on your environment following the instructions in [Kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start).
+Then, create a Kubernetes cluster:
```sh
kind create cluster --name pgd
@@ -39,11 +37,10 @@ kind create cluster --name pgd
## Part 2 - Install EDB Postgres Distributed for Kubernetes
-Now that you have a Kubernetes installation up and running on your laptop, you
-can proceed with the installation of EDB Postgres Distributed for Kubernetes.
+After you have a Kubernetes installation up and running on your laptop, you
+can install EDB Postgres Distributed for Kubernetes.
-Please refer to the ["Installation"](installation_upgrade.md) section and then
-proceed with the deployment of a PGD cluster.
+See [Installation](installation_upgrade.md) for details.
## Part 3 - Deploy a PGD cluster
@@ -51,15 +48,15 @@ As with any other deployment in Kubernetes, to deploy a PGD cluster you need to
apply a configuration file that defines your desired `PGDGroup` resources that
make up a PGD cluster.
-Some sample files are included in the PG4K-PGD repository. The
+Some sample files are included in the EDB Postgres Distributed for Kubernetes repository. The
[flexible_3regions.yaml](../samples/flexible_3regions.yaml) manifest
-contains the definition of a PGD cluster with 2 Data Groups and a global
-witness node spread across 3 regions. Each Data Group consists of 2 data nodes
+contains the definition of a PGD cluster with two data groups and a global
+witness node spread across three regions. Each data group consists of two data nodes
and a local witness node.
-!!! Note "There's more"
- For more detailed information about the available options, please refer
- to the ["API Reference" section](pg4k-pgd.v1beta1.md).
+!!! SeeAlso "Further reading"
+ For more details about the available options, see
+ the ["API Reference" section](pg4k-pgd.v1beta1.md).
You can deploy the `flexible-3-regions` example by saving it first and running:
@@ -67,17 +64,17 @@ You can deploy the `flexible-3-regions` example by saving it first and running:
kubectl apply -f flexible_3regions.yaml
```
-You can check that the pods are being created with the `get pods` command:
+You can check that the pods are being created using the `get pods` command:
```sh
kubectl get pods
```
-The pods are being created as part of PGD nodes, and as described in the
-[architecture document](architecture.md), they are implemented on top
-of PG4K Clusters.
+The pods are being created as part of PGD nodes. As described in
+[Architecture](architecture.md), they're implemented on top
+of EDB Postgres for Kubernetes clusters.
-We can list the clusters then, which will give us the PGD nodes:
+You can then list the clusters, which shows the PGD nodes:
```sh
$ kubectl get clusters
@@ -90,7 +87,7 @@ region-a-3 91s 1 1 Cluster in healthy state region-a-3-1
```
Ultimately, the PGD nodes are created as part of the PGD groups
-that make up our PGD cluster.
+that make up your PGD cluster.
```sh
$ kubectl get pgdgroups
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
index 20b381082af..d2b873ec5ba 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx
@@ -3,36 +3,35 @@ title: 'Recovery'
originalFilePath: 'src/recovery.md'
---
-In EDB Postgres Distributed for Kubernetes (PG4K-PGD), recovery is available as a way
-to bootstrap a new PGD Group starting from an available physical backup of a PGD Node.
-The recovery cannot be performed "in-place" on an existing PGD Group.
-PG4K-PGD also supports Point In Time Recovery, which allows you to restore a PGDGroup up to
+In EDB Postgres Distributed for Kubernetes, recovery is available as a way
+to bootstrap a new PGD group starting from an available physical backup of a PGD node.
+The recovery can't be performed in place on an existing PGD group.
+EDB Postgres Distributed for Kubernetes also supports point-in-time recovery (PITR), which allows you to restore a PGD group up to
any point in time, from the first available backup in your catalog to the last archived
-WAL (having a WAL archive is mandatory in this case).
+WAL. Having a WAL archive is mandatory in this case.
## Prerequisite
-Before recovering from a Backup, take care to apply the following considerations:
+Before recovering from a backup:
- Make sure that the PostgreSQL configuration (`.spec.cnp.postgresql.parameters`) of the
- recovered cluster is compatible, from a physical replication standpoint, with the original one.
+ recovered cluster is compatible with the original one from a physical replication standpoint.
-- When recovering in a newly created namespace, remember to first setup a cert-manager CA Issuer before deploying the recovered PGDGroup.
+- When recovering in a newly created namespace, first set up a cert-manager CA issuer before deploying the recovered PGD group.
-For further information, refer to the [PG4K Recovery - Additional considerations](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#additional-considerations) documentation section.
+For more information, see [EDB Postgres for Kubernetes recovery - Additional considerations](/postgres_for_kubernetes/latest/bootstrap/#additional-considerations) in the EDB Postgres for Kubernetes documentation.
## Recovery from an object store
-You can recover from a PGD Node backup created by Barman Cloud and stored on supported object storage.
+You can recover from a PGD node backup created by Barman Cloud and stored on supported object storage.
-For example, given a PGDGroup named `pgdgroup-example` with 3 instances, with Backups available, your object storage
-should contain a directory for each node:
+For example, given a PGD group named `pgdgroup-example` with three instances and backups available, your object storage contains a directory for each node:
`pgdgroup-example-1`, `pgdgroup-example-2`, `pgdgroup-example-3`
-The following example will define a full recovery from the object store.
-The operator will transparently select the latest backup between the defined `serverNames`, and
-replay up to the last available WAL.
+This example defines a full recovery from the object store.
+The operator transparently selects the latest backup between the defined `serverNames` and
+replays up to the last available WAL.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -63,25 +62,25 @@ spec:
!!! Important
Make sure to correctly configure the WAL section according to the source cluster.
- In the above example, since the `pgdgroup-example` PGDGroup uses `compression`
- and `encryption`, make sure to set the proper parameters also in the PGDGroup
+ In the example, since the `pgdgroup-example` PGD group uses `compression`
+ and `encryption`, make sure to set the proper parameters also in the PGD group
that's being created by the `restore`.
!!! Note
- In the above example we are taking advantage of the parallel WAL restore feature,
- dedicating up to 8 jobs to concurrently fetch the required WAL files from the archive.
+ The example takes advantage of the parallel WAL restore feature,
+ dedicating up to eight jobs to concurrently fetch the required WAL files from the archive.
This feature can appreciably reduce the recovery time. Make sure that you plan ahead
for this scenario and tune the value of this parameter for your environment.
- It will certainly make a difference when you'll need it.
+ It makes a difference when you need it.
-## Point in time recovery (PITR) from an object store
+## PITR from an object store
-Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL to stop replaying
-WALs at any given point in time, after having extracted a base backup.
-PostgreSQL uses this technique to achieve point-in-time recovery (PITR).
-The presence of a WAL archive is mandatory.
+Instead of replaying all the WALs up to the latest one, after extracting a base backup, you can ask PostgreSQL to stop replaying
+WALs at any point in time.
+PostgreSQL uses this technique to achieve PITR.
+(The presence of a WAL archive is mandatory.)
-The following example will define a time base target for the recovery:
+This example defines a time-based target for the recovery:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -113,19 +112,19 @@ spec:
```
!!! Important
- PITR requires you to specify a `targetTime` recovery target, by using the options described
- in the "Recovery targets" section below. When you use `targetTime` or `targetLSN`, the operator
- automatically selects the closest backup that was completed before that target. Otherwise, it
+ PITR requires you to specify a `targetTime` recovery target by using the options described
+ in [Recovery targets](#recovery-targets). When you use `targetTime` or `targetLSN`, the operator
+ selects the closest backup that was completed before that target. Otherwise, it
selects the last available backup in chronological order between the specified `serverNames`.
## Recovery from an object store specifying a `backupID`
The `.spec.restore.recoveryTarget.backupID` option allows you to specify a base backup from
-which to initiate the recovery process. By default, this value is empty.
-If you assign a value to it (in the form of a Barman backup ID), the operator will use that backup as base for the recovery.
+which to start the recovery process. By default, this value is empty.
+If you assign a value to it, the operator uses that backup as the base for the recovery. The value must be in the form of a Barman backup ID.
-The following example recovers a new PGDGroup from a specific backupID of the
-`pgdgroup-backup-1` PGD Node:
+This example recovers a new PGD group from a specific backupID of the
+`pgdgroup-backup-1` PGD node:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -155,18 +154,16 @@ spec:
```
!!! Important
- When a `backupID` is specified, make sure to only define the related PGD Node
+ When a `backupID` is specified, make sure to define only the related PGD node
in the `serverNames` option, and avoid defining the other ones.
!!! Note
Defining a specific `backupID` is especially needed when using one of the
following recovery targets: `targetName`, `targetXID`, and `targetImmediate`.
- In such cases, it is important to specify `backupID`, unless you are OK with
- the last available backup in the catalog.
+ In such cases, it's important to specify `backupID`, unless
+ the last available backup in the catalog is okay.
## Recovery targets
-Beyond PITR there are other recovery target criteria you can use.
-For more information on all the available Recovery Targets you can
-refer to the [PG4K Recovery targets](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/#point-in-time-recovery-pitr)
-documentation (end of paragraph).
+Beyond PITR, there are other recovery target criteria you can use.
+For more information on all the available recovery targets, see [EDB Postgres for Kubernetes recovery targets](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/recovery/#point-in-time-recovery-pitr) in the EDB Postgres for Kubernetes documentation.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
index a0cc3b93770..9f2323bb7dd 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
@@ -4,16 +4,16 @@ originalFilePath: 'src/samples.md'
---
!!! Important
- Examples available from this section are for demonstration and
+ The available examples are for demonstration and
experimentation purposes only.
-In this section, you can find some examples of configuration files to set up
-your EDB Postgres Distributed Cluster in a Kubernetes environment.
+These examples are configuration files for setting up
+your EDB Postgres Distributed cluster in a Kubernetes environment.
Flexible 3 regions
: [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml):
- a PGD cluster with 2 Data Groups and a global witness node spread across 3
- regions, where each Data Groups consists of 2 data nodes and a local witness
+ a PGD cluster with two data groups and a global witness node spread across three
+  regions, where each data group consists of two data nodes and a local witness
node.
-For a list of available options, please refer to the ["API Reference" page](pg4k-pgd.v1beta1.md).
+For a list of available options, see the ["API Reference" page](pg4k-pgd.v1beta1.md).
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
index 9b6b296253c..cd039bb084b 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/security.mdx
@@ -3,31 +3,30 @@ title: 'Security'
originalFilePath: 'src/security.md'
---
-This section contains information about security for EDB Postgres Distributed for Kubernetes,
-that are analyzed at 3 different layers: Code, Container and Cluster.
+Security for EDB Postgres Distributed for Kubernetes is
+analyzed at three layers: code, container, and cluster.
!!! Warning
- The information contained in this page must not exonerate you from
- performing regular InfoSec duties on your Kubernetes cluster. Please
- familiarize yourself with the ["Overview of Cloud Native Security"](https://kubernetes.io/docs/concepts/security/overview/)
- page from the Kubernetes documentation.
+ In addition to security practices described here, you must
+ perform regular InfoSec duties on your Kubernetes cluster.
+ Familiarize yourself with [Overview of Cloud Native Security](https://kubernetes.io/docs/concepts/security/overview/)
+ in the Kubernetes documentation.
!!! Seealso "About the 4C's Security Model"
- Please refer to ["The 4C's Security Model in Kubernetes"](https://www.enterprisedb.com/blog/4cs-security-model-kubernetes)
- blog article to get a better understanding and context of the approach EDB
- has taken with security in EDB Postgres Distributed for Kubernetes.
+ See [The 4C's Security Model in Kubernetes](https://www.enterprisedb.com/blog/4cs-security-model-kubernetes)
+ blog article for a better understanding and context of the approach EDB
+ takes with security in EDB Postgres Distributed for Kubernetes.
## Code
-Source code of EDB Postgres Distributed for Kubernetes is *systematically scanned* for static analysis purposes,
-including **security problems**, using a popular open-source linter for Go called
+Source code of EDB Postgres Distributed for Kubernetes is systematically scanned for static analysis purposes,
+including security problems. EDB uses a popular open-source linter for Go called
[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline.
-GolangCI-Lint can run several *linters* on the same source code.
+GolangCI-Lint can run several linters on the same source code.
-One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`,
-a linter that scans the abstract syntactic tree of the source against a set of rules aimed at
-the discovery of well-known vulnerabilities, threats, and weaknesses hidden in
-the code such as hard-coded credentials, integer overflows and SQL injections - to name a few.
+One of these is [Golang Security Checker](https://github.com/securego/gosec), or `gosec`.
+`gosec` is a linter that scans the abstract syntactic tree of the source against a set of rules aimed at discovering well-known vulnerabilities, threats, and weaknesses hidden in
+the code. These threats include hard-coded credentials, integer overflows, SQL injections, and others.
!!! Important
A failure in the static code analysis phase of the CI/CD pipeline is a blocker
@@ -36,104 +35,104 @@ the code such as hard-coded credentials, integer overflows and SQL injections -
## Container
-Every container image that is part of EDB Postgres Distributed for Kubernetes is automatically built via CI/CD pipelines following every commit.
-Such images include not only the operator's, but also the operands' - specifically every supported PostgreSQL version.
-Within the pipelines, images are scanned with:
+Every container image that's part of EDB Postgres Distributed for Kubernetes is built by way of CI/CD pipelines following every commit.
+Such images include not only those of the operator but also those of the operands, specifically every supported PostgreSQL version.
+In the pipelines, images are scanned with:
-- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
+- [Dockle](https://github.com/goodwithtech/dockle) for best practices in terms
of the container build process
-- [Clair](https://github.com/quay/clair): for vulnerabilities found in both the
+- [Clair](https://github.com/quay/clair) for vulnerabilities found in both the
underlying operating system and libraries and applications that they run
!!! Important
- All operand images are automatically rebuilt once a day by our pipelines in case
- of security updates at the base image and package level, providing **patch level updates**
+ All operand images are rebuilt once a day by our pipelines in case
+ of security updates at the base image and package level, providing patch level updates
for the container images that EDB distributes.
-The following guidelines and frameworks have been taken into account for container-level security:
+The following guidelines and frameworks were taken into account for container-level security:
-- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
+- The [Container Image Creation and Deployment Guide](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
-- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
+- The [CIS Benchmark for Docker](https://www.cisecurity.org/benchmark/docker/),
developed by the Center for Internet Security (CIS)
-!!! Seealso "About the Container level security"
- Please refer to ["Security and Containers in EDB Postgres Distributed for Kubernetes"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
- blog article for more information about the approach that EDB has taken on
+!!! Seealso "About the container-level security"
+ See the [Security and Containers in EDB Postgres Distributed for Kubernetes](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
+ blog article for more information about the approach that EDB takes on
security at the container level in EDB Postgres Distributed for Kubernetes.
## Cluster
Security at the cluster level takes into account all Kubernetes components that
-form both the control plane and the nodes, as well as the applications that run in
-the cluster (PostgreSQL included).
+form both the control plane and the nodes as well as the applications that run in
+the cluster, including PostgreSQL.
-### Role Based Access Control (RBAC)
+### Role-based access control (RBAC)
The operator interacts with the Kubernetes API server with a dedicated service
-account called `pgd-operator-controller-manager`. In Kubernetes this is installed
-by default in the `pgd-operator-system` namespace, with a cluster role
-binding between this service account and the `pgd-operator-controller-manager`
-cluster role which defines the set of rules/resources/verbs granted to the operator.
+account called pgd-operator-controller-manager. In Kubernetes this account is installed
+by default in the `pgd-operator-system` namespace. A cluster role
+binding connects this service account to the pgd-operator-controller-manager
+cluster role, which defines the set of rules, resources, and verbs granted to the operator.
-RedHat OpenShift directly manage the operator RBAC entities via [Operator
+Red Hat OpenShift directly manages the operator RBAC entities by way of [Operator
Lifecycle
-Manager](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html),
-allowing the user to grant permissions only where they are required,
+Manager (OLM)](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html). OLM
+allows you to grant permissions only where they're required,
implementing the principle of least privilege.
!!! Important
- The above permissions are exclusively reserved for the operator's service
- account to interact with the Kubernetes API server. They are not directly
+ These permissions are exclusively reserved for the operator's service
+ account to interact with the Kubernetes API server. They aren't directly
accessible by the users of the operator that interact only with `PGDGroup`
and `PGDGroupCleanup` resources.
-Below we provide some examples and, most importantly, the reasons why
+The following are some examples and, most importantly, the reasons why
EDB Postgres Distributed for Kubernetes requires full or partial management of standard Kubernetes
namespaced resources.
`jobs`
-: The operator needs to handle jobs to manage different `PGDGroup`'s phases.
+: The operator needs to handle jobs to manage different `PGDGroup` phases.
`poddisruptionbudgets`
-: The operator uses pod disruption budgets to make sure enough PGD Nodes
+: The operator uses pod disruption budgets to make sure enough PGD nodes
are kept active during maintenance operations.
`pods`
-: The operator needs to manage PGD Nodes (as a `Cluster` resource).
+: The operator needs to manage PGD nodes as a `Cluster` resource.
`secrets`
: Unless you provide certificates and passwords to your data nodes,
the operator adopts the "convention over configuration" paradigm by
- self-provisioning random generated passwords and TLS certificates, and by
+ self-provisioning random-generated passwords and TLS certificates and by
storing them in secrets.
`serviceaccounts`
: The operator needs to create a service account to
- enable the PGDGroup recovery job to retrieve the backup objects from
+ enable the `PGDGroup` recovery job to retrieve the backup objects from
the object store where they reside.
`services`
: The operator needs to control network access to the PGD cluster
- from applications, and properly manage
+ from applications and properly manage
failover/switchover operations in an automated way.
`statefulsets`
-: The operator needs to manage PGD Proxies.
+: The operator needs to manage PGD proxies.
`validatingwebhookconfigurations` and `mutatingwebhookconfigurations`
: The operator injects its self-signed webhook CA into both webhook
configurations, which are needed to validate and mutate all the resources it
- manages. For more details, please see the
+ manages. For more details, see the
[Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/).
To see all the permissions required by the operator, you can run `kubectl
describe clusterrole pgd-operator-manager-role`.
-PG4K-PGD internally manages the PGD nodes using the `Cluster` resource as defined by EDB Postgres
-for Kubernetes (PG4K). We refer you to the
-[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
-for the list of permissions used by PG4K operator service account.
+EDB Postgres Distributed for Kubernetes internally manages the PGD nodes using the `Cluster` resource as defined by EDB Postgres
+for Kubernetes. See the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/security/)
+for the list of permissions used by the EDB Postgres for Kubernetes operator service account.
### Calls to the API server made by the instance manager
@@ -145,53 +144,53 @@ a dedicated `ServiceAccount` created by the operator that shares the same
PostgreSQL `Cluster` resource name.
!!! Important
- The operand can only access a specific and limited subset of resources
- through the API server. A service account is the
- [recommended way to access the API server from within a Pod](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/).
+ The operand can access only a specific and limited subset of resources
+ through the API server. A service account is the recommended way to access the API server from within a pod. See the
+ [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/) for details.
-We refer you to the
+See the
[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/security/)
-for additional depth on the instance manager.
+for more information on the instance manager.
-### Pod Security Policies
+### Pod security policies
-A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
+A [pod security policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
is the Kubernetes way to define security rules and specifications that a pod needs to meet
to run in a cluster.
-For InfoSec reasons, every Kubernetes platform should implement them.
+For InfoSec reasons, every Kubernetes platform must implement them.
-EDB Postgres Distributed for Kubernetes does not require *privileged* mode for containers execution.
-The PostgreSQL containers run as `postgres` system user. No component whatsoever requires running as `root`.
+EDB Postgres Distributed for Kubernetes doesn't require privileged mode for container execution.
+The PostgreSQL containers run as the postgres system user. No component requires running as root.
-Likewise, Volumes access does not require *privileges* mode or `root` privileges either.
-Proper permissions must be properly assigned by the Kubernetes platform and/or administrators.
-The PostgreSQL containers run with a read-only root filesystem (i.e. no writable layer).
+Likewise, volume access doesn't require privileged mode or root privileges.
+Proper permissions must be assigned by the Kubernetes platform or administrators.
+The PostgreSQL containers run with a read-only root filesystem, that is, no writable layer.
The operator explicitly sets the required security contexts.
-On Red Hat OpenShift, Cloud Native PostgreSQL runs in `restricted` security context constraint,
+On Red Hat OpenShift, Cloud Native PostgreSQL runs in the `restricted` security context constraint,
the most restrictive one. The goal is to limit the execution of a pod to a namespace allocated UID
and SELinux context.
!!! Seealso "Security Context Constraints in OpenShift"
- For further information on Security Context Constraints (SCC) in
- OpenShift, please refer to the
- ["Managing SCC in OpenShift"](https://www.openshift.com/blog/managing-sccs-in-openshift)
+ For more information on security context constraints (SCC) in
+ OpenShift, see the
+ [Managing SCC in OpenShift](https://www.openshift.com/blog/managing-sccs-in-openshift)
article.
-!!! Warning "Security Context Constraints and namespaces"
- As stated by [Openshift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html#role-based-access-to-ssc_configuring-internal-oauth)
- SCCs are not applied in the default namespaces (`default`, `kube-system`,
- `kube-public`, `openshift-node`, `openshift-infra`, `openshift`) and those
- should not be used to run pods. CNP clusters deployed in those namespaces
+!!! Warning "Security context constraints and namespaces"
+ As stated in the [Openshift documentation](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html#role-based-access-to-ssc_configuring-internal-oauth),
+ SCCs aren't applied in the default namespaces (`default`, `kube-system`,
+    `kube-public`, `openshift-node`, `openshift-infra`, `openshift`). Don't use those
+    namespaces to run pods. CNP clusters deployed in those namespaces
will be unable to start due to missing SCCs.
-#### Exposed Ports
+#### Exposed ports
-EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager and operand
-levels, as listed in the table below:
+EDB Postgres Distributed for Kubernetes exposes ports at operator, instance manager, and operand
+levels, as shown in the table.
| System | Port number | Exposing | Name | Certificates | Authentication |
| :--------------- | :---------- | :------------------ | :--------------- | :----------- | :------------- |
@@ -203,26 +202,26 @@ levels, as listed in the table below:
### PGD
-The current implementation of EDB Postgres Distributed for Kubernetes automatically creates
-passwords for the `postgres` superuser and the database owner.
+The current implementation of EDB Postgres Distributed for Kubernetes creates
+passwords for the postgres superuser and the database owner.
-As far as encryption of password is concerned, EDB Postgres Distributed for Kubernetes follows
-the default behavior of PostgreSQL: starting from PostgreSQL 14,
-`password_encryption` is by default set to `scram-sha-256`, while on earlier
-versions it is set to `md5`.
+As far as encryption of passwords is concerned, EDB Postgres Distributed for Kubernetes follows
+the default behavior of PostgreSQL: starting with PostgreSQL 14,
+`password_encryption` is by default set to `scram-sha-256`. On earlier
+versions, it's set to `md5`.
!!! Important
- Please refer to the ["Connection DSNs and SSL"](https://www.enterprisedb.com/docs/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
- section in the PGD documentation for details.
+ See [Connection DSNs and SSL](/pgd/latest/nodes/#connection-dsns-and-ssl-tls)
+ in the PGD documentation for details.
-You can disable management of the `postgres` user password via secrets by setting
+You can disable management of the postgres user password using secrets by setting
`enableSuperuserAccess` to `false` in the `cnp` section of the spec.
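A minimal, hypothetical fragment of a `PGDGroup` manifest with this setting (the group name is a placeholder):

```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: pgdgroup-example
spec:
  cnp:
    # Disable secret-based management of the postgres superuser password.
    enableSuperuserAccess: false
```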
!!! Note
The operator supports toggling the `enableSuperuserAccess` option. When you
- disable it on a running cluster, the operator will ignore the content of the secret,
- remove it (if previously generated by the operator) and set the password of the
- `postgres` user to `NULL` (de facto disabling remote access through password authentication).
+    disable it on a running cluster, the operator ignores the content of the secret,
+    removes it (if previously generated by the operator), and sets the password of the
+    postgres user to `NULL`, in effect disabling remote access through password authentication.
### Storage
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
index 3f652fe5436..23eaed7105d 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/ssl_connections.mdx
@@ -1,17 +1,17 @@
---
-title: 'Client TLS/SSL Connections'
+title: 'Client TLS/SSL connections'
originalFilePath: 'src/ssl_connections.md'
---
!!! Seealso "Certificates"
- Please refer to the ["Certificates"](certificates.md)
- page for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
+ See [Certificates](certificates.md)
+ for more details on how EDB Postgres Distributed for Kubernetes supports TLS certificates.
-The EDB Postgres Distributed for Kubernetes operator has been designed to work with TLS/SSL for both encryption in transit and
-authentication, on server and client sides. PGD nodes are created as Cluster
-resources using the EDB Postgres for Kubernetes (PG4K) operator, and this
-includes the deployment of a Certification
-Authority (CA) to create and sign TLS client certificates.
+The EDB Postgres Distributed for Kubernetes operator was designed to work with TLS/SSL for both encryption in transit and
+authentication on server and client sides. PGD nodes are created as cluster
+resources using the EDB Postgres for Kubernetes operator. This
+includes deploying a certification
+authority (CA) to create and sign TLS client certificates.
-Please refer to the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
-for further information on issuers and certificates.
+See the [EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/ssl_connections/)
+for more information on issuers and certificates.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
index 538d13427b7..4b992ca6b69 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/use_cases.mdx
@@ -3,19 +3,18 @@ title: 'Use cases'
originalFilePath: 'src/use_cases.md'
---
-EDB Postgres Distributed for Kubernetes has been designed to work with applications
-that reside in the same Kubernetes cluster, for a full cloud native
+EDB Postgres Distributed for Kubernetes was designed to work with applications
+that reside in the same Kubernetes cluster for a full cloud native
experience.
However, it might happen that, while the database can be hosted
-inside a Kubernetes cluster, applications cannot be containerized
-at the same time and need to run in a *traditional environment* such
+inside a Kubernetes cluster, applications can't be containerized
+at the same time and need to run in a traditional environment such
as a VM.
-We reproduce here a summary of the basic considerations, and refer
-you to the
-[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/use_cases/)
-for further depth.
+The following is a summary of the basic considerations. See the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/use_cases/)
+for more detail.
## Case 1: Applications inside Kubernetes
@@ -24,21 +23,21 @@ namespace inside a Kubernetes cluster.
![Application and Database inside Kubernetes](./images/apps-in-k8s.png)
-The application, normally stateless, is managed as a standard `Deployment`,
-with multiple replicas spread over different Kubernetes node, and internally
-exposed through a `ClusterIP` service.
+The application, normally stateless, is managed as a standard deployment,
+with multiple replicas spread over different Kubernetes nodes and internally
+exposed through a ClusterIP service.
-The service is exposed externally to the end user through an `Ingress` and the
-provider's load balancer facility, via HTTPS.
+The service is exposed externally to the end user through an Ingress and the
+provider's load balancer facility by way of HTTPS.
## Case 2: Applications outside Kubernetes
-Another possible use case is to manage your Postgres Distributed database inside
-Kubernetes, while having your applications outside of it (for example in a
-virtualized environment). In this case, Postgres Distributed is represented by an IP
-address (or host name) and a TCP port, corresponding to the defined Ingress
+Another possible use case is to manage your PGD database inside
+Kubernetes while having your applications outside of it, for example, in a
+virtualized environment. In this case, PGD is represented by an IP
+address or host name and a TCP port, corresponding to the defined Ingress
resource in Kubernetes.
-The application can still benefit from a TLS connection to Postgres Distributed.
+The application can still benefit from a TLS connection to PGD.
![Application outside Kubernetes](./images/apps-outside-k8s.png)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
index 7ec726a1e7f..ebd37d31d23 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/using_pgd.mdx
@@ -1,34 +1,34 @@
---
-title: 'Managing EDB Postgres Distributed databases'
+title: 'Managing EDB Postgres Distributed (PGD) databases'
originalFilePath: 'src/using_pgd.md'
---
As described in the [architecture document](architecture.md),
EDB Postgres Distributed for Kubernetes is an operator created to deploy
-Postgres Distributed (PGD) databases.
+PGD databases.
It provides an alternative over deployment with TPA, and by leveraging the
Kubernetes ecosystem, it can offer self-healing and declarative control.
-The operator is also responsible of the backup and restore operations
-(see the [backup](backup.md) document.)
+The operator is also responsible for the backup and restore operations.
+See [Backup](backup.md).
-However, many of the operations and control of PGD clusters are not
+However, many of the operations and control of PGD clusters aren't
managed by the operator.
-The pods created by EDB Postgres Distributed for Kubernetes come with
-[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed, and
-this is the tool that can be used, for example, to execute a switchover.
+The pods created by EDB Postgres Distributed for Kubernetes come with the
+[PGD CLI](https://www.enterprisedb.com/docs/pgd/latest/cli/) installed. You can use
+this tool, for example, to execute a switchover.
## PGD CLI
!!! Warning
- The PGD CLI should not be used to create/delete resources. For example,
- the `create-proxy`, `delete-proxy` commands should be avoided.
+ Don't use the PGD CLI to create and delete resources. For example,
+ avoid the `create-proxy` and `delete-proxy` commands.
Provisioning of resources is under the control of the operator, and manual
- creation/deletion is not supported.
+ creation and deletion isn't supported.
-As an example, let's execute a switchover command.
+As an example, execute a switchover command.
-It is recommendable to use the PGD CLI from proxy pods. Let's find them.
-You can get a pod listing for your cluster:
+We recommend that you use the PGD CLI from proxy pods. To find them,
+get a pod listing for your cluster:
```shell
kubectl get pods -n my-namespace
@@ -41,14 +41,14 @@ location-a-proxy-0 1/1 Running 0 2h
location-a-proxy-1 1/1 Running 0 2h
```
-The proxy nodes have `proxy` in the name. Let's choose one and get a command
+The proxy nodes have `proxy` in the name. Choose one, and get a command
prompt in it:
```shell
kubectl exec -n my-namespace -ti location-a-proxy-0 -- bash
```
-You should now have a bash session open with the proxy pod. The `pgd` command
+You now have a bash session open with the proxy pod. The `pgd` command
is available:
```shell
@@ -91,37 +91,37 @@ location-a-3 1403922770 location-a data ACTIVE ACTIVE Up 3
## Accessing the database
-In the [use cases document](use_cases.md) you can find a discussion on using the
-database within the Kubernetes cluster vs. from outside, and in the
-[connectivity document](connectivity.md), you can find a discussion on services,
+[Use cases](use_cases.md) discusses using the
+database within the Kubernetes cluster versus from outside. In
+[Connectivity](connectivity.md), you can find a discussion on services,
which is relevant for accessing the database from applications.
-However you implement your system, your applications should use the proxy
-service to connect, in order to reap the benefits of Postgres Distributed, and
+However you implement your system, your applications must connect through the proxy
+service to reap the benefits of PGD and
of the increased self-healing capabilities added by the EDB Postgres Distributed
for Kubernetes operator.
!!! Important
- Note that, as per the EDB Postgres for Kubernetes defaults, data nodes are
- created with a database called `app`, owned by a user named `app`, in
- contrast to the `bdrdb` database you'll find in the EDB Postgres
- Distributed documentation. This
- is configurable by the user, in the `cnp` section of the manifest.
- See the [EDB Postgres for Kubernetes bootstrapping document](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/bootstrap/)
- for reference.
+ As per the EDB Postgres for Kubernetes defaults, data nodes are
+ created with a database called `app` and owned by a user named `app`, in
+ contrast to the `bdrdb` database described in the EDB Postgres
+ Distributed documentation. You can configure these values
+ in the `cnp` section of the manifest.
+ For reference, see [Bootstrap](/postgres_for_kubernetes/latest/bootstrap/) in the EDB Postgres for Kubernetes
+ documentation.
-You may, however, want access to your PGD data nodes for administrative tasks,
-using the `psql` CLI.
+You might, however, want access to your PGD data nodes for administrative tasks,
+using the psql CLI.
-As we did in the previous section on using the PGD CLI, we can get a pod listing
-for our PGD cluster, and `kubectl exec` into a data node:
+You can get a pod listing
+for your PGD cluster and `kubectl exec` into a data node:
```shell
kubectl exec -n my-namespace -ti location-a-1-1 -- psql
```
-In the familiar territory of `psql`, you should remember that the default
-created database is named `app` (see warning above).
+In the familiar territory of psql, remember that the default
+created database is named `app` (see the previous important note).
```terminal
postgres=# \c app
@@ -139,10 +139,10 @@ peer_target_state_name | ACTIVE
<- snipped ->
```
-For your applications, of course, you should use the non-privileged role (`app`
+For your applications, use the non-privileged role (`app`
by default).
-You will need the user credentials, which are stored in a Kubernetes secret:
+You need the user credentials, which are stored in a Kubernetes secret:
```shell
kubectl get secrets
@@ -152,7 +152,7 @@ NAME TYPE DATA AGE
location-a-app kubernetes.io/basic-auth 2 2h
```
-This secret contains the username and password needed for the postgres DSN,
+This secret contains the username and password needed for the Postgres DSN,
encoded in base64:
```shell
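# Hypothetical example (secret and namespace names are placeholders):
# decode the username and password stored in the basic-auth secret.
kubectl get secret location-a-app -n my-namespace -o jsonpath='{.data.username}' | base64 -d
kubectl get secret location-a-app -n my-namespace -o jsonpath='{.data.password}' | base64 -d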
From 41aa8ec3cfbbae059071e782c57b4c88b6d75257 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Fri, 16 Feb 2024 02:05:38 +0000
Subject: [PATCH 09/39] Standardize release notes
---
.../1_0_rel_notes.mdx} | 8 ++------
.../1/rel_notes/index.mdx | 16 ++++++++++++++++
2 files changed, 18 insertions(+), 6 deletions(-)
rename product_docs/docs/postgres_distributed_for_kubernetes/1/{release_notes.mdx => rel_notes/1_0_rel_notes.mdx} (87%)
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
similarity index 87%
rename from product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
rename to product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
index c792c26aacb..00d889baad9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/release_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
@@ -1,12 +1,8 @@
---
-title: 'Release notes'
-originalFilePath: 'src/release_notes.md'
+title: 'EDB Postgres Distributed for Kubernetes 1.0 release notes'
+navTitle: "Version 1.0"
---
-History of user-visible changes for EDB Postgres Distributed for Kubernetes.
-
-## Version 1.0.0
-
**Release date:** 15 February 2024
This is the first major stable release of EDB Postgres Distributed for
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
new file mode 100644
index 00000000000..703df64f7cf
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
@@ -0,0 +1,16 @@
+---
+title: EDB Postgres Distributed for Kubernetes release notes
+navTitle: "Release notes"
+redirects:
+- ../release_notes
+navigation:
+- 1_0_rel_notes
+---
+
+The EDB Postgres Distributed for Kubernetes documentation describes the major version of EDB Postgres Distributed for Kubernetes, including minor releases and patches. The release notes provide information on what is new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature.
+
+| Version | Release date |
+| -------------------------- | ------------ |
+| [1.0.0](1_0_rel_notes) | 15 Feb 2024 |
+
+
From 6bcd0f62a4acbdfc33a1b629232dd1de41575665 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 20 Mar 2024 05:01:39 +0000
Subject: [PATCH 10/39] DOCS-192: link fixes
---
.../docs/postgres_distributed_for_kubernetes/1/connectivity.mdx | 2 +-
.../1/installation_upgrade.mdx | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index 0c112a9338e..47ad4138964 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -34,7 +34,7 @@ Each service is generated from a customizable template in the `.spec.connectivit
section of the manifest.
All services must be reachable using their FQDN
-from all the PGD nodes in all the Kubernetes clusters. See [Domain names resolution](#domain-names-resolutions).
+from all the PGD nodes in all the Kubernetes clusters. See [Domain names resolution](#domain-names-resolution).
EDB Postgres Distributed for Kubernetes provides a service templating framework that gives you the
availability to easily customize services at the following three levels:
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
index 6b34d067b3c..a9cb2dcfc5d 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
@@ -63,7 +63,7 @@ kubectl apply -f \
With the operators and a self-signed cert issuer deployed, you can start
creating PGD clusters. See the
-[Quick start](quickstart.md#part-3-deploy-a-pgd-cluster) for an example.
+[Quick start](quickstart.md#part-3---deploy-a-pgd-cluster) for an example.
## Red Hat OpenShift
From 27f4da95ce1496fbc42933c728d4983561b3d4fd Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 8 Apr 2024 13:29:57 +0200
Subject: [PATCH 11/39] Known Issues
---
.../1/index.mdx | 1 +
.../1/known_issues.mdx | 21 +++++++++++++++++++
2 files changed, 22 insertions(+)
create mode 100644 product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
index ac041c7b3b7..009e13027cf 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/index.mdx
@@ -28,6 +28,7 @@ navigation:
- samples
- pg4k-pgd.v1beta1
- supported_versions
+ - known_issues
directoryDefaults:
iconName: logos/KubernetesMono
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
new file mode 100644
index 00000000000..94f529dea26
--- /dev/null
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -0,0 +1,21 @@
+---
+title: 'Known issues'
+---
+
+This is a currently known issue in EDB Postgres Distributed for Kubernetes.
+
+## Backup
+
+- To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
+ which then creates all necessary services for the implementation of a distributed architecture.
+
+ If you have added a `spec.backup` section to this section with the goal of setting up a backup configuration, the backup will fail
+ unless you also set the `spec.backup.cron.schedule` value.
+
+ Error output:
+
+ ```
+ The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
+ ```
+
+ To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
\ No newline at end of file
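As an illustration of the workaround (a hypothetical sketch; the six-field cron format and the value shown are assumptions to adapt to your requirements):

```yaml
spec:
  backup:
    cron:
      # Placeholder: take a backup every day at midnight.
      schedule: "0 0 0 * * *"
```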
From 1dd17d32aed473a352845809baf338ac1f0f7640 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 8 Apr 2024 16:09:01 +0200
Subject: [PATCH 12/39] fixing format
---
.../1/known_issues.mdx | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 94f529dea26..155bbba147c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -6,16 +6,16 @@ This is a currently known issue in EDB Postgres Distributed for Kubernetes.
## Backup
-- To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
- which then creates all necessary services for the implementation of a distributed architecture.
+- To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
+ which then creates all necessary services for the implementation of a distributed architecture.
- If you have added a `spec.backup` section to this section with the goal of setting up a backup configuration, the backup will fail
- unless you also set the `spec.backup.cron.schedule` value.
+ If you have added a `spec.backup` section to this section with the goal of setting up a backup configuration, the backup will fail
+ unless you also set the `spec.backup.cron.schedule` value.
- Error output:
+ Error output:
- ```
- The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
- ```
+ ```
+ The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
+ ```
- To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
\ No newline at end of file
+ To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
\ No newline at end of file
From 638fd17c0e553053865f7ce15907fba14ed8ed9b Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 8 Apr 2024 16:27:58 +0200
Subject: [PATCH 13/39] Update known_issues.mdx
---
.../postgres_distributed_for_kubernetes/1/known_issues.mdx | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 155bbba147c..139a1d7e958 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -9,7 +9,7 @@ This is a currently known issue in EDB Postgres Distributed for Kubernetes.
- To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
which then creates all necessary services for the implementation of a distributed architecture.
- If you have added a `spec.backup` section to this section with the goal of setting up a backup configuration, the backup will fail
+ If you have added a `spec.backup` section to this `Cluster` object with the goal of setting up a backup configuration, the backup will fail
unless you also set the `spec.backup.cron.schedule` value.
Error output:
@@ -18,4 +18,4 @@ This is a currently known issue in EDB Postgres Distributed for Kubernetes.
The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
```
- To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
\ No newline at end of file
+ To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
From 949838cc976386d4d2c22688d7fff127c7f1f952 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Wed, 10 Apr 2024 09:57:36 +0200
Subject: [PATCH 14/39] Update known_issues.mdx
removing bullet point as there is only one issue.
---
.../1/known_issues.mdx | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 139a1d7e958..247f1dc9fb6 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -6,16 +6,16 @@ This is a currently known issue in EDB Postgres Distributed for Kubernetes.
## Backup
-- To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
- which then creates all necessary services for the implementation of a distributed architecture.
+To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
+which then creates all necessary services for the implementation of a distributed architecture.
- If you have added a `spec.backup` section to this `Cluster` object with the goal of setting up a backup configuration, the backup will fail
- unless you also set the `spec.backup.cron.schedule` value.
+If you have added a `spec.backup` section to this `Cluster` object with the goal of setting up a backup configuration, the backup will fail
+unless you also set the `spec.backup.cron.schedule` value.
- Error output:
+Error output:
- ```
- The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
- ```
+```
+The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
+```
- To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
+To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
From a61b6ea6a7e2f4a559cf4986fdc320e9051cd8f4 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 15 Apr 2024 10:13:01 +0200
Subject: [PATCH 15/39] adding new known issues for DOCS-363 and DOCS-364
---
.../1/installation_upgrade.mdx | 2 +-
.../1/known_issues.mdx | 25 ++++++++++++++++---
2 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
index a9cb2dcfc5d..58770407271 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/installation_upgrade.mdx
@@ -1,5 +1,5 @@
---
-title: 'Installation and upgrades'
+title: 'Installation'
originalFilePath: 'src/installation_upgrade.md'
---
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 247f1dc9fb6..b4bfef5f229 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -2,7 +2,12 @@
title: 'Known issues'
---
-This is a currently known issue in EDB Postgres Distributed for Kubernetes.
+These known issues and limitations are in the current release of EDB Postgres Distributed for Kubernetes.
+
+## Upgrades
+
+This version of EDB Postgres Distributed for Kubernetes does **not support** migrating from existing Postgres databases, nor upgrading
+any of its components to more recent image versions.
## Backup
@@ -12,10 +17,24 @@ which then creates all necessary services for the implementation of a distribute
If you have added a `spec.backup` section to this `Cluster` object with the goal of setting up a backup configuration, the backup will fail
unless you also set the `spec.backup.cron.schedule` value.
-Error output:
+Error output example:
```
The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
```
-To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
+!!! Note Workaround
+ To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
+
+## Connectivity with PgBouncer
+
+EDB Postgres Distributed for Kubernetes does not support using PgBouncer to manage and pool client connection requests. This applies to
+both the open-source and EDB versions of PgBouncer.
+
+## Known issues in EDB Postgres Distributed
+
+All issues known for the EDB Postgres Distributed version that you include in your deployment also affect your EDB Postgres Distributed
+for Kubernetes instance.
+
+For example, if the EDB Postgres Distributed version you are using is 5.x, your EDB Postgres Distributed for Kubernetes instance will be affected by
+[these 5.x known issues](/pgd/latest/known_issues/).
\ No newline at end of file
From 9336583595cf1745ac34afb9ac9d36627beddab3 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Tue, 16 Apr 2024 14:00:09 +0200
Subject: [PATCH 16/39] tech review feedback
---
.../1/known_issues.mdx | 32 +++++++++++++++----
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index b4bfef5f229..15a3fb5696a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -9,12 +9,12 @@ These known issues and limitations are in the current release of EDB Postgres Di
This version of EDB Postgres Distributed for Kubernetes does **not support** migrating from existing Postgres databases, nor upgrading
any of its components to more recent image versions.
-## Backup
+## Backup operations
To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
which then creates all necessary services for the implementation of a distributed architecture.
-If you have added a `spec.backup` section to this `Cluster` object with the goal of setting up a backup configuration, the backup will fail
+If you have added a `spec.backup` section to this `PGDGroup` object with the goal of setting up a backup configuration, the backup will fail
unless you also set the `spec.backup.cron.schedule` value.
Error output example:
@@ -24,17 +24,35 @@ The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: ""
```
!!! Note Workaround
- To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements.
+ To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements, for example:
+
+ ```
+ spec:
+ instances: 3
+ proxyInstances: 2
+ pgd:
+ parentGroup:
+ create: true
+ name: world
+ backup:
+ configuration:
+ barmanObjectStore:
+ ...
+ cron:
+ suspend: false
+ immediate: true
+ schedule: "0 */5 * * * *"
+ ```
## Connectivity with PgBouncer
-EDB Postgres Distributed for Kubernetes does not support using PgBouncer to manage and pool client connection requests. This applies to
+EDB Postgres Distributed for Kubernetes does not support using PgBouncer to pool client connection requests. This applies to
both the open-source and EDB versions of PgBouncer.
-## Known issues in EDB Postgres Distributed
+## Known issues and limitations in EDB Postgres Distributed
-All issues known for the EDB Postgres Distributed version that you include in your deployment also affect your EDB Postgres Distributed
+All issues and limitations known for the EDB Postgres Distributed version that you include in your deployment also affect your EDB Postgres Distributed
for Kubernetes instance.
For example, if the EDB Postgres Distributed version you are using is 5.x, your EDB Postgres Distributed for Kubernetes instance will be affected by
-[these 5.x known issues](/pgd/latest/known_issues/).
\ No newline at end of file
+these [5.x known issues](/pgd/latest/known_issues/) and [5.x limitations](/pgd/latest/limitations/).
\ No newline at end of file
From ae1624af3027309fb7f7acf95287637481f93cba Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Tue, 16 Apr 2024 15:07:19 +0200
Subject: [PATCH 17/39] adding section for DOCS-371 + feedback implementation
---
.../1/known_issues.mdx | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 15a3fb5696a..ae215a2aa53 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -1,5 +1,5 @@
---
-title: 'Known issues'
+title: 'Known issues and limitations'
---
These known issues and limitations are in the current release of EDB Postgres Distributed for Kubernetes.
@@ -9,6 +9,16 @@ These known issues and limitations are in the current release of EDB Postgres Di
This version of EDB Postgres Distributed for Kubernetes does **not support** migrating from existing Postgres databases, nor upgrading
any of its components to more recent image versions.
+## Data encryption
+
+EDB Postgres Distributed for Kubernetes does not support enabling [Transparent Data Encryption](/tde/latest/) (TDE) to encrypt user data of EDB Postgres Advanced
+and EDB Postgres Extended databases.
+
+## Connectivity with PgBouncer
+
+EDB Postgres Distributed for Kubernetes does not support using [PgBouncer](/pgbouncer/latest/) to pool client connection requests.
+This applies to both the open-source and EDB versions of PgBouncer.
+
## Backup operations
To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
@@ -44,11 +54,6 @@ The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: ""
schedule: "0 */5 * * * *"
```
-## Connectivity with PgBouncer
-
-EDB Postgres Distributed for Kubernetes does not support using PgBouncer to pool client connection requests. This applies to
-both the open-source and EDB versions of PgBouncer.
-
## Known issues and limitations in EDB Postgres Distributed
All issues and limitations known for the EDB Postgres Distributed version that you include in your deployment also affect your EDB Postgres Distributed
From f428776f2a8fddb5b91c491cec3318da44d0b83d Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Tue, 16 Apr 2024 18:45:12 +0200
Subject: [PATCH 18/39] technical review
---
.../1/known_issues.mdx | 50 +++++++++----------
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index ae215a2aa53..35746b81c66 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -6,14 +6,9 @@ These known issues and limitations are in the current release of EDB Postgres Di
## Upgrades
-This version of EDB Postgres Distributed for Kubernetes does **not support** migrating from existing Postgres databases, nor upgrading
+This version of EDB Postgres Distributed for Kubernetes ** does not support** migrating from existing Postgres databases, nor upgrading
any of its components to more recent image versions.
-## Data encryption
-
-EDB Postgres Distributed for Kubernetes does not support enabling [Transparent Data Encryption](/tde/latest/) (TDE) to encrypt user data of EDB Postgres Advanced
-and EDB Postgres Extended databases.
-
## Connectivity with PgBouncer
EDB Postgres Distributed for Kubernetes does not support using [PgBouncer](/pgbouncer/latest/) to pool client connection requests.
@@ -21,7 +16,7 @@ This applies to both the open-source and EDB versions of PgBouncer.
## Backup operations
-To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `Cluster` YAML object to each Kubernetes cluster,
+To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `PGDGroup` YAML object to each Kubernetes cluster,
which then creates all necessary services for the implementation of a distributed architecture.
If you have added a `spec.backup` section to this `PGDGroup` object with the goal of setting up a backup configuration, the backup will fail
@@ -33,26 +28,27 @@ Error output example:
The PGDGroup "region-a" is invalid: spec.backup.cron.schedule: Invalid value: "": Empty spec string
```
-!!! Note Workaround
- To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements, for example:
-
- ```
- spec:
- instances: 3
- proxyInstances: 2
- pgd:
- parentGroup:
- create: true
- name: world
- backup:
- configuration:
- barmanObjectStore:
- ...
- cron:
- suspend: false
- immediate: true
- schedule: "0 */5 * * * *"
- ```
+### Workaround
+
+To work around this issue, add a `spec.backup.cron.schedule` section with a schedule that meets your requirements, for example:
+
+```yaml
+spec:
+ instances: 3
+ proxyInstances: 2
+ pgd:
+ parentGroup:
+ create: true
+ name: world
+ backup:
+ configuration:
+ barmanObjectStore:
+ ...
+ cron:
+ suspend: false
+ immediate: true
+ schedule: "0 */5 * * * *"
+ ```
## Known issues and limitations in EDB Postgres Distributed
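For readers applying the workaround above, the schedule string follows the six-field Go `cron` format referenced later in this patch series (seconds, minutes, hours, day of month, month, day of week), so `"0 */5 * * * *"` runs at second 0 of every fifth minute. A minimal sketch of the same stanza with a once-a-day schedule, purely as an illustration:

```yaml
backup:
  cron:
    suspend: false
    immediate: true            # take a first backup as soon as the resource is created
    schedule: "0 0 0 * * *"    # second 0, minute 0, hour 0, i.e. once a day at midnight
```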
From 6c5f8149bd3b1aea4d0ccbb4a24973aff06eb689 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 08:24:23 +0200
Subject: [PATCH 19/39] Corrected typos on known_issues.mdx
---
.../postgres_distributed_for_kubernetes/1/known_issues.mdx | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 35746b81c66..b934d89e18a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -6,7 +6,7 @@ These known issues and limitations are in the current release of EDB Postgres Di
## Upgrades
-This version of EDB Postgres Distributed for Kubernetes ** does not support** migrating from existing Postgres databases, nor upgrading
+This version of EDB Postgres Distributed for Kubernetes **does not support** migrating from existing Postgres databases, nor upgrading
any of its components to more recent image versions.
## Connectivity with PgBouncer
@@ -48,7 +48,7 @@ spec:
suspend: false
immediate: true
schedule: "0 */5 * * * *"
- ```
+```
## Known issues and limitations in EDB Postgres Distributed
@@ -56,4 +56,4 @@ All issues and limitations known for the EDB Postgres Distributed version that y
for Kubernetes instance.
For example, if the EDB Postgres Distributed version you are using is 5.x, your EDB Postgres Distributed for Kubernetes instance will be affected by
-these [5.x known issues](/pgd/latest/known_issues/) and [5.x limitations](/pgd/latest/limitations/).
\ No newline at end of file
+these [5.x known issues](/pgd/latest/known_issues/) and [5.x limitations](/pgd/latest/limitations/).
From f2664c4a0085633654cb938d91c32b996ded17b3 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 08:25:32 +0200
Subject: [PATCH 20/39] Update known_issues.mdx
---
.../postgres_distributed_for_kubernetes/1/known_issues.mdx | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index b934d89e18a..05c04a344a8 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -19,8 +19,8 @@ This applies to both the open-source and EDB versions of PgBouncer.
To configure an EDB Postgres Distributed for Kubernetes environment, you must apply a `PGDGroup` YAML object to each Kubernetes cluster,
which then creates all necessary services for the implementation of a distributed architecture.
-If you have added a `spec.backup` section to this `PGDGroup` object with the goal of setting up a backup configuration, the backup will fail
-unless you also set the `spec.backup.cron.schedule` value.
+If you have added a `spec.backup` section to this `PGDGroup` object with the goal of setting up a backup configuration,
+the backup will fail unless you also set the `spec.backup.cron.schedule` value.
Error output example:
From df0d1ec0849a98c1f9c19bb3830e01d1320923f7 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 08:26:17 +0200
Subject: [PATCH 21/39] Update known_issues.mdx
---
.../1/known_issues.mdx | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 05c04a344a8..71f0a228324 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -52,8 +52,8 @@ spec:
## Known issues and limitations in EDB Postgres Distributed
-All issues and limitations known for the EDB Postgres Distributed version that you include in your deployment also affect your EDB Postgres Distributed
-for Kubernetes instance.
+All issues and limitations known for the EDB Postgres Distributed version that you include in your deployment also affect
+your EDB Postgres Distributed for Kubernetes instance.
-For example, if the EDB Postgres Distributed version you are using is 5.x, your EDB Postgres Distributed for Kubernetes instance will be affected by
-these [5.x known issues](/pgd/latest/known_issues/) and [5.x limitations](/pgd/latest/limitations/).
+For example, if the EDB Postgres Distributed version you are using is 5.x, your EDB Postgres Distributed for Kubernetes
+instance will be affected by these [5.x known issues](/pgd/latest/known_issues/) and [5.x limitations](/pgd/latest/limitations/).
From 2319b7aeb1a900b715aca911612e8346517133b1 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 15:44:58 +0200
Subject: [PATCH 22/39] Implementing feedback on upgrades from Jaime
---
.../1/known_issues.mdx | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
index 71f0a228324..f15f968f9a9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/known_issues.mdx
@@ -4,10 +4,13 @@ title: 'Known issues and limitations'
These known issues and limitations are in the current release of EDB Postgres Distributed for Kubernetes.
-## Upgrades
+## Postgres major version upgrades
-This version of EDB Postgres Distributed for Kubernetes **does not support** migrating from existing Postgres databases, nor upgrading
-any of its components to more recent image versions.
+This version of EDB Postgres Distributed for Kubernetes **doesn't support** major version upgrades of Postgres.
+
+## Data migration
+
+This version of EDB Postgres Distributed for Kubernetes **doesn't support** migrating from existing Postgres databases.
## Connectivity with PgBouncer
From c9bc2b3c559c861019a7eb5f3e3a48a4b84e460b Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 17:45:14 +0200
Subject: [PATCH 23/39] initial draft of RN notes with teams style guide
---
.../1/rel_notes/1_0_rel_notes.mdx | 58 +++++++++++--------
1 file changed, 33 insertions(+), 25 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
index 00d889baad9..525c6b506ea 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
@@ -1,28 +1,36 @@
---
-title: 'EDB Postgres Distributed for Kubernetes 1.0 release notes'
-navTitle: "Version 1.0"
+title: 'EDB Postgres Distributed for Kubernetes 1.0.0 release notes'
+navTitle: "Version 1.0.0"
---
-**Release date:** 15 February 2024
-
-This is the first major stable release of EDB Postgres Distributed for
-Kubernetes, a Kubernetes operator to deploy and manage
-EDB Postgres Distributed clusters.
-
-The operator implements the `PGDGroup` custom resource
-in the API group `pgd.k8s.enterprisedb.io`.
-This resource can be used to create and manage EDB Postgres Distributed clusters
-inside Kubernetes with capabilities including:
-
-- Deployment of EDB Postgres Distributed clusters with versions 5 and later
-- Additional self-healing capability on top of that of Postgres Distributed,
- such as recovery and restart of failed PGD nodes
-- Definition of the services to connect applications to the
- write leader of each PGD group
-- Implementation of Raft subgroups
-- Support for Local Persistent Volumes with PVC templates
-- Reuse of Persistent Volumes storage in Pods
-- TLS connections and client certificate authentication
-- Continuous backup to an S3 compatible object store
-- Pause and resume a PGD cluster, saving computational resources by temporarily
- removing database pods while keeping the database PVCs.
+Released: 24 Apr 2024
+
+This is the first major stable release of EDB Postgres Distributed for Kubernetes, a Kubernetes operator to deploy
+and manage EDB Postgres Distributed clusters.
+
+## Highlights of EDB Postgres Distributed for Kubernetes 1.0.0
+
+The operator implements the `PGDGroup` custom resource in the API group `pgd.k8s.enterprisedb.io`. You can use this resource
+to create and manage EDB Postgres Distributed clusters inside Kubernetes with capabilities including:
+
+* Deployment of EDB Postgres Distributed clusters with versions 5 and later.
+* Additional self-healing capability on top of that of Postgres Distributed, such as recovery and restart of failed PGD nodes.
+* Definition of the services to connect applications to the write leader of each PGD group.
+
+!!! Note
+The EDB Postgres Distributed for Kubernetes operator leverages
+[EDB Postgres for Kubernetes](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/) (PG4K) and inherits many
+of that project's capabilities. EDB Postgres Distributed for Kubernetes version 1.0.0 is based, specifically, on release 1.22 of PG4K.
+Please refer to the [PG4K release notes](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/rel_notes/) for more details.
+!!!
+
+## Features
+
+| Component | Description |
+|-----------|----------------------------------------------------------------------------------------------|
+| PGD4K | Deployment of EDB Postgres Distributed clusters with versions 5 and later inside Kubernetes. |
+| PGD4K | Self-healing capabilities such as recovery and restart of failed PGD nodes. |
+| PGD4K | Definition of the services to connect applications to the write leader of each PGD group. |
+| PGD4K | Implementation of Raft subgroups. |
+| PGD4K | TLS connections and client certificate authentication. |
+| PGD4K | Continuous backup to an S3 compatible object store. |
From 5fe80de5232c9552baa8d51eba56c459f5b7face Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 19:11:18 +0200
Subject: [PATCH 24/39] Clarification of app services
Co-authored-by: Dj Walker-Morgan <126472455+djw-m@users.noreply.github.com>
---
.../1/rel_notes/1_0_rel_notes.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
index 525c6b506ea..180b56d2b02 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
@@ -15,7 +15,7 @@ to create and manage EDB Postgres Distributed clusters inside Kubernetes with ca
* Deployment of EDB Postgres Distributed clusters with versions 5 and later.
* Additional self-healing capability on top of that of Postgres Distributed, such as recovery and restart of failed PGD nodes.
-* Definition of the services to connect applications to the write leader of each PGD group.
+* Defined services that allow applications to connect to the write leader of each PGD group.
!!! Note
The EDB Postgres Distributed for Kubernetes operator leverages
From 12b493568d80bb4b0f829a05db33c31cec7f7e18 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Mon, 22 Apr 2024 19:26:43 +0200
Subject: [PATCH 25/39] implementing feedback from review and updating release
date on index page
---
.../1/rel_notes/1_0_rel_notes.mdx | 12 ++++++------
.../1/rel_notes/index.mdx | 2 +-
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
index 180b56d2b02..77bdcc373e7 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
@@ -28,9 +28,9 @@ Please refer to the [PG4K release notes](https://www.enterprisedb.com/docs/postg
| Component | Description |
|-----------|----------------------------------------------------------------------------------------------|
-| PGD4K | Deployment of EDB Postgres Distributed clusters with versions 5 and later inside Kubernetes. |
-| PGD4K | Self-healing capabilities such as recovery and restart of failed PGD nodes. |
-| PGD4K | Definition of the services to connect applications to the write leader of each PGD group. |
-| PGD4K | Implementation of Raft subgroups. |
-| PGD4K | TLS connections and client certificate authentication. |
-| PGD4K | Continuous backup to an S3 compatible object store. |
+| PGD-K | Deployment of EDB Postgres Distributed clusters with versions 5 and later inside Kubernetes. |
+| PGD-K | Self-healing capabilities such as recovery and restart of failed PGD nodes. |
+| PGD-K | Defined services that allow applications to connect to the write leader of each PGD group. |
+| PGD-K | Implementation of Raft subgroups. |
+| PGD-K | TLS connections and client certificate authentication. |
+| PGD-K | Continuous backup to an S3 compatible object store. |
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
index 703df64f7cf..c68af0023ee 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
@@ -11,6 +11,6 @@ The EDB Postgres Distributed for Kubernetes documentation describes the major ve
| Version | Release date |
| -------------------------- | ------------ |
-| [1.0.0](1_0_rel_notes) | 15 Feb 2024 |
+| [1.0.0](1_0_rel_notes) | 24 Apr 2024 |
From bc13cd1bc8b02e3e73945dae1b4ef1d051e05fc2 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Wed, 3 Apr 2024 11:14:27 +0200
Subject: [PATCH 26/39] Submariner for distros with multiple AZ
---
.../postgres_distributed_for_kubernetes/1/architecture.mdx | 6 ++++++
.../postgres_distributed_for_kubernetes/1/connectivity.mdx | 2 +-
.../postgres_distributed_for_kubernetes/1/quickstart.mdx | 6 ++++++
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 5aababc156c..4096cb510e1 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -192,3 +192,9 @@ reliably communicate with each other.
can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
More information can be found in the ["Connectivity"](connectivity.md) section.
+
+!!! NoteAvailability zones
+ When creating Kubernetes clusters in different availability zones for cross-regional replication,
+ ensure the clusters can communicate with each other by enabling network connectivity. To ensure
+ pods and services are reachable by all Kubernetes clusters, deploy a network connectivity application like
+ [Submariner](https://submariner.io/) on every cluster.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index 47ad4138964..d9bab8509df 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -10,7 +10,7 @@ PGD cluster includes:
- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-!!! Notice
+!!! NoteNote
Although these topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index 03b9b3a5b39..d086150bd99 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -54,6 +54,12 @@ contains the definition of a PGD cluster with two data groups and a global
witness node spread across three regions. Each data group consists of two data nodes
and a local witness node.
+!!! NoteAvailability zones
+ When creating Kubernetes clusters in different availability zones for cross-regional replication,
+ ensure the clusters can communicate with each other by enabling network connectivity. To ensure
+ pods and services are reachable by all Kubernetes clusters, deploy a network connectivity application like
+ [Submariner](https://submariner.io/) on every cluster.
+
!!! SeeAlso "Further reading"
For more details about the available options, see
the ["API Reference" section](pg4k-pgd.v1beta1.md).
From 4fdfc8c4bc9a4188fa3d9bc842a717d1984a9cd0 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Wed, 3 Apr 2024 12:31:44 +0200
Subject: [PATCH 27/39] Update connectivity.mdx
---
.../docs/postgres_distributed_for_kubernetes/1/connectivity.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index d9bab8509df..93655b3e57a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -10,7 +10,7 @@ PGD cluster includes:
- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-!!! NoteNote
+!!! NoteNotice
Although these topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
From 1d6b99d4ee34b01a8cd9a9313ac3484b733baf0b Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Wed, 3 Apr 2024 15:33:00 +0200
Subject: [PATCH 28/39] Apply suggestions from code review
Committing John's suggestions
Co-authored-by: John Long
---
.../postgres_distributed_for_kubernetes/1/architecture.mdx | 3 +--
.../docs/postgres_distributed_for_kubernetes/1/quickstart.mdx | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 4096cb510e1..b59a68009b4 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -195,6 +195,5 @@ More information can be found in the ["Connectivity"](connectivity.md) section.
!!! NoteAvailability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
- ensure the clusters can communicate with each other by enabling network connectivity. To ensure
- pods and services are reachable by all Kubernetes clusters, deploy a network connectivity application like
+ ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index d086150bd99..8cbf283db2b 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -56,8 +56,7 @@ and a local witness node.
!!! NoteAvailability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
- ensure the clusters can communicate with each other by enabling network connectivity. To ensure
- pods and services are reachable by all Kubernetes clusters, deploy a network connectivity application like
+ ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
!!! SeeAlso "Further reading"
From 36f936d5e680be5b432b4fd39114b86c744d3356 Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Thu, 4 Apr 2024 10:45:34 +0200
Subject: [PATCH 29/39] spaces in note titles
---
.../docs/postgres_distributed_for_kubernetes/1/architecture.mdx | 2 +-
.../docs/postgres_distributed_for_kubernetes/1/connectivity.mdx | 2 +-
.../docs/postgres_distributed_for_kubernetes/1/quickstart.mdx | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index b59a68009b4..556da1f9774 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -193,7 +193,7 @@ can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
More information can be found in the ["Connectivity"](connectivity.md) section.
-!!! NoteAvailability zones
+!!! Note Availability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
index 93655b3e57a..d7f57fbaa78 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx
@@ -10,7 +10,7 @@ PGD cluster includes:
- [Domain names resolution](#domain-names-resolution) using fully qualified domain names (FQDN)
- [TLS configuration](#tls-configuration)
-!!! NoteNotice
+!!! Note Notice
Although these topics might seem unrelated to each other, they all
participate in the configuration of the PGD resources to make them universally
identifiable and accessible over a secure network.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index 8cbf283db2b..d0ad83dbe78 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -54,7 +54,7 @@ contains the definition of a PGD cluster with two data groups and a global
witness node spread across three regions. Each data group consists of two data nodes
and a local witness node.
-!!! NoteAvailability zones
+!!! Note Availability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
From 03f4a77279d45c4d0ec0d34f34ce169a5511f8ad Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Fri, 5 Apr 2024 10:41:41 +0200
Subject: [PATCH 30/39] Apply suggestions from code review
Applying feedback from Betsy.
Co-authored-by: Betsy Gitelman
---
.../docs/postgres_distributed_for_kubernetes/1/architecture.mdx | 2 +-
.../docs/postgres_distributed_for_kubernetes/1/quickstart.mdx | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 556da1f9774..8b4c8862c3f 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -195,5 +195,5 @@ More information can be found in the ["Connectivity"](connectivity.md) section.
!!! Note Availability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
- ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
+ ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. You can achieve this by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index d0ad83dbe78..d7e99a97dd6 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -56,7 +56,7 @@ and a local witness node.
!!! Note Availability zones
When creating Kubernetes clusters in different availability zones for cross-regional replication,
- ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. This can be achieved by deploying a network connectivity application like
+ ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. You can achieve this by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
!!! SeeAlso "Further reading"
From 926e6e094dc12fc4c39a3f8a0addea4e4311775b Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Fri, 5 Apr 2024 14:31:29 +0200
Subject: [PATCH 31/39] implemented Djs feedback
---
.../postgres_distributed_for_kubernetes/1/architecture.mdx | 4 ++--
.../docs/postgres_distributed_for_kubernetes/1/quickstart.mdx | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 8b4c8862c3f..a47a125f15a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -193,7 +193,7 @@ can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
More information can be found in the ["Connectivity"](connectivity.md) section.
-!!! Note Availability zones
- When creating Kubernetes clusters in different availability zones for cross-regional replication,
+!!! Note Regions and availability zones
+ When creating Kubernetes clusters in different regions or availability zones for cross-regional replication,
ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. You can achieve this by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index d7e99a97dd6..6c2533d273e 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -54,8 +54,8 @@ contains the definition of a PGD cluster with two data groups and a global
witness node spread across three regions. Each data group consists of two data nodes
and a local witness node.
-!!! Note Availability zones
- When creating Kubernetes clusters in different availability zones for cross-regional replication,
+!!! Note Regions and availability zones
+ When creating Kubernetes clusters in different regions or availability zones for cross-regional replication,
ensure the clusters can communicate with each other by enabling network connectivity. Specifically, every service created with a `-node` or `-group` suffix must be discoverable by all other `-node` and `-group` services. You can achieve this by deploying a network connectivity application like
[Submariner](https://submariner.io/) on every cluster.
From 20c38155bc87269844b69944b82b80c9dc01b6b8 Mon Sep 17 00:00:00 2001
From: Betsy Gitelman
Date: Tue, 2 Apr 2024 16:28:50 -0400
Subject: [PATCH 32/39] Editorial review of new pgd4k content
First read of new material
---
.../1/architecture.mdx | 53 +++++++++----------
.../1/backup.mdx | 34 ++++++------
.../1/before_you_start.mdx | 32 +++++------
.../1/certificates.mdx | 29 +++++-----
.../1/pause_resume.mdx | 24 ++++-----
.../1/quickstart.mdx | 2 +-
6 files changed, 86 insertions(+), 88 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index a47a125f15a..a0530dc8c76 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -36,8 +36,7 @@ manages each PGD node using the `Cluster` resource as defined by EDB Postgres
for Kubernetes, specifically a cluster with a single instance (that is, no
replicas).
-The single PostgreSQL instance created by each `Cluster` can be configured
-declaratively via the
+You can configure the single PostgreSQL instance created by each `Cluster` in the
[`.spec.cnp` section](pg4k-pgd.v1beta1.md#pgd-k8s-enterprisedb-io-v1beta1-CnpConfiguration)
of the PGD Group spec.
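As a rough sketch of where that section sits in a `PGDGroup` manifest, reusing the group and node counts from the examples elsewhere in this patch series; the `postgresql.parameters` stanza under `cnp` is an assumption modeled on the PG4K `Cluster` API, not a field confirmed by this document:

```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: region-a
spec:
  instances: 3
  proxyInstances: 2
  pgd:
    parentGroup:
      create: true
      name: world
  cnp:
    postgresql:
      parameters:
        max_connections: "200"   # assumption: passed through to each node's single-instance Cluster
```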
@@ -64,9 +63,9 @@ EDB Postgres Distributed for Kubernetes manages the following:
- Data nodes. A node is a database and is managed
by EDB Postgres for Kubernetes, creating a `Cluster` with a single instance.
- [Witness nodes](https://www.enterprisedb.com/docs/pgd/latest/nodes/#witness-nodes)
- are basic database instances that do not participate in data
- replication; their function is to guarantee that consensus is possible in
- groups with an even number of data nodes, or after network partitions. Witness
+ are basic database instances that don't participate in data
+ replication. Their function is to guarantee that consensus is possible in
+ groups with an even number of data nodes or after network partitions. Witness
nodes are also managed using a single-instance `Cluster` resource.
- [PGD Proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/):
act as Postgres proxies with knowledge of the write leader. PGD proxies need
@@ -79,19 +78,19 @@ connect to every other node using the appropriate connection string (a
`libpq`-style DSN). Write operations don't need to be sent to every node. PGD
takes care of replicating data after it's committed to one node.
-For performance, it is often recommendable to send write operations mostly to a
+For performance, we often recommend sending write operations mostly to a
single node, the *write leader*. Raft is used to identify which node is the
-write leader, and to hold metadata about the PGD nodes. PGD Proxies are used to
-transparently route writes to write leaders, and to quickly pivot to the new
+write leader and to hold metadata about the PGD nodes. PGD proxies are used to
+transparently route writes to write leaders and to quickly pivot to the new
write leader in case of switchover or failover.
It's possible to configure *Raft subgroups*, each of which can maintain a
separate write leader. In EDB Postgres Distributed for Kubernetes, a PGD group containing a PGD proxy
comprises a Raft subgroup.
-There are two kinds of routing available with PGD Proxies:
+Two kinds of routing are available with PGD proxies:
-- Global routing uses the top-level Raft group, and maintains one global write
+- Global routing uses the top-level Raft group and maintains one global write
leader.
- Local routing uses subgroups to maintain separate write leaders. Local
routing is often used to achieve geographical separation of writes.
@@ -99,10 +98,10 @@ There are two kinds of routing available with PGD Proxies:
In EDB Postgres Distributed for Kubernetes, local routing is used by default, and a configuration option is
available to select global routing.
-You can find more information in the
+For more information, see the
[PGD documentation of routing with Raft](https://www.enterprisedb.com/docs/pgd/latest/routing/raft/).
-### PGD Architectures and High Availability
+### PGD architectures and high availability
EDB proposes several recommended architectures to make good use of PGD's
distributed multi-master capabilities and to offer high availability.
@@ -119,35 +118,34 @@ adaptations are necessary to translate PGD into the Kubernetes ecosystem.
### Images and operands
-PGD can be configured to run one of three Postgres distributions. Please refer
-to the
-[PGD documentation](https://www.enterprisedb.com/docs/pgd/latest/choosing_server/)
+You can configure PGD to run one of three Postgres distributions. See the
+[PGD documentation](/pgd/latest/choosing_server/)
to understand the features of each distribution.
To function in Kubernetes, containers are provided for each Postgres
distribution. These are the *operands*.
In addition, the operator images are kept in those same repositories.
-Please refer to [the document on registries](private_registries.md)
+See [EDB private image registries](private_registries.md)
for details on accessing the images.
### Kubernetes architecture
-We reproduce some of the points of the
-[PG4K document on Kubernetes architecture](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/architecture/),
-to which we refer you for further depth.
+Some of the points of the
+[PG4K document on Kubernetes architecture](/postgres_for_kubernetes/latest/architecture/)
+are reproduced here. See the PG4K documentation for details.
-Kubernetes natively provides the possibility to span separate physical locations
-–also known as data centers, failure zones, or more frequently **availability
-zones**– connected to each other via redundant, low-latency, private network
+Kubernetes natively provides the possibility to span separate physical locations.
+These physical locations are also known as data centers, failure zones, or, more frequently, *availability
+zones*. They are connected to each other by way of redundant, low-latency, private network
connectivity.
Being a distributed system, the recommended minimum number of availability zones
-for a **Kubernetes cluster** is three (3), in order to make the control plane
+for a *Kubernetes cluster* is three. This minimum makes the control plane
resilient to the failure of a single zone. This means that each data center is
active at any time and can run workloads simultaneously.
-EDB Postgres Distributed for Kubernetes can be installed in a
+You can install EDB Postgres Distributed for Kubernetes in a
[single Kubernetes cluster](#single-kubernetes-cluster)
or across
[multiple Kubernetes clusters](#multiple-kubernetes-clusters).
@@ -162,7 +160,7 @@ zones, considering all zones active.
![Kubernetes cluster spanning over 3 independent data centers](./images/k8s-architecture-3-az.png)
PGD clusters can be deployed in a single Kubernetes cluster and take advantage
-of Kubernetes availability zones to enable High Availability architectures,
+of Kubernetes availability zones to enable high-availability architectures,
including the Always On recommended architectures.
You can realize the *Always On Single Location* architecture shown in
@@ -176,7 +174,7 @@ to which data center) using affinity, tolerations, and node selectors, as is the
case with EDB Postgres for Kubernetes. Individual scheduling controls are available for proxies as well
as nodes.
-Please refer to the
+See the
[Kubernetes documentation on scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/),
and [Scheduling](/postgres_for_kubernetes/latest/scheduling/) in the EDB Postgres for Kubernetes documentation
for more information.
@@ -191,7 +189,8 @@ reliably communicate with each other.
[Always On multi-location PGD architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/)
can be realized on multiple Kubernetes clusters that meet the connectivity
requirements.
-More information can be found in the ["Connectivity"](connectivity.md) section.
+
+For more information, see ["Connectivity"](connectivity.md).
!!! Note Regions and availability zones
When creating Kubernetes clusters in different regions or availability zones for cross-regional replication,
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index f3c55ce0197..550c641ca85 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -47,7 +47,7 @@ spec:
maxParallel: 8
```
-For more information, see the [EDB Postgres for Kubernetes WAL archiving](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
+For more information, see the [EDB Postgres for Kubernetes WAL archiving](/postgres_for_kubernetes/latest/backup_recovery/#wal-archiving) documentation.
## Scheduled backups
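To make the WAL archiving hunk above concrete, here is a minimal sketch of the `spec.backup.configuration.barmanObjectStore` stanza, assuming the same layout as the PG4K barman object store configuration; the bucket path and secret names are placeholders, and only `maxParallel: 8` comes from the text above:

```yaml
spec:
  backup:
    configuration:
      barmanObjectStore:
        destinationPath: "s3://my-bucket/pgd-backups/"   # placeholder bucket path
        s3Credentials:
          accessKeyId:
            name: backup-creds                           # placeholder secret name
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: backup-creds
            key: ACCESS_SECRET_KEY
        wal:
          maxParallel: 8                                 # parallel WAL archiving, as in the hunk above
```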
@@ -56,7 +56,7 @@ When the PGD group `spec.backup.configuration.barmanObjectStore` stanza is confi
PGD data nodes as the elected backup node for which it creates a `Scheduled Backup` resource.
The `.spec.backup.cron.schedule` field allows you to define a cron schedule specification, expressed
-in the [https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format]\(Go `cron` package format).
+in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -71,26 +71,26 @@ spec:
immediate: true
```
-Scheduled Backups can be suspended if necessary by setting `.spec.backup.cron.suspend` to true. This will
-prevent any new backup from being scheduled while the option is set to true.
+You can suspend scheduled backups if necessary by setting `.spec.backup.cron.suspend` to `true`. Setting this option
+to `true` prevents any new backup from being scheduled.
-In case you want to execute a backup as soon as the ScheduledBackup resource is created
-you can set `.spec.backup.cron.immediate` to true.
+If you want to execute a backup as soon as the ScheduledBackup resource is created,
+set `.spec.backup.cron.immediate` to `true`.
-`.spec.backupOwnerReference` indicates which ownerReference should be used
+`.spec.backupOwnerReference` indicates the ownerReference to use
in the created backup resources. The choices are:
-- *none:* no owner reference for created backup objects
-- *self:* sets the Scheduled backup object as owner of the backup
-- *cluster:* sets the cluster as owner of the backup
+- **none** — No owner reference for created backup objects.
+- **self** — Sets the `ScheduledBackup` object as owner of the backup.
+- **cluster** — Sets the cluster as owner of the backup.
!!! Note
The EDB Postgres for Kubernetes `ScheduledBackup` object contains the `cluster` option to specify the
cluster to back up. This option is currently not supported by EDB Postgres Distributed for Kubernetes and is
ignored if specified.
-In case an elected "Backup node" is deleted, the operator will transparently elect a new "Backup Node"
-and reconcile the Scheduled Backup resource accordingly.
+If an elected backup node is deleted, the operator transparently elects a new backup node
+and reconciles the `ScheduledBackup` resource accordingly.
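
Putting the cron options above together, a minimal sketch of the relevant fragment might look like the following. The field paths follow the ones quoted in this section; the schedule value is only an example.

```yaml
# Illustrative fragment of a PGDGroup backup configuration (not a complete manifest)
spec:
  backup:
    cron:
      schedule: "0 0 0 * * *"   # Go cron format, as linked above
      immediate: true           # take a backup as soon as the resource is created
      suspend: false            # set to true to stop scheduling new backups
```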
## Retention policies
@@ -114,11 +114,11 @@ spec:
For more information, see the [EDB Postgres for Kubernetes retention policies](/postgres_for_kubernetes/latest/backup_recovery/#retention-policies) in the EDB Postgres for Kubernetes documentation.
!!! Important
- Currently, the retention policy will only be applied for the elected "Backup Node"
+ Currently, the retention policy is applied only to the elected backup node's
backups and WAL files. Given that every other PGD node also archives its own WALs
- independently, it is your responsibility to manage the lifecycle of those WAL files,
+ independently, it's your responsibility to manage the lifecycle of those WAL files,
for example by leveraging the object storage data retention policy.
- Also, in case you have an object storage data retention policy set up on every PGD Node
+ Also, if you have an object storage data retention policy set up on every PGD node
directory, make sure it's not overlapping or interfering with the retention policy managed
by the operator.
@@ -132,10 +132,10 @@ supported. For more information, see the [EDB Postgres for Kubernetes compressio
It's possible to specify tags as key-value pairs for the backup objects, namely base backups, WAL files, and history files.
For more information, see the EDB Postgres for Kubernetes documentation about [tagging of backup objects](/postgres_for_kubernetes/latest/backup_recovery/#tagging-of-backup-objects).
-## On-demand backups of a PGD Node
+## On-demand backups of a PGD node
A PGD node is represented as a single-instance EDB Postgres for Kubernetes `Cluster` object.
-As such, in case of need, it's possible to request an on-demand backup
+As such, if needed, you can request an on-demand backup
of a specific PGD node by creating an EDB Postgres for Kubernetes `Backup` resource.
To do that, see [EDB Postgres for Kubernetes on-demand backups](/postgres_for_kubernetes/latest/backup_recovery/#on-demand-backups) in the EDB Postgres for Kubernetes documentation.
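
For illustration, assuming the `Cluster` object backing the PGD node you want to back up is named `region-a-1` in namespace `my-namespace` (both names hypothetical), the on-demand request might look like the following sketch of an EDB Postgres for Kubernetes `Backup` resource:

```yaml
# Hedged sketch: on-demand backup of a single PGD node's underlying Cluster
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Backup
metadata:
  name: region-a-1-ondemand
  namespace: my-namespace
spec:
  cluster:
    name: region-a-1   # the single-instance Cluster that represents the PGD node
```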
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 4f5dc2192df..9af02c5f388 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -1,16 +1,16 @@
---
-title: 'Before You Start'
+title: 'Before you start'
originalFilePath: 'src/before_you_start.md'
---
-Before we get started, it is essential to go over some terminology that is
+Before you get started, it's essential that you become familiar with some terminology that's
specific to Kubernetes and PGD.
## Kubernetes terminology
[Node](https://kubernetes.io/docs/concepts/architecture/nodes/)
: A *node* is a worker machine in Kubernetes, either virtual or physical, where
- all services necessary to run pods are managed by the control plane node(s).
+ all services necessary to run pods are managed by the control plane nodes.
[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/)
: A *pod* is the smallest computing unit that can be deployed in a Kubernetes
@@ -23,26 +23,26 @@ specific to Kubernetes and PGD.
such as service discovery across applications, load balancing, and failover.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
-: A *secret* is an object that is designed to store small amounts of sensitive
+: A *secret* is an object that's designed to store small amounts of sensitive
data such as passwords, access keys, or tokens, and use them in pods.
-[Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
+[Storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
: A *storage class* allows an administrator to define the classes of storage in
a cluster, including provisioner (such as AWS EBS), reclaim policies, mount
options, volume expansion, and so on.
-[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+[Persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
: A *persistent volume* (PV) is a resource in a Kubernetes cluster that
represents storage that was either manually provisioned by an
administrator or dynamically provisioned by a *storage class* controller. A PV
- is associated with a pod using a *persistent volume claim* and its lifecycle is
+ is associated with a pod using a *persistent volume claim*, and its lifecycle is
independent of any pod that uses it. Normally, a PV is a network volume,
especially in the public cloud. A [*local persistent volume*
(LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a
persistent volume that exists only on the particular node where the pod that
uses it is running.
-[Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
+[Persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
: A *persistent volume claim* (PVC) represents a request for storage, which
might include size, access mode, or a particular storage class. Similar to how
a pod consumes node resources, a PVC consumes the resources of a PV.
@@ -54,7 +54,7 @@ specific to Kubernetes and PGD.
projects, departments, teams, and so on.
[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
-: *Role Based Access Control* (RBAC), also known as *role-based security*, is a
+: *Role-based access control* (RBAC), also known as *role-based security*, is a
method used in computer systems security to restrict access to the network and
resources of a system to authorized users only. Kubernetes has a native API to
control roles at the namespace and cluster level and associate them with
@@ -80,24 +80,24 @@ EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported
## PGD terminology
For more information, see
-[Terminology](https://www.enterprisedb.com/docs/pgd/latest/terminology/) in the PGD documentation.
+[Terminology](/pgd/latest/terminology/) in the PGD documentation.
-[Node](https://www.enterprisedb.com/docs/pgd/latest/terminology/#node)
+[Node](/pgd/latest/terminology/#node)
: A PGD database instance.
-[Failover](https://www.enterprisedb.com/docs/pgd/latest/terminology/#failover)
+[Failover](/pgd/latest/terminology/#failover)
: The automated process that recognizes a failure in a highly available database cluster and takes action to connect the application to another active database.
-[Switchover](https://www.enterprisedb.com/docs/pgd/latest/terminology/#switchover)
+[Switchover](/pgd/latest/terminology/#switchover)
: A planned change in connection between the application and the active database node in a cluster, typically done for maintenance.
-[Write leader](https://www.enterprisedb.com/docs/pgd/latest/terminology/#write-leader)
+[Write leader](/pgd/latest/terminology/#write-leader)
: In always-on architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of proxy nodes.
## Cloud terminology
Region
-: A *region* in the Cloud is an isolated and independent geographic area
+: A *region* in the cloud is an isolated and independent geographic area
organized in *availability zones*. Zones within a region have very little
round-trip network latency.
@@ -108,6 +108,6 @@ Zone
## What to do next
-Now that you have familiarized with the terminology, you can
+Now that you are familiar with the terminology, you can
[test EDB Postgres Distributed for Kubernetes on your laptop using a local cluster](quickstart.md) before
deploying the operator in your selected cloud environment.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
index ed8486ffbe8..aeca57e2e21 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -6,25 +6,24 @@ originalFilePath: 'src/certificates.md'
EDB Postgres Distributed for Kubernetes was designed to natively support TLS certificates.
To set up a PGD cluster, each PGD node requires:
-- a server Certification Authority (CA) certificate
-- a server TLS certificate signed by the server Certification Authority
-- a client Certification Authority (CA) certificate
+- A server certification authority (CA) certificate
+- A server TLS certificate signed by the server CA
+- A client CA certificate
- a streaming replication client certificate generated by the client Certification Authority
!!! Note
- You can find all the secrets used by each PGD Node and the expiry dates in
- the Cluster (PGD Node) Status.
+ You can find all the secrets used by each PGD node and the expiry dates in
+ the cluster (PGD node) status.
-EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates, and
-primarily operates in two modes:
+EDB Postgres Distributed for Kubernetes is very flexible when it comes to TLS certificates. It
+operates primarily in two modes:
-1. **operator managed**: certificates are internally
- managed by the operator in a fully automated way, and signed using a CA created
- by EDB Postgres Distributed for Kubernetes
-2. **user provided**: certificates are
+- **Operator managed** — Certificates are internally
+ managed by the operator in a fully automated way and signed using a CA created
+ by EDB Postgres Distributed for Kubernetes.
+- **User provided** — Certificates are
generated outside the operator and imported in the cluster definition as
- secrets - EDB Postgres Distributed for Kubernetes integrates itself with cert-manager (see
- examples below)
+ secrets. EDB Postgres Distributed for Kubernetes integrates with cert-manager.
-You can find further information in the
-[EDB Postgres for Kubernetes documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/certificates/).
+For more information, see the
+[EDB Postgres for Kubernetes documentation](/postgres_for_kubernetes/latest/certificates/).
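
For the user-provided mode, one common way to produce the required secrets is the cert-manager integration mentioned above. A hedged sketch of a cert-manager `Certificate` request for the server TLS certificate might look like this (issuer, secret, and host names are hypothetical):

```yaml
# Illustrative cert-manager request; the resulting secret is what the
# cluster definition imports as the server TLS certificate.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: pgd-server-cert
spec:
  secretName: pgd-server-tls      # referenced from the PGD cluster definition
  issuerRef:
    name: pgd-server-ca-issuer    # CA issuer that signs the server certificate
    kind: Issuer
  dnsNames:
    - region-a-group              # hypothetical service host name
```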
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
index f4f6c92653a..2fe7433a97f 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
@@ -1,27 +1,27 @@
---
-title: 'Declarative Pausing and Resuming'
+title: 'Declarative pausing and resuming'
originalFilePath: 'src/pause_resume.md'
---
-The declarative Pausing and Resuming feature enables saving CPU power by removing the
-database Pods, while keeping the database PVCs.
+The *declarative pausing and resuming* feature enables saving CPU power by removing the
+database pods, while keeping the database PVCs.
-Declarative Pausing and Resuming leverages the hibernation functionality available for
-EDB Postgres for Kubernetes. For additional depth, and explanation of how
-hibernation works, we refer you to the
-[PG4K documentation on declarative hibernation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/declarative_hibernation/).
+Declarative pausing and resuming leverages the hibernation functionality available for
+EDB Postgres for Kubernetes. For additional depth and an explanation of how
+hibernation works, see the
+[Postgres for Kubernetes documentation on declarative hibernation](/postgres_for_kubernetes/latest/declarative_hibernation/).
-Pause is requested by adding the `k8s.pgd.enterprisedb.io/pause`
+Request pause by adding the `k8s.pgd.enterprisedb.io/pause`
annotation in the desired PGD Group.
-For example,
+For example:
```sh
kubectl annotate pgdgroup region-a k8s.pgd.enterprisedb.io/pause=on
```
After a few seconds, the requested PGD Group will be in paused state, with
-all the database pods removed.
+all the database pods removed:
```sh
kubectl get pgdgroups
@@ -32,8 +32,8 @@ region-b 2 1 PGDGroup - Healthy 25m
region-c 0 1 PGDGroup - Healthy 25m
```
-To resume a paused PGD Group, you can simply set the annotation to "off".
-Remember to add the `--overwrite` flag.
+To resume a paused PGD Group, set the annotation to `off`.
+Remember to add the `--overwrite` flag:
```sh
kubectl annotate pgdgroup region-a k8s.pgd.enterprisedb.io/pause=off --overwrite
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
index 6c2533d273e..d777a6e07a5 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/quickstart.mdx
@@ -75,7 +75,7 @@ You can check that the pods are being created using the `get pods` command:
kubectl get pods
```
-The pods are being created as part of PGD nodes. As described in the
+The pods are being created as part of PGD nodes. As described in
[Architecture](architecture.md), they're implemented on top
of EDB Postgres for Kubernetes clusters.
From bd826430b6e61d82416edb35f3a9dcb03bec2e13 Mon Sep 17 00:00:00 2001
From: Betsy Gitelman
Date: Thu, 4 Apr 2024 15:34:32 -0400
Subject: [PATCH 33/39] Additional editorial changes
---
.../1/before_you_start.mdx | 4 ++--
.../1/certificates.mdx | 2 +-
.../1/pause_resume.mdx | 8 ++++----
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 9af02c5f388..14b107878d5 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -24,7 +24,7 @@ specific to Kubernetes and PGD.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
: A *secret* is an object that's designed to store small amounts of sensitive
- data such as passwords, access keys, or tokens, and use them in pods.
+ data, such as passwords, access keys, or tokens, and use them in pods.
[Storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
: A *storage class* allows an administrator to define the classes of storage in
@@ -54,7 +54,7 @@ specific to Kubernetes and PGD.
projects, departments, teams, and so on.
[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
-: *Role-based access control* (RBAC), also known as *role-based security*, is a
+: *Role-based access control (RBAC)*, also known as *role-based security*, is a
method used in computer systems security to restrict access to the network and
resources of a system to authorized users only. Kubernetes has a native API to
control roles at the namespace and cluster level and associate them with
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
index aeca57e2e21..dd907713393 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/certificates.mdx
@@ -9,7 +9,7 @@ To set up an PGD cluster, each PGD node requires:
- A server certification authority (CA) certificate
- A server TLS certificate signed by the server CA
- A client CA certificate
-- a streaming replication client certificate generated by the client Certification Authority
+- A streaming replication client certificate generated by the client CA
!!! Note
You can find all the secrets used by each PGD node and the expiry dates in
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
index 2fe7433a97f..ead2fe84586 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
@@ -4,7 +4,7 @@ originalFilePath: 'src/pause_resume.md'
---
The *declarative pausing and resuming* feature enables saving CPU power by removing the
-database pods, while keeping the database PVCs.
+database pods while keeping the database PVCs.
Declarative pausing and resuming leverages the hibernation functionality available for
EDB Postgres for Kubernetes. For additional depth and an explanation of how
@@ -12,7 +12,7 @@ hibernation works, see the
[Postgres for Kubernetes documentation on declarative hibernation](/postgres_for_kubernetes/latest/declarative_hibernation/).
Request pause by adding the `k8s.pgd.enterprisedb.io/pause`
-annotation in the desired PGD Group.
+annotation in the desired PGD group.
For example:
@@ -20,7 +20,7 @@ For example:
kubectl annotate pgdgroup region-a k8s.pgd.enterprisedb.io/pause=on
```
-After a few seconds, the requested PGD Group will be in paused state, with
+After a few seconds, the requested PGD group is in a paused state, with
all the database pods removed:
```sh
@@ -32,7 +32,7 @@ region-b 2 1 PGDGroup - Healthy 25m
region-c 0 1 PGDGroup - Healthy 25m
```
-To resume a paused PGD Group, set the annotation to `off`.
+To resume a paused PGD group, set the annotation to `off`.
Remember to add the `--overwrite` flag:
```sh
From c42e734c463af212f911c05f8744231431e76b99 Mon Sep 17 00:00:00 2001
From: Betsy Gitelman
Date: Thu, 4 Apr 2024 16:38:52 -0400
Subject: [PATCH 34/39] Second read of pgd4k content after rebase
---
.../1/architecture.mdx | 2 +-
.../1/backup.mdx | 12 +++---
.../1/labels_annotations.mdx | 40 +++++++++----------
.../1/pause_resume.mdx | 4 +-
.../1/samples.mdx | 2 +-
5 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index a0530dc8c76..96e2d7bbdbd 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -67,7 +67,7 @@ EDB Postgres Distributed for Kubernetes manages the following:
replication. Their function is to guarantee that consensus is possible in
groups with an even number of data nodes or after network partitions. Witness
nodes are also managed using a single-instance `Cluster` resource.
-- [PGD Proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/):
+- [PGD proxies](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/)
act as Postgres proxies with knowledge of the write leader. PGD proxies need
information from Raft to route writes to the current write leader.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index 550c641ca85..cdc0be4c0e5 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -27,10 +27,10 @@ WAL archiving is the process that sends WAL files to the object storage, and it'
execute online/hot backups or PITR.
In EDB Postgres Distributed for Kubernetes, each PGD node is set up to archive WAL files in the object store independently.
-The WAL archive is defined in the PGDGroup `spec.backup.configuration.barmanObjectStore` stanza,
+The WAL archive is defined in the PGD Group `spec.backup.configuration.barmanObjectStore` stanza,
and is enabled as soon as a destination path and cloud credentials are set.
-You can choose to compress WAL files before they are uploaded, and/or encrypt them.
-Parallel WAL archiving can also be enabled.
+You can choose to compress WAL files before they're uploaded, and you can encrypt them.
+You can also enable parallel WAL archiving:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -74,10 +74,10 @@ spec:
You can suspend scheduled backups if necessary by setting `.spec.backup.cron.suspend` to `true`. Setting this option
to `true` prevents any new backup from being scheduled.
-If you want to execute a backup as soon as the ScheduledBackup resource is created,
+If you want to execute a backup as soon as the `ScheduledBackup` resource is created,
set `.spec.backup.cron.immediate` to `true`.
-`.spec.backupOwnerReference` indicates the ownerReference to use
+`.spec.backupOwnerReference` indicates the `ownerReference` to use
in the created backup resources. The choices are:
- **none** — No owner reference for created backup objects.
@@ -141,4 +141,4 @@ To do that, see [EDB Postgres for Kubernetes on-demand backups](/postgres_for_ku
!!! Hint
You can retrieve the list of EDB Postgres for Kubernetes clusters that make up your PGD group
- by running: `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`
+ by running `kubectl get cluster -l k8s.pgd.enterprisedb.io/group=my-pgd-group -n my-namespace`.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
index cbe565eac1b..79e2ae544b8 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx
@@ -8,27 +8,27 @@ Kubernetes operator.
`k8s.pgd.enterprisedb.io/certificateType`
: Indicates the type of the certificates. `replication` indicates a certificate
-to be used to authenticate the replication client, `server` indicates a
+to be used to authenticate the replication client. `server` indicates a
certificate to be used for server authentication.
`k8s.pgd.enterprisedb.io/group`
-: Name of the pgdgroup that the resource belongs to. Added to cluster or
-instance resources
+: Name of the PGDGroup that the resource belongs to. Added to cluster or
+instance resources.
`k8s.pgd.enterprisedb.io/isWitnessService`
-: Indicates a service is for a witness node
+: Indicates a service is for a witness node.
`k8s.pgd.enterprisedb.io/type`
-: Type of the resource, added to cluster or instance resources, usually `node`
+: Type of the resource. Added to cluster or instance resources. Usually `node`.
`k8s.pgd.enterprisedb.io/workloadType`
-: Indicates the workload type of the resource, added to cluster or instance
+: Indicates the workload type of the resource. Added to cluster or instance
resources. `pgd-node-data` indicates data node; `pgd-node-witness` a witness
-node; `pgd-proxy` for pgd proxy node;
-`proxy-svc` for pgd proxy service; `group-svc` for pgd group service to
+node; `pgd-proxy` for PGD Proxy node;
+`proxy-svc` for PGD Proxy service; `group-svc` for PGD group service to
communicate with any node in the PGDGroup;
-`node-svc` is a service created from the cnp service template;
-`scheduled-backup` is added to scheduledBackup
+`node-svc` is a service created from the CNP service template;
+`scheduled-backup` is added to `scheduledBackup`
resources; `bootstrap-cross-location-pgd-group` is added to the pod that
creates a cross-location PGD group;
`pgd-node-restore` is added to the pod that starts the node restore process.
@@ -41,30 +41,30 @@ their metadata cleaned up
before creating the PGD node. This is written by the restore job.
`k8s.pgd.enterprisedb.io/hash`
-: Contains the hash of the used PGDGroup spec
+: Contains the hash of the PGDGroup spec that was used.
`k8s.pgd.enterprisedb.io/latestCleanupExecuted`
-: Set in the PGDGroup to indicate that the cleanup has been executed.
+: Set in the PGDGroup to indicate that the cleanup was executed.
`k8s.pgd.enterprisedb.io/node`
-: Contains the name of the node for which a certain certificate has been
+: Contains the name of the node for which a certain certificate was
generated. Added to the certificate resources.
`k8s.pgd.enterprisedb.io/noFinalizers`
-: Set in the PGDGroup with value `true` to skip the finalizer execution. This
-is for internal use only.
+: Set in the PGDGroup with value `true` to skip the finalizer execution.
+For internal use only.
`k8s.pgd.enterprisedb.io/pause`
: Set in the PGDGroup to pause a PGDGroup.
`k8s.pgd.enterprisedb.io/recoverabilityPointsByMethod`
-: Set in the PGDGroup to store the CNP clusters' First Recoverability points by
+: Set in the PGDGroup to store the CNP clusters' first recoverability points by
method in a tamper-proof place.
`k8s.pgd.enterprisedb.io/seedingServer`
-: Set in the PGDGroup to indicate to the operator which is the server to be
-restored. This is written by the restore job.
+: Set in the PGDGroup to indicate to the operator which server to
+restore. This is written by the restore job.
`k8s.pgd.enterprisedb.io/seedingSnapshots`
-: Set in the PGDGroup to indicate to the operator which are the snapshots to be
-restored. This is written by the restore job.
+: Set in the PGDGroup to indicate to the operator which snapshots to
+restore. This is written by the restore job.
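
Because the labels described earlier are applied consistently, they make handy `kubectl` selectors. For example, with the hypothetical group name `region-a` and namespace `my-namespace`:

```sh
# Illustrative queries using the predefined labels described above
kubectl get services -l k8s.pgd.enterprisedb.io/workloadType=proxy-svc -n my-namespace
kubectl get clusters -l k8s.pgd.enterprisedb.io/group=region-a -n my-namespace
```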
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
index ead2fe84586..4b0a01140ba 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pause_resume.mdx
@@ -52,6 +52,6 @@ region-c 0 1 PGDGroup - Healthy 25m
```
There are some requirements before the pause annotation can put the PGD group
-on Pause. Ideally, the PGD Group should be in Healthy state. Alternativelly, if
-all the data nodes in the PGD Group are Healthy at the individual level, Pause
+on pause. Ideally, the PGD group should be in a Healthy state. Alternatively, if
+all the data nodes in the PGD group are healthy at the individual level, pause
can also be initiated.
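
If you manage PGDGroup manifests declaratively (for example, through a GitOps workflow), the same annotation can travel in the manifest metadata instead of being applied with `kubectl annotate`. A minimal sketch, assuming a group named `region-a`:

```yaml
# Hypothetical PGDGroup fragment carrying the pause annotation declaratively
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
kind: PGDGroup
metadata:
  name: region-a
  annotations:
    k8s.pgd.enterprisedb.io/pause: "on"   # set to "off" (same key) to resume
```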
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
index 9f2323bb7dd..e80f36bb16a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/samples.mdx
@@ -12,7 +12,7 @@ your EDB Postgres Distributed cluster in a Kubernetes environment.
Flexible 3 regions
: [`flexible_3regions.yaml`](../samples/flexible_3regions.yaml):
- a PGD cluster with two data groups and a global witness node spread across three
+ A PGD cluster with two data groups and a global witness node spread across three
regions, where each data group consists of two data nodes and a local witness
node.
From d84f8aaec92901018f7b000a0b27558029569dbe Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Mon, 22 Apr 2024 12:08:31 -0700
Subject: [PATCH 35/39] Apply suggestions from code review
Co-authored-by: gvasquezvargas
---
.../1/before_you_start.mdx | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 14b107878d5..3c09f1cd20c 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -24,7 +24,7 @@ specific to Kubernetes and PGD.
[Secret](https://kubernetes.io/docs/concepts/configuration/secret/)
: A *secret* is an object that's designed to store small amounts of sensitive
- data, such as passwords, access keys, or tokens, and use them in pods.
+ data, such as passwords, access keys, or tokens, for use within pods.
[Storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/)
: A *storage class* allows an administrator to define the classes of storage in
@@ -82,7 +82,7 @@ EDB Postgres Distributed for Kubernetes requires a Kubernetes version supported
For more information, see
[Terminology](/pgd/latest/terminology/) in the PGD documentation.
-[Node](/pgd/latest/terminology/#node)
+[Data node](/pgd/latest/terminology/#node)
: A PGD database instance.
[Failover](/pgd/latest/terminology/#failover)
From c1d0965584a19eb5a406c23c6b3214f5fc52ffe9 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Tue, 23 Apr 2024 05:17:49 +0000
Subject: [PATCH 36/39] Fix links to PG4K API docs (correctly, this time.)
---
.../1/pg4k-pgd.v1beta1.mdx | 36 +++++++++----------
.../processors/pg4k-pgd/replace-beta-urls.mjs | 4 +--
2 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
index 7964a54772e..0e948d96dc2 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
@@ -634,7 +634,7 @@ required.
Field | Description |
configuration [Required]
-BackupConfiguration
+BackupConfiguration
|
The CNP configuration to be used for backup. ServerName value is reserved by the operator.
@@ -867,14 +867,14 @@ Make sure you reserve enough time for the operator to request a fast shutdown of
|
storage [Required]
-StorageConfiguration
+StorageConfiguration
|
Configuration of the storage of the instances
|
walStorage [Required]
-StorageConfiguration
+StorageConfiguration
|
Configuration of the WAL storage for the instances
@@ -889,7 +889,7 @@ successfully start up (default 300)
|
affinity
-AffinityConfiguration
+AffinityConfiguration
|
Affinity/Anti-affinity rules for Pods
@@ -905,14 +905,14 @@ for more information.
|
postgresql
-PostgresConfiguration
+PostgresConfiguration
|
Configuration of the PostgreSQL server
|
monitoring [Required]
-MonitoringConfiguration
+MonitoringConfiguration
|
The configuration of the monitoring infrastructure of this cluster
@@ -926,7 +926,7 @@ for more information.
|
serviceAccountTemplate [Required]
-ServiceAccountTemplate
+ServiceAccountTemplate
|
The service account template to be passed to CNP
@@ -973,7 +973,7 @@ Defaults to: RuntimeDefault
|
managed [Required]
-ManagedConfiguration
+ManagedConfiguration
|
The configuration that is used by the portions of PostgreSQL that are managed by the CNP instance manager
@@ -1012,7 +1012,7 @@ Enabled by default.
|
superuserSecret
-LocalObjectReference
+LocalObjectReference
|
The secret containing the superuser password.
@@ -1420,7 +1420,7 @@ of a CNP node
Field | Description |
|
caBundleSecretRef [Required]
-SecretKeySelector
+SecretKeySelector
|
CABundleSecretRef is a reference to a secret field containing the CA bundle
@@ -1428,7 +1428,7 @@ to verify the openTelemetry server certificate
|
clientCertSecret [Required]
-LocalObjectReference
+LocalObjectReference
|
ClientCertSecret is the name of the secret containing the client certificate used to connect
@@ -2284,7 +2284,7 @@ by applications. Defaults to the value of the database key.
|
ownerCredentialsSecret [Required]
-LocalObjectReference
+LocalObjectReference
|
Name of the secret containing the initial credentials for the
@@ -2418,14 +2418,14 @@ case the proxies will be created in the parent group
|
barmanObjectStore [Required]
-BarmanObjectStoreConfiguration
+BarmanObjectStoreConfiguration
|
The configuration for the barman-cloud tool suite
|
recoveryTarget [Required]
-RecoveryTarget
+RecoveryTarget
|
By default, the recovery process applies all the available
@@ -2606,7 +2606,7 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
|
target [Required]
-BackupTarget
+BackupTarget
|
The policy to decide which instance should perform this backup. If empty,
@@ -2618,7 +2618,7 @@ standby, if available.
|
method
-BackupMethod
+BackupMethod
|
The backup method to be used, possible options are barmanObjectStore
@@ -2635,7 +2635,7 @@ Overrides the default setting specified in the cluster field '.spec.backup.volum
|
onlineConfiguration
-OnlineConfiguration
+OnlineConfiguration
|
Configuration parameters to control the online/hot backup with volume snapshots
@@ -2785,7 +2785,7 @@ by PGD to connect to the nodes
|
pvcRole [Required]
-github.com/EnterpriseDB/cloud-native-postgres/pkg/utils.PVCRole
+github.com/EnterpriseDB/cloud-native-postgres/pkg/utils.PVCRole
|
PVCRole is the pvcRole snapshot to restore
diff --git a/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs b/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
index 97fc5999020..afeaa2c6133 100644
--- a/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
+++ b/scripts/fileProcessor/processors/pg4k-pgd/replace-beta-urls.mjs
@@ -1,9 +1,9 @@
// Replace URLs beginning with the following patterns...
// - https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/pg4k-pgd.v1beta1#
-// ...with "#" (that is, leave them relative.) This handles a weird API docs thing during development.
+// ...with "/postgres_for_kubernetes/latest/pg4k.v1/#". This handles a weird API docs thing during development.
const replacements = [
- {pattern: /https:\/\/www\.enterprisedb\.com\/docs\/postgres_for_kubernetes\/latest\/pg4k-pgd.v1beta1#/g, replacement: "#"},
+ {pattern: /https:\/\/www\.enterprisedb\.com\/docs\/postgres_for_kubernetes\/latest\/pg4k-pgd.v1beta1#/g, replacement: "/postgres_for_kubernetes/latest/pg4k.v1/#"},
];
export const process = (filename, content) => {
From a893c0a84c9662f7b0660fad627f86eaf82ba2e1 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Tue, 23 Apr 2024 08:04:57 -0700
Subject: [PATCH 37/39] Correct the type of node in definition
Co-authored-by: Dj Walker-Morgan <126472455+djw-m@users.noreply.github.com>
---
.../postgres_distributed_for_kubernetes/1/before_you_start.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
index 3c09f1cd20c..bd6cab29fb4 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/before_you_start.mdx
@@ -92,7 +92,7 @@ For more information, see
: A planned change in connection between the application and the active database node in a cluster, typically done for maintenance.
[Write leader](/pgd/latest/terminology/#write-leader)
-: In always-on architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of proxy nodes.
+: In always-on architectures, a node is selected as the correct connection endpoint for applications. This node is called the write leader. The write leader is selected by consensus of a quorum of data nodes.
## Cloud terminology
From 688e0a74a3ca785c3c43241d1270e0f926802ac0 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Tue, 23 Apr 2024 15:09:02 +0000
Subject: [PATCH 38/39] Allow searching!
---
src/constants/products.js | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/constants/products.js b/src/constants/products.js
index 81cf5cb8eb7..d0779fce3f6 100644
--- a/src/constants/products.js
+++ b/src/constants/products.js
@@ -68,7 +68,6 @@ export const products = {
postgres_distributed_for_kubernetes: {
name: "EDB Postgres Distributed for Kubernetes",
iconName: IconNames.KUBERNETES,
- noSearch: true, // remove this when PG4K-PGD is released!
},
postgres_for_kubernetes: {
name: "EDB Postgres for Kubernetes",
From 934fd3f8b73be8d4cb892707c67740ac30fd874d Mon Sep 17 00:00:00 2001
From: gvasquezvargas
Date: Tue, 23 Apr 2024 18:15:34 +0200
Subject: [PATCH 39/39] PGD4K approved abbreviation by Stephen
---
.../1/rel_notes/1_0_rel_notes.mdx | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
index 77bdcc373e7..02dfe0fe07d 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/1_0_rel_notes.mdx
@@ -28,9 +28,9 @@ Please refer to the [PG4K release notes](https://www.enterprisedb.com/docs/postg
| Component | Description |
|-----------|----------------------------------------------------------------------------------------------|
-| PGD-K | Deployment of EDB Postgres Distributed clusters with versions 5 and later inside Kubernetes. |
-| PGD-K | Self-healing capabilities such as recovery and restart of failed PGD nodes. |
-| PGD-K | Defined services that allow applications to connect to the write leader of each PGD group. |
-| PGD-K | Implementation of Raft subgroups. |
-| PGD-K | TLS connections and client certificate authentication. |
-| PGD-K | Continuous backup to an S3 compatible object store. |
+| PGD4K | Deployment of EDB Postgres Distributed clusters with versions 5 and later inside Kubernetes. |
+| PGD4K | Self-healing capabilities such as recovery and restart of failed PGD nodes. |
+| PGD4K | Defined services that allow applications to connect to the write leader of each PGD group. |
+| PGD4K | Implementation of Raft subgroups. |
+| PGD4K | TLS connections and client certificate authentication. |
+| PGD4K | Continuous backup to an S3-compatible object store. |
|