diff --git a/package-lock.json b/package-lock.json index ae4b9ba524a..a86cc9d2db3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -39,6 +39,7 @@ "gatsby-transformer-remark": "^5.25.1", "gatsby-transformer-sharp": "^4.25.0", "github-slugger": "^1.5.0", + "globby": "^13.2.2", "graceful-fs": "^4.2.11", "hast-util-to-string": "^1.0.4", "is-absolute-url": "^4.0.1", @@ -2491,6 +2492,33 @@ "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" } }, + "node_modules/@graphql-tools/code-file-loader/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@graphql-tools/code-file-loader/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/@graphql-tools/graphql-tag-pluck": { "version": "7.5.1", "resolved": "https://registry.npmjs.org/@graphql-tools/graphql-tag-pluck/-/graphql-tag-pluck-7.5.1.tgz", @@ -4506,6 +4534,25 @@ } } }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@typescript-eslint/typescript-estree/node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -4531,6 +4578,14 @@ "node": ">=10" } }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/@typescript-eslint/typescript-estree/node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", @@ -8835,9 +8890,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz", + "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -10394,6 +10449,33 @@ "gatsby": "^4.0.0-next" } }, + 
"node_modules/gatsby-plugin-page-creator/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gatsby-plugin-page-creator/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/gatsby-plugin-react-helmet": { "version": "5.25.0", "resolved": "https://registry.npmjs.org/gatsby-plugin-react-helmet/-/gatsby-plugin-react-helmet-5.25.0.tgz", @@ -11207,6 +11289,25 @@ "ms": "^2.1.1" } }, + "node_modules/gatsby/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/gatsby/node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -11240,6 +11341,14 @@ "node": ">=10" } }, + "node_modules/gatsby/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/gatsby/node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -11453,19 +11562,18 @@ } }, "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", "dependencies": { - "array-union": "^2.1.0", "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", "merge2": "^1.4.1", - "slash": "^3.0.0" + "slash": "^4.0.0" }, "engines": { - "node": ">=10" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -16953,6 +17061,25 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/react-dev-utils/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + 
"merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/react-dev-utils/node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -16985,6 +17112,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/react-dev-utils/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/react-dev-utils/node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -18898,11 +19033,14 @@ "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" }, "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/slice-ansi": { diff --git a/package.json b/package.json index a64eebd3ef0..77a1457ee79 100644 --- a/package.json +++ b/package.json @@ -65,6 +65,7 @@ "gatsby-transformer-remark": "^5.25.1", "gatsby-transformer-sharp": "^4.25.0", "github-slugger": "^1.5.0", + "globby": "^13.2.2", "graceful-fs": "^4.2.11", "hast-util-to-string": "^1.0.4", "is-absolute-url": "^4.0.1", diff --git a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx index c1e385bdbc8..0740b969ad2 100644 --- a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx +++ b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx @@ -58,7 +58,7 @@ Since BigAnimal replicates to only one node synchronously, some standby replicas ## Extreme high availability (Preview) -For use cases where high availability across regions is a major concern, a cluster deployment with extreme high availability enabled can provide two data group with three data nodes each, plus a witness group, for a true active-active solution. Extreme-high-availability clusters offer the ability to deploy a cluster across multiple regions or a single region. Extreme-high-availability clusters are powered by [EDB Postgres Distributed](/pgd/latest/) using multi-master logical replication. +For use cases where high availability across regions is a major concern, a cluster deployment with extreme high availability enabled can provide two data groups with three data nodes each, plus a witness group, for a true active-active solution. Extreme-high-availability clusters offer the ability to deploy a cluster across multiple regions or a single region. Extreme-high-availability clusters are powered by [EDB Postgres Distributed](/pgd/latest/) using multi-master logical replication. Extreme-high-availability clusters support both EDB Postgres Advanced Server and EDB Postgres Extended Server database distributions. 
diff --git a/product_docs/docs/migration_portal/4/01_mp_release_notes/mp_4.5.1_rel_notes.mdx b/product_docs/docs/migration_portal/4/01_mp_release_notes/mp_4.5.1_rel_notes.mdx index 32a3be7a489..d87b81e6040 100644 --- a/product_docs/docs/migration_portal/4/01_mp_release_notes/mp_4.5.1_rel_notes.mdx +++ b/product_docs/docs/migration_portal/4/01_mp_release_notes/mp_4.5.1_rel_notes.mdx @@ -3,7 +3,7 @@ title: "Version 4.5.1" --- -New features, enhancements, bug fixes, and other changes in Migration Portal 4.5 include the following: +New features, enhancements, bug fixes, and other changes in Migration Portal 4.5.1 include the following: | Type | Description | | ---- |------------ | diff --git a/product_docs/docs/pem/8/pem_rel_notes/863_rel_notes.mdx b/product_docs/docs/pem/8/pem_rel_notes/863_rel_notes.mdx new file mode 100644 index 00000000000..93a21e97b5a --- /dev/null +++ b/product_docs/docs/pem/8/pem_rel_notes/863_rel_notes.mdx @@ -0,0 +1,9 @@ +--- +title: "Version 8.6.3" +--- + +New features, enhancements, bug fixes, and other changes in PEM 8.6.3 include: + +| Type | Description | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Enhancement | Added support for Flask 2.x. This is a security fix for CVE-2023-30861 and is recommended for all users. This patch affects the PEM server only, no need to update PEM agents.| diff --git a/product_docs/docs/pem/8/pem_rel_notes/index.mdx b/product_docs/docs/pem/8/pem_rel_notes/index.mdx index 69d2be1c13a..1dac6a6cef9 100644 --- a/product_docs/docs/pem/8/pem_rel_notes/index.mdx +++ b/product_docs/docs/pem/8/pem_rel_notes/index.mdx @@ -1,6 +1,7 @@ --- title: "Release notes" navigation: + - 863_rel_notes - 861_rel_notes --- diff --git a/product_docs/docs/pem/9/pem_rel_notes/922_rel_notes.mdx b/product_docs/docs/pem/9/pem_rel_notes/922_rel_notes.mdx new file mode 100644 index 00000000000..0752d828be9 --- /dev/null +++ b/product_docs/docs/pem/9/pem_rel_notes/922_rel_notes.mdx @@ -0,0 +1,9 @@ +--- +title: "Version 9.2.2" +--- + +New features, enhancements, bug fixes, and other changes in PEM 9.2.2 include: + +| Type | Description | +| ----------- | -------------------------------------------------------------------------------------------------| +| Enhancement | This is a security fix for CVE-2023-2650 and is recommended for all the Windows users. This security fix includes updates for the Apache HTTPD bundled with the Windows installer of PEM. 
This patch affects the PEM server only, no need to update PEM agents.| diff --git a/product_docs/docs/pem/9/pem_rel_notes/index.mdx b/product_docs/docs/pem/9/pem_rel_notes/index.mdx index 4751a24785d..a813876341c 100644 --- a/product_docs/docs/pem/9/pem_rel_notes/index.mdx +++ b/product_docs/docs/pem/9/pem_rel_notes/index.mdx @@ -1,6 +1,7 @@ --- title: "Release notes" navigation: + - 922_rel_notes - 921_rel_notes - 920_rel_notes - 911_rel_notes diff --git a/product_docs/docs/pem/9/upgrading/upgrading_pem_installation/upgrading_pem_installation_linux_rpm.mdx b/product_docs/docs/pem/9/upgrading/upgrading_pem_installation/upgrading_pem_installation_linux_rpm.mdx index 1d64cf66af5..b4ec715e968 100644 --- a/product_docs/docs/pem/9/upgrading/upgrading_pem_installation/upgrading_pem_installation_linux_rpm.mdx +++ b/product_docs/docs/pem/9/upgrading/upgrading_pem_installation/upgrading_pem_installation_linux_rpm.mdx @@ -1,5 +1,5 @@ --- -title: "Upgrading a PEM native package installation on a Linux host" +title: "Upgrading a PEM installation on a Linux host" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/upgrade-migration-guide/8.0/upgrading_pem_installation_linux_rpm.html" @@ -8,105 +8,103 @@ redirects: - /pem/latest/pem_upgrade/upgrading_pem_installation/upgrading_pem_installation_linux_rpm/ --- -To upgrade PEM component software on Linux hosts, install a newer version of the PEM component native packages in the following order: +To upgrade PEM component software on Linux hosts, install a newer version of the PEM component packages in the following order: -1. Invoke the PEM agent native package installer on each monitored node except the PEM server host. -2. Invoke the PEM server native package installer. it upgrades both the PEM server and the PEM agent that resides on the PEM server host. +1. Invoke the PEM agent package installer on each monitored node except the PEM server host. +2. Invoke the PEM server package installer. It upgrades both the PEM server and the PEM agent that resides on the PEM server host. During an installation, the component installation automatically detects an existing installation and performs an upgrade. After upgrading the PEM agent and server, you can upgrade SQL Profiler if required. That step is platform specific. !!! Note If you already configured or are planning to configure any shell/batch script run by a Linux agent that's upgraded from any earlier version to version 7.11 or later, you must speciy the user for the `batch_script_user` parameter in the `agent.cfg` file. We strongly recommended that you use a non-root user to run the scripts. Using the root user can result in compromising the data security and operating system security. However, if you want to restore the pemagent to its original settings using a root user to run the scripts, then you must set the `batch_script_user` parameter to `root`. -## Prerequisites to upgrade a PEM installation on Linux host +## Upgrading a PEM server installation -PEM depends on third-party components from the vendor repository, including python3, libboost, openssl, snmp++, and libcurl. To ensure these components are up to date, update your operating system using platform-specific commands. +The commands to upgrade the PEM server are platform specific. -The minimum version required for openssl is 1.0.2k. 
If you're using a version of PostgreSQL or EDB Postgres Advanced Server earlier than version 10, before the upgrade you must install the `libs` package for version 10 or above on the system where the PEM server is installed. Use the following platform-specific commands to install the `libs` version 10 or above on your host.
+If you want to upgrade a PEM server that is installed on a machine in an isolated network, you need to create a PEM repository on that machine before you upgrade the PEM server. For more information about creating a PEM repository on an isolated network, see [Creating an EDB repository
+on an isolated network](/pem/latest/installing/creating_pem_repository_in_isolated_network/).
-### Prerequisites to upgrade a PEM installation on a CentOS or RHEL host
+### On a CentOS, Rocky Linux, AlmaLinux, or RHEL host
-To upgrade packages on a CentOS or RHEL 7.x host:
+To use an RPM package to upgrade an existing RPM installation, you can use the `yum` package manager to upgrade the installed version of the PEM server on CentOS/RHEL 7.x or Rocky Linux/AlmaLinux/RHEL 8.x:
```shell
-yum update
-yum upgrade
+yum upgrade edb-pem
```
-To upgrade packages on a Rocky Linux or AlmaLinux or RHEL 8.x host:
+You can also use the `dnf` command on Rocky Linux/AlmaLinux/RHEL 8.x:
```shell
-dnf update
-dnf upgrade
+dnf upgrade edb-pem
```
-To upgrade EDB Postgres Advanced Server libs:
+!!! Note
+    If you're doing a fresh installation of the PEM server on a CentOS or RHEL 7.x host, the installer installs the `edb-python3-mod_wsgi` package along with the installation. The package is a requirement of the operating system. If you are upgrading the PEM server on a CentOS or RHEL 7.x host, the `edb-python3-mod_wsgi` package replaces the `mod_wsgi` package to meet the requirements of the operating system.
+
+After upgrading the PEM server using yum or the `dnf` command, you must configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server).
-```shell
-yum install edb-as-server-libs
-```
### On a Debian or Ubuntu host
-To upgrade PostgreSQL libs:
+You can use the `apt-get` package manager to upgrade the installed version of the PEM server on supported versions of Debian or Ubuntu:
```shell
-yum install postgresql-libs
+apt-get upgrade edb-pem
```
-Where `` is the PostgreSQL or EDB Postgres Advanced Server version whose `libs` package you want to install.
+After upgrading the PEM server with `apt-get`, you need to configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server).
-### Prerequisites to upgrade a PEM installation on a Debian or Ubuntu host
### On a SLES host
-To upgrade packages on a Debian or Ubuntu host:
+You can use the zypper package manager to upgrade the installed version of the PEM server on supported versions of a SLES host:
```shell
-apt-get update
-apt-get upgrade
+zypper update edb-pem
```
-To upgrade Advanced Server libs:
+After upgrading the PEM server using zypper, you need to configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server).
-```shell
-apt-get install edb-as-server-libs
-```
+!!! Note
+    If you upgrade the PEM backend database server and the PEM server, update the `PG_INSTALL_PATH` and `DB_UNIT_FILE` parameters pointing to the new version in the `/usr/edb/pem/share/.install-config` file before you run the configure script.
-To upgrade PostgreSQL libs: +## Configuring the PEM server + +After upgrading the PEM server, you can use the following command to configure the PEM server: ```shell -apt-get install postgresql-libs +/usr/edb/pem/bin/configure-pem-server.sh ``` -Where `` is the PostgreSQL or EDB Postgres Advanced Server version whose `libs` package you want to install. +The configure script uses the values from the old PEM server configuration file while running the script. -### Prerequisites to upgrade a PEM installation on a SLES host +For detailed information, see [Configuring the PEM server on Linux platforms](/pem/latest/installing/configuring_the_pem_server_on_linux/). -To upgrade packages on a SLES host: +!!! Note + - The configure script requires a superuser password only after the upgrade process. -```shell -zypper update -zypper upgrade -``` + - If your configure script gets stuck, then stop the PEM agent with `alert_threads>0`. To get the details of such agents, execute the query: -To upgrade EDB Postgres Advanced Server libs: + ```sql + SELECT agent_id FROM pem.agent_config WHERE param='alert_threads' AND value > 0; + ``` -```shell -zypper install edb-as-server-libs -``` + Stop the running agents and re-run the configure script. -To upgrade PostgreSQL libs: + If the problem persists, then run the query to terminate the stuck alert processes: -```shell -zypper install postgresql-libs -``` + ```sql + SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE query='SELECT pem.process_one_alert()'; + ``` -Where `` is the PostgreSQL or EDB Postgres Advanced Server version whose `libs` package you want to install. + Then re-run the configure script. -## Upgrading a PEM agent native package installation +## Upgrading a PEM agent installation -You can use native packages to upgrade existing PEM agents initially installed using native packages. The upgrade process doesn't update the PEM agent configuration file. After installing the new agent, you must manually copy the configuration file of the existing agent to the new installation location. +The commands to upgrade the PEM agent are platform specific. -### Upgrading the PEM agent on a CentOS or RHEL host +### On a CentOS or RHEL host -For CentOS or RHEL 7.x or RHEL 8.x, to upgrade a PEM agent, use the following command: +For CentOS or RHEL 7.x or RHEL 8.x, use the following command: ```shell yum upgrade edb-pem-agent @@ -118,84 +116,19 @@ For Rocky Linux or AlmaLinux or RHEL 8.x, you can also use the following command dnf upgrade edb-pem-agent ``` -### Upgrading a PEM agent on a Debian or Ubuntu host +### On a Debian or Ubuntu host -To upgrade a PEM agent, use the following command: +For Debian or Ubuntu, use the following command: ```shell apt-get upgrade edb-pem-agent ``` -### Upgrading a PEM agent on a SLES host +### On a SLES host -To upgrade a PEM agent, use the following command: +For SLES, use the following command: ```shell zypper update edb-pem-agent ``` -## Upgrading a PEM server native package installation - -If you initially used native packages to install your PEM server, you can use native packages to upgrade your PEM server. The commands to upgrade are platform specific. - -If you want to upgrade a PEM server that is installed on a machine in an isolated network, you need to create a PEM repository on that machine before you upgrade the PEM server. 
For more information about creating a PEM repository on an isolated network, see [Creating a PEM Repository on an Isolated Network](/pem/latest/installing/creating_pem_repository_in_isolated_network/). - -### Upgrading a PEM server on a CentOS, Rocky Linux, AlmaLinux, or RHEL host - -To use an RPM package to upgrade an existing RPM installation, you can use the `yum` package manager to upgrade the installed version of the PEM server on CentOS/RHEL 7.x or Rocky Linux/AlmaLinux/RHEL 8.x: - -```shell -yum upgrade edb-pem -``` - -You can also use the `dnf` command on Rocky Linux/AlmaLinux/RHEL 8.x: - -```shell -dnf upgrade edb-pem -``` - -!!! Note - If you're doing a fresh installation of the PEM server on CentOS or RHEL 7.x host, the installer installs the `edb-python3-mod_wsgi` package along with the installation. The package is a requirement of the operating system. If you are upgrading the PEM server on CentOS or RHEL 7.x host, the`the edb-python3-mod_wsgi` packages replaces the `mod_wsgi package` package to meet the requirements of the operating system. - -After upgrading the PEM server using yum or the `dnf` command, you must configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server). - -### Upgrading the PEM server on a Debian or Ubuntu host - -You can use the `apt-get` package manager to upgrade the installed version of the PEM server on supported versions of Debian or Ubuntu: - -```shell -apt-get upgrade edb-pem -``` - -After upgrading the PEM server with `apt-get`, you need to configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server). - -### Upgrading the PEM server on a SLES host - -You can use the zypper package manager to upgrade the installed version of the PEM Server on supported versions of a SLES host: - -```shell -zypper update edb-pem -``` - -After upgrading the PEM server using zypper, you need to configure the PEM server. For detailed information, see [Configuring the PEM server](#configuring-the-pem-server). - -!!! Note - If you upgrade the PEM backend database server and the PEM server, update the `PG_INSTALL_PATH` and `DB_UNIT_FILE` parameters pointing to the new version in the `/usr/edb/pem/share/.install-config` file before you run the configure script. - -## Configuring the PEM server - -After upgrading the PEM server, you can use the following command to configure the PEM server: - -```shell -/usr/edb/pem/bin/configure-pem-server.sh -``` - -The configure script uses the values from the old PEM server configuration file while running the script. - - -After executing the PEM server configuration file, use your version-specific service control command to restart the httpd service. - -For detailed information, see [Configuring the PEM server on Linux platforms](/pem/latest/installing/configuring_the_pem_server_on_linux/). - -!!! Note - From PEM version 7.11 and later, the configure script requires a superuser password only after the upgrade process. 
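Pulling the reordered upgrade steps above into one place, a minimal sketch of the flow on a Rocky Linux/AlmaLinux/RHEL 8.x host might look like the following (assuming root privileges and packages originally installed from the EDB repository); only commands already shown on this page are used:

```shell
# 1. On every monitored node except the PEM server host, upgrade the agent.
dnf upgrade edb-pem-agent

# 2. On the PEM server host, upgrade the server package;
#    this also upgrades the agent that resides on that host.
dnf upgrade edb-pem

# 3. Re-run the configuration script; it reuses the values from the
#    existing PEM server configuration file.
/usr/edb/pem/bin/configure-pem-server.sh
```

On Debian/Ubuntu or SLES hosts the same ordering applies, with `apt-get upgrade` or `zypper update`, respectively.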
diff --git a/product_docs/docs/pgd/4/rel_notes/index.mdx b/product_docs/docs/pgd/4/rel_notes/index.mdx index 17e44cf19a6..525bf15937b 100644 --- a/product_docs/docs/pgd/4/rel_notes/index.mdx +++ b/product_docs/docs/pgd/4/rel_notes/index.mdx @@ -2,7 +2,9 @@ title: "EDB Postgres Distributed Release notes" navTitle: "Release notes" navigation: +- pgd_4.3.1-1_rel_notes - pgd_4.3.1_rel_notes +- pgd_4.3.0-1_rel_notes - pgd_4.3.0_rel_notes - pgd_4.2.2_rel_notes - pgd_4.2.1_rel_notes @@ -24,7 +26,9 @@ redirects: - /pgd/latest/rel_notes/pgd_4.2.1_rel_notes/ - /pgd/latest/rel_notes/pgd_4.2.2_rel_notes/ - /pgd/latest/rel_notes/pgd_4.3.0_rel_notes/ + - /pgd/latest/rel_notes/pgd_4.3.0-1_rel_notes/ - /pgd/latest/rel_notes/pgd_4.3.1_rel_notes/ + - /pgd/latest/rel_notes/pgd_4.3.1-1_rel_notes/ --- @@ -32,9 +36,9 @@ The EDB Postgres Distributed documentation describes the latest version of EDB P | Release Date | EDB Postgres Distributed | BDR | HARP | CLI | TPAexec | | ------------ | ---------------------------- | ----- | ----- | ----- | -------------------------------------------------------------------------------- | -| 2023 May 17 | [4.3.1](pgd_4.3.1_rel_notes) | 4.3.1 | 2.3.0[^**] | 1.1.1 | [23.17](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | -| 2023 May 17 | [4.3.1](pgd_4.3.1_rel_notes) | 4.3.1 | 2.2.3 | 1.1.1 | [23.17](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | -| 2023 Feb 14 | [4.3.0](pgd_4.3.0_rel_notes) | 4.3.0 | 2.2.2[^*] | 1.1.0 | [23.9](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | +| 2023 July 12 | [4.3.1-1 ](pgd_4.3.1-1_rel_notes)| 4.3.1 | 2.3.0 | 1.1.1 | [23.19](/tpa/latest/rel_notes/tpa_23.19_rel_notes) | +| 2023 May 17 | [4.3.1](pgd_4.3.1_rel_notes) | 4.3.1 | 2.2.3 | 1.1.1 | [23.17](/tpa/latest/rel_notes/tpa_23.17_rel_notes) | +| 2023 Mar 30 | [4.3.0-1](pgd_4.3.0-1_rel_notes) | 4.3.0 | 2.2.2 | 1.1.0 | [23.9](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | | 2023 Feb 14 | [4.3.0](pgd_4.3.0_rel_notes) | 4.3.0 | 2.2.1 | 1.1.0 | [23.9](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | | 2022 Dec 14 | [4.2.2](pgd_4.2.2_rel_notes) | 4.2.2 | 2.2.1 | 1.1.0 | [23.9](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/239/) | | 2022 Nov 16 | [4.2.1](pgd_4.2.1_rel_notes) | 4.2.1 | 2.2.1 | 1.1.0 | [23.7](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/237/) | @@ -47,6 +51,3 @@ The EDB Postgres Distributed documentation describes the latest version of EDB P | 2021 Dec 01 | [4.0.0](pgd_4.0.0_rel_notes) | 4.0.0 | 2.0.0 | - | [22.9](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/2106/) | -[^**] We released a patch to HARP 2.2.3 with few bug fixes and enhancement. If using 2.2.3 or below with an earlier version of EDB Postgres Distributed, we recommend that you upgrade to 2.3.0. - -[^*] We released a patch to HARP 2.2.1 to address a security vulnerability. If using 2.2.1 with an earlier version of EDB Postgres Distributed, we recommend that you upgrade to 2.2.2. 
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx new file mode 100644 index 00000000000..188e5649f2f --- /dev/null +++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx @@ -0,0 +1,16 @@ +--- +title: "Release notes for EDB Postgres Distributed version 4.3.0-1" +navTitle: "Version 4.3.0-1" +--- + +EDB Postgres Distributed version 4.3.0-1 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions. + +It includes a patch to HARP 2.2.1 to address a security vulnerability. If you are using HARP 2.2.1 or earlier, we recommend that you upgrade to HARP 2.2.2. + +!!! Note + This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later. + +| Component | Version | Type | Description | +| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------| +| HARP | 2.2.2 | Change | Upgrade 3rd party dependencies to fix Github dependabot alerts | + diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx index 6c79862c20f..bb9747dc659 100644 --- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx +++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx @@ -11,10 +11,10 @@ EDB Postgres Distributed version 4.3.0 is a patch release of EDB Postgres Distri | Component | Version | Type | Description | | --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------| | BDR | 4.3.0 | Bug fix | Handle ALTER TABLE...ALTER COLUMN...TYPE...on a non-data node (BDR-2768 RT86808)

A global DML lock is required if a table rewrite is needed, however this cannot be taken if the query is executed on a non-data node (logical standby or subscriber-only).<br/><br/> |
-| BDR | 4.3.0 | Bug fix | Separate autopartition leader from Raft leader (BDR-2721)<br/><br/>Raft leader can be a witness node but autopartitioning can only be managed on data nodes. This change ensures that autopatition leader is always a data node.<br/><br/> |
+| BDR | 4.3.0 | Bug fix | Separate autopartition leader from Raft leader (BDR-2721)<br/><br/>Raft leader can be a witness node but autopartitioning can only be managed on data nodes. This change ensures that autopartition leader is always a data node.<br/><br/> |
| BDR | 4.3.0 | Feature | Implement bdr.alter_node_kind()<br/><br/>Function to change a node kind, the kind can also be checked in bdr.node now.<br/><br/> |
| BDR | 4.3.0 | Bug fix | Replicate bdr admin functions to standbys (BDR-1575, RT72698)<br/><br/>Always replicate the function call also for the writer process.<br/><br/> |
| BDR | 4.3.0 | Bug fix | Fix watermark handling on clusters with multiple sub-groups<br/><br/>Watermark is used to ensure data consistency during join. Previously, this didn't work correctly in the presence of multiple data sub-groups.<br/><br/> |
| BDR | 4.3.0 | Bug fix | Don't allow switching to CAMO Local Mode if the node is not a write lead<br/><br/>In CAMO only one node should be allowed to switch to the Local Mode at a given time. We now require the node to be the HARP write leader in order to ensure that rule.<br/><br/>
| -| HARP | 2.2.2 | Change | Upgrade 3rd party dependencies to fix Github dependabot alerts | +| HARP | 2.2.1 | Bug fix | Fix connection leak issue (BDR-2530). diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx new file mode 100644 index 00000000000..93bbdcf76fc --- /dev/null +++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx @@ -0,0 +1,19 @@ +--- +title: "Release notes for EDB Postgres Distributed version 4.3.1-1" +navTitle: "Version 4.3.1-1" +--- + +EDB Postgres Distributed version 4.3.1-1 is a minor release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions. + +It includes HARP 2.2.3 with bug fixes and enhancements. If you are using HARP 2.2.3 or earlier, we recommend that you upgrade to HARP 2.3.0. + +!!! Note + This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later. + + + Component | Version | Type | Description + --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------ + HARP | 2.3.0 | Bug fix | Fix the CAMO lag computation issue - (BDR-3341). + HARP | 2.3.0 | Bug fix | Fix the Etcd TLS issue when only `ssl_ca_file` is set - (BDR-3582). + HARP | 2.3.0 | Feature | Add HTTP(S) health check probes for HARP. + diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx index a33c33d812d..66501aeccda 100644 --- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx +++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx @@ -24,8 +24,5 @@ EDB Postgres Distributed version 4.3.1 is a minor release of EDB Postgres Distri | BDR | 4.3.1 | Bug fix | Reload configuration for the pglogical receiver. Reload and apply the configuration changes when the server receives reload signal. | | BDR | 4.3.1 | Enhancement | Avoid restarting sync workers. This enhancement is to prevent the node join from failing when config changes are made that signal the restart of subscription workers. | | CLI | 1.1.1 | Change | Upgraded third-party dependencies to fix Github dependabot alerts. | -| HARP | 2.3.0 | Bug fix | Fix the CAMO lag computation issue - (BDR-3341). | -| HARP | 2.3.0 | Bug fix | Fix the Etcd TLS issue when only `ssl_ca_file` is set - (BDR-3582). | -| HARP | 2.3.0 | Feature | Add HTTP(S) health check probes for HARP. | | HARP | 2.2.3 | Bug fix | Updated consensus check to use `bdr.get_raft_status` instead of `bdr.monitor_group_raft`. | | Utilities | 1.1.0 | Bug fix | Fixed handle uninitialized physical replication slots issue. | diff --git a/product_docs/docs/pgd/5/architectures.mdx b/product_docs/docs/pgd/5/architectures.mdx index fbba16434f9..4980c1e1b3e 100644 --- a/product_docs/docs/pgd/5/architectures.mdx +++ b/product_docs/docs/pgd/5/architectures.mdx @@ -141,3 +141,18 @@ Use these criteria to help you to select the appropriate Always On architecture. | License cost | 2 or 3 PGD data nodes | 4 or 6  PGD data nodes | 4 or 6 PGD data nodes | 6+ PGD data nodes | + +## Adding flexibility to the standard architectures + +The single location architecture can be deployed in as many locations as desired to provide the data resiliency needed and proximity to applications and users maintaining the data. 
While EDB Postgres Distributed has a variety of conflict handling approaches available, in general care should be taken to minimize the number of expected collisions if allowing write activity from geographically disparate locations. + +The standard architectures can also be expanded with two additional types of nodes: + +• Subscriber only nodes + +• Logical standbys + +**Subscriber only nodes** can be used to achieve additional read scalability and to have data closer to users when the majority of an application’s workload is read intensive with infrequent writes. They can also be leveraged to publish a subset of the data for reporting, archiving, and analytic needs. + +**Logical standbys** receive replicated data from another node in the PGD cluster but do not participate in the replication mesh or consensus. They contain all the same data as the other PGD data nodes, and can quickly be promoted to a master if one of the data nodes fails to return the cluster to full capacity/consensus. They can be used in environments where network traffic between data centers is a concern; otherwise 3 PGD data nodes per location is always preferred. + diff --git a/product_docs/docs/pgd/5/consistency/column-level-conflicts.mdx b/product_docs/docs/pgd/5/consistency/column-level-conflicts.mdx index 9860ad521ed..08274942963 100644 --- a/product_docs/docs/pgd/5/consistency/column-level-conflicts.mdx +++ b/product_docs/docs/pgd/5/consistency/column-level-conflicts.mdx @@ -190,32 +190,32 @@ By default, column-level conflict resolution picks the value with a higher times ## Notes -- The attributes modified by an `UPDATE` are determined by comparing the old and new row in a trigger. This means that if the attribute doesn't change a value, it isn't detected as modified even if it's explicitly set. For example, `UPDATE t SET a = a` doesn't mark `a` as modified for any row. Similarly, `UPDATE t SET a = 1` doesn't mark `a` as modified for rows that are already set to `1`. +- The attributes modified by an `UPDATE` are determined by comparing the old and new row in a trigger. This means that if the attribute doesn't change a value, it isn't detected as modified even if it's explicitly set. For example, `UPDATE t SET a = a` doesn't mark `a` as modified for any row. Similarly, `UPDATE t SET a = 1` doesn't mark `a` as modified for rows that are already set to `1`. -- For `INSERT` statements, there's no old row to compare the new one to, so all attributes are considered to be modified, and they are assigned a new timestamp. This condition applies even for columns that weren't included in the `INSERT` statement and received default values. PGD can detect the attributes that have a default value but can't know if it was included automatically or specified explicitly. +- For `INSERT` statements, there's no old row to compare the new one to, so all attributes are considered to be modified, and they are assigned a new timestamp. This condition applies even for columns that weren't included in the `INSERT` statement and received default values. PGD can detect the attributes that have a default value but can't know if it was included automatically or specified explicitly. - This situation effectively means column-level conflict resolution doesn't work for `INSERT-INSERT` conflicts even if the `INSERT` statements specify different subsets of columns. The newer row has timestamps that are all newer than the older row. 
+ This situation effectively means column-level conflict resolution doesn't work for `INSERT-INSERT` conflicts even if the `INSERT` statements specify different subsets of columns. The newer row has timestamps that are all newer than the older row. -- By treating the columns independently, it's easy to violate constraints in a way that isn't possible when all changes happen on the same node. Consider, for example, a table like this: +- By treating the columns independently, it's easy to violate constraints in a way that isn't possible when all changes happen on the same node. Consider, for example, a table like this: -```sql -CREATE TABLE t (id INT PRIMARY KEY, a INT, b INT, CHECK (a > b)); -INSERT INTO t VALUES (1, 1000, 1); -``` + ```sql + CREATE TABLE t (id INT PRIMARY KEY, a INT, b INT, CHECK (a > b)); + INSERT INTO t VALUES (1, 1000, 1); + ``` - Assume one node does: + Assume one node does: -```sql -UPDATE t SET a = 100; -``` + ```sql + UPDATE t SET a = 100; + ``` - Another node concurrently does: + Another node concurrently does: -```sql -UPDATE t SET b = 500; -``` + ```sql + UPDATE t SET b = 500; + ``` - Each of those updates is valid when executed on the initial row and so passes on each node. But when replicating to the other node, the resulting row violates the `CHECK (A > b)` constraint, and the replication stops until the issue is resolved manually. + Each of those updates is valid when executed on the initial row and so passes on each node. But when replicating to the other node, the resulting row violates the `CHECK (A > b)` constraint, and the replication stops until the issue is resolved manually. - The column storing timestamp mapping is managed automatically. Don't specify or override the value in your queries, as the results can be unpredictable. (The value is ignored where possible.) @@ -228,3 +228,4 @@ UPDATE t SET b = 500; ```sql SELECT bdr.alter_node_group_config('group', ignore_redundant_updates := false); ``` + diff --git a/product_docs/docs/pgd/5/durability/lag-control.mdx b/product_docs/docs/pgd/5/durability/lag-control.mdx index 5e486737936..73301d424d8 100644 --- a/product_docs/docs/pgd/5/durability/lag-control.mdx +++ b/product_docs/docs/pgd/5/durability/lag-control.mdx @@ -49,15 +49,22 @@ PGD commit-delay time. To get started using Lag Control: -- Determine the maximum acceptable commit delay time `max_commit_delay` that all database applications can tolerate. +- Determine the maximum acceptable commit delay time `max_commit_delay` that + all database applications can tolerate. -- Decide on the lag measure to use. Choose either lag size `max_lag_size` or lag time `max_lag_time`. +- Decide on the lag measure to use. Choose either lag size `max_lag_size` or + lag time `max_lag_time`. -- Decide on the groups or subgroups involved and the minimum number of nodes in each collection required to satisfy confirmation. This information forms the basis for the definition of a commit scope rule. +- Decide on the groups or subgroups involved and the minimum number of nodes + in each collection required to satisfy confirmation. This information forms + the basis for the definition of a commit scope rule. ## Configuration -You specify Lag Control in a commit scope, which allows consistent and coordinated parameter settings across the nodes spanned by the commmit scope rule. You can include a Lag Control specification in the default commit scope of a top group or as part of an origin Group Commit scope. 
+You specify lag control in a commit scope, which allows consistent and +coordinated parameter settings across the nodes spanned by the commit scope +rule. You can include a lag control specification in the default commit scope of +a top group or as part of an origin group commit scope. Using the sample node groups from [Commit scope](commit-scopes), this example shows Lag Control rules for two data centers: @@ -78,11 +85,15 @@ SELECT bdr.add_commit_scope( ); ``` -The parameter values admit unit specification that's compatible with GUC parameter conventions. +The parameter values admit unit specification that's compatible with GUC +parameter conventions. -You can add a Lag Control commit scope rule to existing commit scope rules that also include Group Commit and CAMO rule specifications. +You can add a lag control commit scope rule to existing commit scope rules that +also include group commit and CAMO rule specifications. -The `max_commit_delay` parameter permits and encourages a specification of milliseconds with a fractional part, including a submillisecond setting, if appropriate. +The `max_commit_delay` parameter permits and encourages a specification of +milliseconds with a fractional part, including a submillisecond setting, if +appropriate. ## Overview @@ -122,8 +133,8 @@ lowest duration possible to maintain a lag measure threshold. !!! Note Don't conflate the PGD commit delay with the Postgres -commit delay. They are unrelated and perform different functions. -Don't substitute one for the other. +commit delay. They are unrelated and perform different functions. Don't +substitute one for the other. !!! ## Transaction application diff --git a/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx b/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx index c3196003622..697665045f8 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx @@ -140,7 +140,7 @@ Backup is the Schema for the backups API | Name | Description | Type | | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#objectmeta-v1-meta) | +| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | | `spec ` | Specification of the desired behavior of the backup. More info: | [BackupSpec](#BackupSpec) | | `status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: | [BackupStatus](#BackupStatus) | @@ -164,7 +164,7 @@ BackupList contains a list of Backup | Name | Description | Type | | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- | -| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#listmeta-v1-meta) | +| `metadata` | Standard list metadata. 
More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) | | `items ` | List of backups - *mandatory* | [\[\]Backup](#Backup) | @@ -204,8 +204,8 @@ BackupStatus defines the observed state of Backup | `backupId ` | The ID of the Barman backup | string | | `backupName ` | The Name of the Barman backup | string | | `phase ` | The last backup status | BackupPhase | -| `startedAt ` | When the backup was started | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | -| `stoppedAt ` | When the backup was terminated | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | +| `startedAt ` | When the backup was started | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | +| `stoppedAt ` | When the backup was terminated | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | | `beginWal ` | The starting WAL | string | | `endWal ` | The ending WAL | string | | `beginLSN ` | The starting xlog | string | @@ -341,7 +341,7 @@ Cluster is the Schema for the PostgreSQL API | Name | Description | Type | | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#objectmeta-v1-meta) | +| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | | `spec ` | Specification of the desired behavior of the cluster. More info: | [ClusterSpec](#ClusterSpec) | | `status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: | [ClusterStatus](#ClusterStatus) | @@ -353,7 +353,7 @@ ClusterList contains a list of Cluster | Name | Description | Type | | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- | -| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#listmeta-v1-meta) | +| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) | | `items ` | List of clusters - *mandatory* | [\[\]Cluster](#Cluster) | @@ -391,13 +391,14 @@ ClusterSpec defines the desired state of Cluster | `failoverDelay ` | The amount of time (in seconds) to wait before triggering a failover after the primary PostgreSQL instance in the cluster was detected to be unhealthy | int32 | | `affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) | | `topologySpreadConstraints` | TopologySpreadConstraints specifies how to spread matching pods among the given topology. More info: | \[]corev1.TopologySpreadConstraint | -| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. 
| [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) | +| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) | +| `priorityClassName ` | Name of the priority class which will be used in every generated Pod, if the PriorityClass specified does not exist, the pod will not be able to schedule. Please refer to for more information | string | | `primaryUpdateStrategy ` | Deployment strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy | | `primaryUpdateMethod ` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be with a switchover (`switchover`) or in-place (`restart` - default) | PrimaryUpdateMethod | | `backup ` | The configuration to be used for backups | [\*BackupConfiguration](#BackupConfiguration) | | `nodeMaintenanceWindow ` | Define a maintenance window for the Kubernetes nodes | [\*NodeMaintenanceWindow](#NodeMaintenanceWindow) | | `licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string | -| `licenseKeySecret ` | The reference to the license key. When this is set it take precedence over LicenseKey. | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | +| `licenseKeySecret ` | The reference to the license key. When this is set it take precedence over LicenseKey. 
| [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | | `monitoring ` | The configuration of the monitoring infrastructure of this cluster | [\*MonitoringConfiguration](#MonitoringConfiguration) | | `externalClusters ` | The list of external clusters which are used in the configuration | [\[\]ExternalCluster](#ExternalCluster) | | `logLevel ` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | string | @@ -530,10 +531,10 @@ ExternalCluster represents the connection parameters to an external cluster whic | ---------------------- | ------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | | `name ` | The server name, required - *mandatory* | string | | `connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string | -| `sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `sslKey ` | The reference to an SSL private key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `password ` | The reference to the password to be used to connect to the server | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | +| `sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `sslKey ` | The reference to an SSL private key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `password ` | The reference to the password to be used to connect to the server | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | | `barmanObjectStore ` | The configuration for the barman-cloud tool suite | [\*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) | @@ -615,7 +616,7 @@ LDAPBindSearchAuth provides the required fields to use the bind+search LDAP auth | ----------------- | -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | | `baseDN ` | Root DN to begin the user search | string | | `bindDN ` | DN of the user to bind to the directory | string | -| `bindPassword ` | Secret with the password for the user to bind to the directory | 
[\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | +| `bindPassword ` | Secret with the password for the user to bind to the directory | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | | `searchAttribute` | Attribute to match against the username | string | | `searchFilter ` | Search filter to use when doing the search+bind authentication | string | @@ -772,7 +773,7 @@ Pooler is the Schema for the poolers API | Name | Description | Type | | ---------- | ----------- | ------------------------------------------------------------------------------------------------------------ | -| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#objectmeta-v1-meta) | +| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | | `spec ` | | [PoolerSpec](#PoolerSpec) | | `status ` | | [PoolerStatus](#PoolerStatus) | @@ -794,7 +795,7 @@ PoolerList contains a list of Pooler | Name | Description | Type | | ---------- | ------------- | -------------------------------------------------------------------------------------------------------- | -| `metadata` | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#listmeta-v1-meta) | +| `metadata` | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) | | `items ` | - *mandatory* | [\[\]Pooler](#Pooler) | @@ -949,7 +950,7 @@ The defaults of the CREATE ROLE command are applied Reference: @@ -961,7 +962,7 @@ RollingUpdateStatus contains the information about an instance which is being up | Name | Description | Type | | ----------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------ | | `imageName` | The image which we put into the Pod - *mandatory* | string | -| `startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | +| `startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | @@ -989,7 +990,7 @@ ScheduledBackup is the Schema for the scheduledbackups API | Name | Description | Type | | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#objectmeta-v1-meta) | +| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) | | `spec ` | Specification of the desired behavior of the ScheduledBackup. More info: | [ScheduledBackupSpec](#ScheduledBackupSpec) | | `status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. 
More info: | [ScheduledBackupStatus](#ScheduledBackupStatus) | @@ -1001,7 +1002,7 @@ ScheduledBackupList contains a list of ScheduledBackup | Name | Description | Type | | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- | -| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#listmeta-v1-meta) | +| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) | | `items ` | List of clusters - *mandatory* | [\[\]ScheduledBackup](#ScheduledBackup) | @@ -1027,9 +1028,9 @@ ScheduledBackupStatus defines the observed state of ScheduledBackup | Name | Description | Type | | ------------------ | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `lastCheckTime ` | The latest time the schedule | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | -| `lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | -| `nextScheduleTime` | Next time we will run a backup | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#time-v1-meta) | +| `lastCheckTime ` | The latest time the schedule | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | +| `lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | +| `nextScheduleTime` | Next time we will run a backup | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) | @@ -1092,7 +1093,7 @@ StorageConfiguration is the configuration of the storage of the PostgreSQL insta | `storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | \*string | | `size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. 
| string | | `resizeInUseVolumes` | Resize existent PVCs, defaults to true | \*bool | -| `pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [\*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core) | +| `pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [\*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#persistentvolumeclaim-v1-core) | @@ -1118,10 +1119,10 @@ TDEConfiguration contains the Transparent Data Encryption configuration | Name | Description | Type | | ------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | | `enabled ` | True if we want to have TDE enabled | bool | -| `secretKeyRef ` | Reference to the secret that contains the encryption key | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `wrapCommand ` | WrapCommand is the encrypt command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `unwrapCommand ` | UnwrapCommand is the decryption command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | -| `passphraseCommand` | PassphraseCommand is the command executed to get the passphrase that will be passed to the OpenSSL command to encrypt and decrypt | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretkeyselector-v1-core) | +| `secretKeyRef ` | Reference to the secret that contains the encryption key | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `wrapCommand ` | WrapCommand is the encrypt command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `unwrapCommand ` | UnwrapCommand is the decryption command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | +| `passphraseCommand` | PassphraseCommand is the command executed to get the passphrase that will be passed to the OpenSSL command to encrypt and decrypt | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) | @@ -1129,10 +1130,11 @@ TDEConfiguration contains the Transparent Data Encryption configuration Topology contains the cluster topology -| Name | Description | Type | -| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- | -| `successfullyExtracted` | SuccessfullyExtracted indicates if the topology data was extract. 
It is useful to enact fallback behaviors in synchronous replica election in case of failures | bool | -| `instances ` | Instances contains the pod topology of the instances | map[PodName]PodTopologyLabels | +| Name | Description | Type | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- | +| `successfullyExtracted` | SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors in synchronous replica election in case of failures | bool | +| `instances ` | Instances contains the pod topology of the instances | map[PodName]PodTopologyLabels | +| `nodesUsed ` | NodesUsed represents the count of distinct nodes accommodating the instances. A value of '1' suggests that all instances are hosted on a single node, implying the absence of High Availability (HA). Ideally, this value should be the same as the number of instances in the Postgres HA cluster, implying shared nothing architecture on the compute side. | int32 | diff --git a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx index a06cc2d723f..d67cd01cddd 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx @@ -8,47 +8,140 @@ specific to Kubernetes and PostgreSQL. ## Kubernetes terminology -| Resource | Description | -| ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the control plane node(s). | -| [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. | -| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. | -| [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. 
| -| [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) | A *storage class* allows an administrator to define the classes of storage in a cluster, including provisioner (such as AWS EBS), reclaim policies, mount options, volume expansion, and so on. | -| [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) | A *persistent volume* (PV) is a resource in a Kubernetes cluster that represents storage that has been either manually provisioned by an administrator or dynamically provisioned by a *storage class* controller. A PV is associated with a pod using a *persistent volume claim* and its lifecycle is independent of any pod that uses it. Normally, a PV is a network volume, especially in the public cloud. A [*local persistent volume* (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a persistent volume that exists only on the particular node where the pod that uses it is running. | -| [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) | A *persistent volume claim* (PVC) represents a request for storage, which might include size, access mode, or a particular storage class. Similar to how a pod consumes node resources, a PVC consumes the resources of a PV. | -| [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | A *namespace* is a logical and isolated subset of a Kubernetes cluster and can be seen as a *virtual cluster* within the wider physical cluster. Namespaces allow administrators to create separated environments based on projects, departments, teams, and so on. | -| [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) | *Role Based Access Control* (RBAC), also known as *role-based security*, is a method used in computer systems security to restrict access to the network and resources of a system to authorized users only. Kubernetes has a native API to control roles at the namespace and cluster level and associate them with specific resources and individuals. | -| [CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | A *custom resource definition* (CRD) is an extension of the Kubernetes API and allows developers to create new data types and objects, *called custom resources*. | -| [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) | An *operator* is a custom resource that automates those steps that are normally performed by a human operator when managing one or more applications or given services. An operator assists Kubernetes in making sure that the resource's defined state always matches the observed one. | -| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. | +[Node](https://kubernetes.io/docs/concepts/architecture/nodes/) +: A *node* is a worker machine in Kubernetes, either virtual or physical, where + all services necessary to run pods are managed by the control plane node(s). + +[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) +: A *pod* is the smallest computing unit that can be deployed in a Kubernetes + cluster and is composed of one or more containers that share network and + storage. 
+ +[Service](https://kubernetes.io/docs/concepts/services-networking/service/) +: A *service* is an abstraction that exposes as a network service an + application that runs on a group of pods and standardizes important features + such as service discovery across applications, load balancing, failover, and so + on. + +[Secret](https://kubernetes.io/docs/concepts/configuration/secret/) +: A *secret* is an object that is designed to store small amounts of sensitive + data such as passwords, access keys, or tokens, and use them in pods. + +[Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) +: A *storage class* allows an administrator to define the classes of storage in + a cluster, including provisioner (such as AWS EBS), reclaim policies, mount + options, volume expansion, and so on. + +[Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) +: A *persistent volume* (PV) is a resource in a Kubernetes cluster that + represents storage that has been either manually provisioned by an + administrator or dynamically provisioned by a *storage class* controller. A PV + is associated with a pod using a *persistent volume claim* and its lifecycle is + independent of any pod that uses it. Normally, a PV is a network volume, + especially in the public cloud. A [*local persistent volume* + (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a + persistent volume that exists only on the particular node where the pod that + uses it is running. + +[Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) +: A *persistent volume claim* (PVC) represents a request for storage, which + might include size, access mode, or a particular storage class. Similar to how + a pod consumes node resources, a PVC consumes the resources of a PV. + +[Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +: A *namespace* is a logical and isolated subset of a Kubernetes cluster and + can be seen as a *virtual cluster* within the wider physical cluster. + Namespaces allow administrators to create separated environments based on + projects, departments, teams, and so on. + +[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +: *Role Based Access Control* (RBAC), also known as *role-based security*, is a + method used in computer systems security to restrict access to the network and + resources of a system to authorized users only. Kubernetes has a native API to + control roles at the namespace and cluster level and associate them with + specific resources and individuals. + +[CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +: A *custom resource definition* (CRD) is an extension of the Kubernetes API + and allows developers to create new data types and objects, *called custom + resources*. + +[Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) +: An *operator* is a custom resource that automates those steps that are + normally performed by a human operator when managing one or more applications + or given services. An operator assists Kubernetes in making sure that the + resource's defined state always matches the observed one. + +[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/) +: `kubectl` is the command-line tool used to manage a Kubernetes cluster. EDB Postgres for Kubernetes requires a Kubernetes version supported by the community. 
Please refer to the ["Supported releases"](supported_releases.md) page for details. ## PostgreSQL terminology -| Resource | Description | -| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Instance | A Postgres server process running and listening on a pair "IP address(es)" and "TCP port" (usually 5432). | -| Primary | A PostgreSQL instance that can accept both read and write operations. | -| Replica | A PostgreSQL instance replicating from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). | -| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. | -| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. | -| Replica Cluster | A EDB Postgres for Kubernetes `Cluster` that is in continuous recovery mode from a selected PostgreSQL cluster, normally residing outside the Kubernetes cluster. It is a feature that enables multi-cluster deployments in private, public, hybrid, and multi-cloud contexts. | -| Designated Primary | A PostgreSQL standby instance in a replica cluster that is in continuous recovery from another PostgreSQL cluster and that is designated to become primary in case the replica cluster becomes primary. | -| Superuser | In PostgreSQL a *superuser* is any role with both `LOGIN` and `SUPERUSER` privileges. For security reasons, EDB Postgres for Kubernetes performs administrative tasks by connecting to the `postgres` database as the `postgres` user via `peer` authentication over the local Unix Domain Socket. | -| [WAL](https://www.postgresql.org/docs/current/wal-intro.html) | Write-Ahead Logging (WAL) is a standard method for ensuring data integrity in database management systems. | -| PVC group | A PVC group in EDB Postgres for Kubernetes' terminology is a group of related PVCs belonging to the same PostgreSQL instance, namely the main volume containing the PGDATA (`storage`) and the volume for WALs (`walStorage`). | +Instance +: A Postgres server process running and listening on a pair "IP address(es)" + and "TCP port" (usually 5432). + +Primary +: A PostgreSQL instance that can accept both read and write operations. + +Replica +: A PostgreSQL instance replicating from the only primary instance in a + cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) + records. A replica is also known as *standby* or *secondary* server. PostgreSQL + relies on physical streaming replication (async/sync) and file-based log + shipping (async). + +Hot Standby +: PostgreSQL feature that allows a *replica* to accept read-only workloads. + +Cluster +: To be intended as High Availability (HA) Cluster: a set of PostgreSQL + instances made up by a single primary and an optional arbitrary number of + replicas. + +Replica Cluster +: A EDB Postgres for Kubernetes `Cluster` that is in continuous recovery mode from a selected + PostgreSQL cluster, normally residing outside the Kubernetes cluster. 
It is a + feature that enables multi-cluster deployments in private, public, hybrid, and + multi-cloud contexts. + +Designated Primary +: A PostgreSQL standby instance in a replica cluster that is in continuous + recovery from another PostgreSQL cluster and that is designated to become + primary in case the replica cluster becomes primary. + +Superuser +: In PostgreSQL a *superuser* is any role with both `LOGIN` and `SUPERUSER` + privileges. For security reasons, EDB Postgres for Kubernetes performs administrative tasks + by connecting to the `postgres` database as the `postgres` user via `peer` + authentication over the local Unix Domain Socket. + +[WAL](https://www.postgresql.org/docs/current/wal-intro.html) +: Write-Ahead Logging (WAL) is a standard method for ensuring data integrity in + database management systems. + +PVC group +: A PVC group in EDB Postgres for Kubernetes' terminology is a group of related PVCs + belonging to the same PostgreSQL instance, namely the main volume containing + the PGDATA (`storage`) and the volume for WALs (`walStorage`).| ## Cloud terminology -| Resource | Description | -| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Region | A *region* in the Cloud is an isolated and independent geographic area organized in *availability zones*. Zones within a region have very little round-trip network latency. | -| Zone | An *availability zone* in the Cloud (also known as *zone*) is an area in a region where resources can be deployed. Usually, an availability zone corresponds to a data center or an isolated building of the same data center. | +Region +: A *region* in the Cloud is an isolated and independent geographic area + organized in *availability zones*. Zones within a region have very little + round-trip network latency. + +Zone +: An *availability zone* in the Cloud (also known as *zone*) is an area in a + region where resources can be deployed. Usually, an availability zone + corresponds to a data center or an isolated building of the same data center. ## What to do next Now that you have familiarized with the terminology, you can decide to -[test EDB Postgres for Kubernetes on your laptop using a local cluster](quickstart.md) before deploying the operator in your selected cloud environment. \ No newline at end of file +[test EDB Postgres for Kubernetes on your laptop using a local cluster](quickstart.md) before +deploying the operator in your selected cloud environment. \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx index 5730fe216fe..5eb2bba42c0 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx @@ -552,7 +552,7 @@ bootstrap: The `kubectl cnp snapshot` command is able to take consistent snapshots of a replica through a technique known as *cold backup*, by fencing the standby before taking a physical copy of the volumes. For details, please refer to -["Snapshotting a Postgres cluster"](#snapshotting-a-postgres-cluster). +["Snapshotting a Postgres cluster"](kubectl-plugin/#snapshotting-a-postgres-cluster). 
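For convenience, the basic invocation is shown below. This is a minimal sketch that reuses the `cluster-example` name from the other examples in this documentation; the linked section describes the full behavior and the available options.

```shell
# Take a cold backup of a replica of the "cluster-example" cluster:
# the plugin fences a standby, snapshots its volumes, then unfences it.
kubectl cnp snapshot cluster-example
```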
#### Additional considerations diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx index ff41b2c2ab5..63521eb0afc 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx @@ -584,7 +584,7 @@ service defined in the `Pooler`. features to reduce the perceived downtime by client applications. At the moment, you can achieve the same results by setting the `paused` attribute to `true`, then issuing the switchover command through the - [`cnp` plugin](cnp-plugin.md#promote), and finally restoring the `paused` + [`cnp` plugin](kubectl-plugin.md#promote), and finally restoring the `paused` attribute to `false`. ## Limitations diff --git a/product_docs/docs/postgres_for_kubernetes/1/css/override.css b/product_docs/docs/postgres_for_kubernetes/1/css/override.css new file mode 100644 index 00000000000..f7389b7b398 --- /dev/null +++ b/product_docs/docs/postgres_for_kubernetes/1/css/override.css @@ -0,0 +1,3 @@ +.wy-table-responsive table td, .wy-table-responsive table th { + white-space: normal; +} diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx index bb0aa5da19f..bd6906061e8 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx @@ -17,7 +17,7 @@ database Pods, while keeping the database PVCs. !!! Note Declarative hibernation is different from the existing implementation - of [imperative hibernation via the `cnp` plugin](cnp-plugin.md#cluster-hibernation). + of [imperative hibernation via the `cnp` plugin](kubectl-plugin.md#cluster-hibernation). Imperative hibernation shuts down all Postgres instances in the High Availability cluster, and keeps a static copy of the PVCs of the primary that contain `PGDATA` and WALs. The plugin enables to exit the hibernation phase, by diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx index 4b1dffba10a..44b34bd9470 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx @@ -123,15 +123,44 @@ NOTE: it is considered an error to set both `passwordSecret` and `disablePassword` on a given role. This configuration will be rejected by the validation webhook. +### Password expiry, `VALID UNTIL` + +The `VALID UNTIL` role attribute in PostgreSQL controls password expiry. Roles +created without `VALID UNTIL` specified get NULL by default in PostgreSQL, +meaning that their password will never expire. + +PostgreSQL uses a timestamp type for `VALID UNTIL`, which includes support for +the value `'infinity'` indicating that the password never expires. Please see the +[PostgreSQL documentation](https://www.postgresql.org/docs/current/datatype-datetime.html) +for reference. + +With declarative role management, the `validUntil` attribute for managed roles +controls password expiry. `validUntil` can only take: + +- a Kubernetes timestamp, or +- be omitted (defaulting to `null`) + +In the first case, the given `validUntil` timestamp will be set in the database +as the `VALID UNTIL` attribute of the role. 
+ +In the second case (omitted `validUntil`) the operator will ensure password +never expires, mirroring the behavior of PostgreSQL. Specifically: + +- in case of new role, it will omit the `VALID UNTIL` clause in the role + creation statement +- in case of existing role, it will set `VALID UNTIL` to `infinity` if `VALID + UNTIL` was not set to `NULL` in the database (this is due to PostgreSQL not + allowing `VALID UNTIL NULL` in the `ALTER ROLE` SQL statement) + !!! Warning The declarative role management feature has changed behavior since its initial version (1.20.0). In 1.20.0, a role without a `passwordSecret` would lead to setting the password to NULL in PostgreSQL. In practice there is little difference from 1.20.0. - New roles created without `passwordSecret` will have a NULL password. + New roles created without `passwordSecret` will have a `NULL` password. The relevant change is when using the managed roles to manage roles that had been previously created. In 1.20.0, doing this might inadvertently - result in setting existing passwords to NULL. + result in setting existing passwords to `NULL`. ## Unrealizable role configurations diff --git a/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx b/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx index 77abadb4e71..67e9b7ef208 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx @@ -110,14 +110,6 @@ On Minikube you can setup the ingress controller running: minikube addons enable ingress ``` -Then, patch the `tcp-service` ConfigMap to redirect to the primary the -connections on port 5432 of the Ingress: - -```sh -kubectl patch configmap tcp-services -n kube-system \ - --patch '{"data":{"5432":"default/cluster-example-rw:5432"}}' -``` - You can then patch the deployment to allow access on port 5432. Create a file called `patch.yaml` with the following content: @@ -126,16 +118,16 @@ spec: template: spec: containers: - - name: nginx-ingress-controller + - name: controller ports: - containerPort: 5432 hostPort: 5432 ``` -and apply it to the `nginx-ingress-controller deployment`: +and apply it to the `ingress-nginx-controller` deployment: ```sh -kubectl patch deployment nginx-ingress-controller --patch "$(cat patch.yaml)" -n kube-system +kubectl patch deployment ingress-nginx-controller --patch "$(cat patch.yaml)" -n ingress-nginx ``` You can access the primary from your machine running: diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx index 48b10ca46f3..1c9f2646b15 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx @@ -80,16 +80,24 @@ defined by Ibryam and Huß in > Principles, Patterns, Tools to automate containerized microservices at scale - +Please read the ["Architecture: Synchronizing the state"](architecture.md#synchronizing-the-state) +section. **Why should I use an operator instead of running PostgreSQL as a container?** @@ -184,12 +192,21 @@ an outage of the operator does not necessarily imply a PostgreSQL database outage; it's like running a database without a DBA or system administrator. 
- \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx index 661b9717b0a..9d677c49bb4 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx @@ -67,6 +67,7 @@ full lifecycle of a highly available Postgres database clusters with a primary/standby architecture, using native streaming replication. !!! Note + The operator has been renamed from Cloud Native PostgreSQL. Existing users of Cloud Native PostgreSQL will not experience any change, as the underlying components and resources have not changed. @@ -101,6 +102,7 @@ You can [evaluate EDB Postgres for Kubernetes for free](evaluation.md). You need a valid license key to use EDB Postgres for Kubernetes in production. !!! Note + Based on the [Operator Capability Levels model](operator_capability_levels.md), users can expect a **"Level V - Auto Pilot"** set of capabilities from the EDB Postgres for Kubernetes Operator. @@ -128,6 +130,7 @@ The EDB Postgres for Kubernetes Operator container images support the multi-arch format for the following platforms: `linux/amd64`, `linux/arm64`, `linux/ppc64le`, `linux/s390x`. !!! Warning + EDB Postgres for Kubernetes requires that all nodes in a Kubernetes cluster have the same CPU architecture, thus a hybrid CPU architecture Kubernetes cluster is not supported. Additionally, EDB supports `linux/ppc64le` and `linux/s390x` architectures @@ -153,6 +156,7 @@ In case you are not familiar with some basic terminology on Kubernetes and Postg please consult the ["Before you start" section](before_you_start.md). !!! Note + Although the guide primarily addresses Kubernetes, all concepts can be extended to OpenShift as well. diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx index 538df71ad09..585ff61179e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx @@ -47,7 +47,7 @@ kubectl cnp install generate \ > cnp_for_specific_namespace.yaml ``` -Please refer to ["`cnp` plugin"](./cnp-plugin.md#generation-of-installation-manifests) documentation +Please refer to ["`cnp` plugin"](./kubectl-plugin.md#generation-of-installation-manifests) documentation for a more comprehensive example. !!! Warning @@ -57,7 +57,7 @@ for a more comprehensive example. ports, as explained in the official [docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) and by this - [issue](https://github.com/cloudnative-pg/cloudnative-pg/issues/1360). + [issue](https://github.com/EnterpriseDB/cloud-native-postgres/issues/1360). You'll need to either change the `targetPort` in the webhook service, to be one of the allowed ones, or open the webhooks' port (`9443`) on the firewall. 
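On GKE, opening the webhook port usually translates into a firewall rule that lets the control plane reach the worker nodes on port 9443. The sketch below is illustrative only: the VPC network, control-plane CIDR, and node tag are placeholders that must be replaced with the values of your private cluster.

```shell
# Hypothetical example: allow the GKE control plane to reach the
# operator's webhook port (9443) on the worker nodes.
gcloud compute firewall-rules create allow-cnp-webhook \
  --network <your-vpc-network> \
  --direction INGRESS \
  --source-ranges <control-plane-cidr> \
  --target-tags <node-network-tag> \
  --allow tcp:9443
```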
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cnp-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx similarity index 67% rename from product_docs/docs/postgres_for_kubernetes/1/cnp-plugin.mdx rename to product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx index 9e6e9c55257..2f1cace2e97 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/cnp-plugin.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx @@ -1,6 +1,6 @@ --- title: 'EDB Postgres for Kubernetes Plugin' -originalFilePath: 'src/cnp-plugin.md' +originalFilePath: 'src/kubectl-plugin.md' --- EDB Postgres for Kubernetes provides a plugin for `kubectl` to manage a cluster in Kubernetes. @@ -32,7 +32,7 @@ section. In that section are pre-built packages for a variety of systems. As a result, you can follow standard practices and instructions to install them in your systems. -**Debian packages:** +#### Debian packages For example, let's install the 1.18.1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. @@ -51,7 +51,7 @@ Unpacking cnp (1.18.1) over (1.18.1) ... Setting up cnp (1.18.1) ... ``` -**RPM packages:** +#### RPM packages As in the example for `.deb` packages, let's install the 1.18.1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. @@ -110,6 +110,50 @@ Once the plugin was installed and deployed, you can start using it like this: kubectl cnp ``` +### Generation of installation manifests + +The `cnp` plugin can be used to generate the YAML manifest for the +installation of the operator. This option would typically be used if you want +to override some default configurations such as number of replicas, +installation namespace, namespaces to watch, and so on. + +For details and available options, run: + +```shell +kubectl cnp install generate --help +``` + +The main options are: + +- `-n`: the namespace in which to install the operator (by default: `postgresql-operator-system`) +- `--replicas`: number of replicas in the deployment +- `--version`: minor version of the operator to be installed, such as `1.17`. + If a minor version is specified, the plugin will install the latest patch + version of that minor version. If no version is supplied the plugin will + install the latest `MAJOR.MINOR.PATCH` version of the operator. 
+- `--watch-namespace`: comma separated string containing the namespaces to + watch (by default all namespaces) + +An example of the `generate` command, which will generate a YAML manifest that +will install the operator, is as follows: + +```shell +kubectl cnp install generate \ + -n king \ + --version 1.17 \ + --replicas 3 \ + --watch-namespace "albert, bb, freddie" \ + > operator.yaml +``` + +The flags in the above command have the following meaning: + +- `-n king` install the CNP operator into the `king` namespace +- `--version 1.17` install the latest patch version for minor version 1.17 +- `--replicas 3` install the operator with 3 replicas +- `--watch-namespaces "albert, bb, freddie"` have the operator watch for + changes in the `albert`, `bb` and `freddie` namespaces only + ### Status The `status` command provides an overview of the current status of your @@ -143,7 +187,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:14.3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -188,7 +232,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: quay.io/enterprisedb/postgresql:14.3 +PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -431,8 +475,14 @@ The command will generate a ZIP file containing various manifest in YAML format Use the `-f` flag to name a result file explicitly. If the `-f` flag is not used, a default time-stamped filename is created for the zip file. +!!! Note + The report plugin obeys `kubectl` conventions, and will look for objects constrained + by namespace. The CNP Operator will generally not be installed in the same + namespace as the clusters. + E.g. 
the default installation namespace is postgresql-operator-system + ```shell -kubectl cnp report operator +kubectl cnp report operator -n ``` results in @@ -444,7 +494,7 @@ Successfully written report to "report_operator_.zip" (format: "yaml" With the `-f` flag set: ```shell -kubectl cnp report operator -f reportRedacted.zip +kubectl cnp report operator -n -f reportRedacted.zip ``` Unzipping the file will produce a time-stamped top-level folder to keep the @@ -460,17 +510,46 @@ will result in: Archive: reportRedacted.zip creating: report_operator_/ creating: report_operator_/manifests/ - inflating: report_operator_/manifests/deployment.yaml - inflating: report_operator_/manifests/operator-pod.yaml - inflating: report_operator_/manifests/events.yaml - inflating: report_operator_/manifests/validating-webhook-configuration.yaml - inflating: report_operator_/manifests/mutating-webhook-configuration.yaml - inflating: report_operator_/manifests/webhook-service.yaml + inflating: report_operator_/manifests/deployment.yaml + inflating: report_operator_/manifests/operator-pod.yaml + inflating: report_operator_/manifests/events.yaml + inflating: report_operator_/manifests/validating-webhook-configuration.yaml + inflating: report_operator_/manifests/mutating-webhook-configuration.yaml + inflating: report_operator_/manifests/webhook-service.yaml inflating: report_operator_/manifests/postgresql-operator-ca-secret.yaml inflating: report_operator_/manifests/postgresql-operator-webhook-cert.yaml ``` -You can verify that the confidential information is REDACTED: +If you activated the `--logs` option, you'd see an extra subdirectory: + +```shell +Archive: report_operator_.zip + + creating: report_operator_/operator-logs/ + inflating: report_operator_/operator-logs/postgresql-operator-controller-manager-66fb98dbc5-pxkmh-logs.jsonl +``` + +!!! Note + The plugin will try to get the PREVIOUS operator's logs, which is helpful + when investigating restarted operators. + In all cases, it will also try to get the CURRENT operator logs. If current + and previous logs are available, it will show them both. + +```json +====== Begin of Previous Log ===== +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} + + +====== End of Previous Log ===== +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting EDB Postgres for Kubernetes Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} +``` + +If the operator hasn't been restarted, you'll still see the `====== Begin …` +and `====== End …` guards, with no content inside. 
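A command that produces the layout above could look like the following. It is a sketch that assumes the operator lives in the default `postgresql-operator-system` namespace and uses an illustrative output file name.

```shell
# Collect the operator manifests plus current (and, if available, previous)
# logs, writing everything to an explicitly named ZIP file.
kubectl cnp report operator \
  -n postgresql-operator-system \
  --logs \
  -f report_operator_with_logs.zip
```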
+ +You can verify that the confidential information is REDACTED by default: ```shell cd report_operator_/manifests/ @@ -492,7 +571,7 @@ metadata: With the `-S` (`--stopRedaction`) option activated, secrets are shown: ```shell -kubectl cnp report operator -f reportNonRedacted.zip -S +kubectl cnp report operator -n -f reportNonRedacted.zip -S ``` You'll get a reminder that you're about to view confidential information: @@ -518,50 +597,6 @@ metadata: fieldsType: FieldsV1 ``` -##### OpenShift support - -The `report operator` directive will detect automatically if the cluster is -running on OpenShift, and will get the Cluster Service Version and the -Install Plan, and add them automatically to the zip under the `openshift` -sub-folder. - -!!! Note - the namespace becomes very important on OpenShift. The default namespace - for OpenShift in CNP is "openshift-operators". Many (most) clients will use - a different namespace for the CNP operator. - -```sh -kubectl cnp report operator -n openshift-operators -``` - -results in - -```sh -Successfully written report to "report_operator_.zip" (format: "yaml") -``` - -You can find the OpenShift-related files in the `openshift` sub-folder: - -```shell -unzip report_operator_.zip -cd report_operator_/ -cd openshift -head clusterserviceversions.yaml -``` - -```text -apiVersion: operators.coreos.com/v1alpha1 -items: -- apiVersion: operators.coreos.com/v1alpha1 - kind: ClusterServiceVersion - metadata: - annotations: - alm-examples: |- - [ - { - "apiVersion": "postgresql.k8s.enterprisedb.io/v1", -``` - #### report Cluster The `cluster` sub-command gathers the following: @@ -642,3 +677,258 @@ Archive: report_cluster_example_.zip inflating: report_cluster_example_/job-logs/cluster-example-full-1-initdb-qnnvw.jsonl inflating: report_cluster_example_/job-logs/cluster-example-full-2-join-tvj8r.jsonl ``` + +##### OpenShift support + +The `report operator` directive will detect automatically if the cluster is +running on OpenShift, and will get the Cluster Service Version and the +Install Plan, and add them automatically to the zip under the `openshift` +sub-folder. + +!!! Note + the namespace becomes very important on OpenShift. The default namespace + for OpenShift in CNP is "openshift-operators". Many (most) clients will use + a different namespace for the CNP operator. + +```sh +kubectl cnp report operator -n openshift-operators +``` + +results in + +```sh +Successfully written report to "report_operator_.zip" (format: "yaml") +``` + +You can find the OpenShift-related files in the `openshift` sub-folder: + +```shell +unzip report_operator_.zip +cd report_operator_/ +cd openshift +head clusterserviceversions.yaml +``` + +```text +apiVersion: operators.coreos.com/v1alpha1 +items: +- apiVersion: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "postgresql.k8s.enterprisedb.io/v1", +``` + +### Destroy + +The `kubectl cnp destroy` command helps remove an instance and all the +associated PVCs from a Kubernetes cluster. + +The optional `--keep-pvc` flag, if specified, allows you to keep the PVCs, +while removing all `metadata.ownerReferences` that were set by the instance. +Additionally, the `k8s.enterprisedb.io/pvcStatus` label on the PVCs will change from +`ready` to `detached` to signify that they are no longer in use. + +Running again the command without the `--keep-pvc` flag will remove the +detached PVCs. 
+ +Usage: + +``` +kubectl cnp destroy [CLUSTER_NAME] [INSTANCE_ID] +``` + +The following example removes the `cluster-example-2` pod and the associated +PVCs: + +``` +kubectl cnp destroy cluster-example 2 +``` + +### Cluster hibernation + +Sometimes you may want to suspend the execution of a EDB Postgres for Kubernetes `Cluster` +while retaining its data, then resume its activity at a later time. We've +called this feature **cluster hibernation**. + +Hibernation is only available via the `kubectl cnp hibernate [on|off]` +commands. + +Hibernating a EDB Postgres for Kubernetes cluster means destroying all the resources +generated by the cluster, except the PVCs that belong to the PostgreSQL primary +instance. + +You can hibernate a cluster with: + +``` +kubectl cnp hibernate on +``` + +This will: + +1. shutdown every PostgreSQL instance +2. detach the PVCs containing the data of the primary instance, and annotate + them with the latest database status and the latest cluster configuration +3. delete the `Cluster` resource, including every generated resource - except + the aforementioned PVCs + +When hibernated, a EDB Postgres for Kubernetes cluster is represented by just a group of +PVCs, in which the one containing the `PGDATA` is annotated with the latest +available status, including content from `pg_controldata`. + +!!! Warning + A cluster having fenced instances cannot be hibernated, as fencing is + part of the hibernation procedure too. + +In case of error the operator will not be able to revert the procedure. You can +still force the operation with: + +``` +kubectl cnp hibernate on cluster-example --force +``` + +A hibernated cluster can be resumed with: + +``` +kubectl cnp hibernate off +``` + +Once the cluster has been hibernated, it's possible to show the last +configuration and the status that PostgreSQL had after it was shut down. +That can be done with: + +``` +kubectl cnp hibernate status +``` + +### Benchmarking the database with pgbench + +Pgbench can be run against an existing PostgreSQL cluster with following +command: + +``` +kubectl cnp pgbench -- --time 30 --client 1 --jobs 1 +``` + +Refer to the [Benchmarking pgbench section](benchmarking.md#pgbench) for more +details. + +### Benchmarking the storage with fio + +fio can be run on an existing storage class with following command: + +``` +kubectl cnp fio -n +``` + +Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details. + +### Requesting a new base backup + +The `kubectl cnp backup` command requests a new physical base backup for +an existing Postgres cluster by creating a new `Backup` resource. + +The following example requests an on-demand backup for a given cluster: + +```shell +kubectl cnp backup [cluster_name] +``` + +The created backup will be named after the request time: + +```shell +kubectl cnp backup cluster-example +backup/cluster-example-20230121002300 created +``` + +By default, new created backup will use the backup target policy defined +in cluster to choose which instance to run on. You can also use `--backup-target` +option to override this policy. please refer to [Backup and Recovery](backup_recovery.md) +for more information about backup target. + +### Launching psql + +The `kubectl cnp psql` command starts a new PostgreSQL interactive front-end +process (psql) connected to an existing Postgres cluster, as if you were running +it from the actual pod. This means that you will be using the `postgres` user. + +!!! 
Important + As you will be connecting as `postgres` user, in production environments this + method should be used with extreme care, by authorized personnel only. + +```shell +kubectl cnp psql cluster-example + +psql (15.3 (Debian 15.3-1.pgdg110+1)) +Type "help" for help. + +postgres=# +``` + +By default, the command will connect to the primary instance. The user can +select to work against a replica by using the `--replica` option: + +```shell +kubectl cnp psql --replica cluster-example +psql (15.3 (Debian 15.3-1.pgdg110+1)) + +Type "help" for help. + +postgres=# select pg_is_in_recovery(); + pg_is_in_recovery +------------------- + t +(1 row) + +postgres=# \q +``` + +This command will start `kubectl exec`, and the `kubectl` executable must be +reachable in your `PATH` variable to correctly work. + +### Snapshotting a Postgres cluster + +The `kubectl cnp snapshot` creates consistent snapshots of a Postgres +`Cluster` by: + +1. choosing a replica Pod to work on +2. fencing the replica +3. taking the snapshot +4. unfencing the replica + +!!! Warning + A cluster already having a fenced instance cannot be snapshotted. + +At the moment, this command can be used only for clusters having at least one +replica: that replica will be shut down by the fencing procedure to ensure the +snapshot to be consistent (cold backup). As the development of +declarative support for Kubernetes' `VolumeSnapshot` API continues, +this limitation will be removed, allowing you to take online backups +as business continuity requires. + +!!! Important + Even if the procedure will shut down a replica, the primary + Pod will not be involved. + +The `kubectl cnp snapshot` command requires the cluster name: + +```shell +kubectl cnp snapshot cluster-example + +waiting for cluster-example-3 to be fenced +waiting for VolumeSnapshot cluster-example-3-1682539624 to be ready to use +unfencing pod cluster-example-3 +``` + +The `VolumeSnapshot` resource will be created with an empty +`VolumeSnapshotClass` reference. That resource is intended by be used by the +`VolumeSnapshotClass` configured as default. 
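If you want to double-check which class a snapshot is using and whether it is ready, you can inspect the resource directly. The example below reuses the snapshot name from the output above; an empty class value is expected when the plugin leaves the reference unset.

```shell
# Show the VolumeSnapshotClass recorded on the snapshot (empty when unset)
# and whether the snapshot is ready to use.
kubectl get volumesnapshot cluster-example-3-1682539624 \
  -o jsonpath='{.spec.volumeSnapshotClassName}{"\n"}{.status.readyToUse}{"\n"}'
```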
+ +A specific `VolumeSnapshotClass` can be requested via the `-c` option: + +```shell +kubectl cnp snapshot cluster-example -c longhorn +``` \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx index 01af3d21bef..fb3688fb605 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx @@ -105,8 +105,12 @@ metrics, which can be classified in two major categories: - number of `.ready` and `.done` files in the archive status folder - requested minimum and maximum number of synchronous replicas, as well as the expected and actually observed values + - number of distinct nodes accommodating the instances + - timestamps indicating last failed and last available backup, as well + as the first point of recoverability for the cluster - flag indicating if replica cluster mode is enabled or disabled - flag indicating if a manual switchover is required + - flag indicating if fencing is enabled or disabled - Go runtime related metrics, starting with `go_*` @@ -123,6 +127,14 @@ cnp_collector_collection_duration_seconds{collector="Collect.up"} 0.0031393 # TYPE cnp_collector_collections_total counter cnp_collector_collections_total 2 +# HELP cnp_collector_fencing_on 1 if the instance is fenced, 0 otherwise +# TYPE cnp_collector_fencing_on gauge +cnp_collector_fencing_on 0 + +# HELP cnp_collector_nodes_used NodesUsed represents the count of distinct nodes accommodating the instances. A value of '-1' suggests that the metric is not available. A value of '1' suggests that all instances are hosted on a single node, implying the absence of High Availability (HA). Ideally this value should match the number of instances in the cluster. +# TYPE cnp_collector_nodes_used gauge +cnp_collector_nodes_used 3 + # HELP cnp_collector_last_collection_error 1 if the last collection ended with error, 0 otherwise. # TYPE cnp_collector_last_collection_error gauge cnp_collector_last_collection_error 0 @@ -164,7 +176,15 @@ cnp_collector_up{cluster="cluster-example"} 1 # HELP cnp_collector_postgres_version Postgres version # TYPE cnp_collector_postgres_version gauge -cnp_collector_postgres_version{cluster="cluster-example",full="13.4.0"} 13.4 +cnp_collector_postgres_version{cluster="cluster-example",full="15.3"} 15.3 + +# HELP cnp_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp +# TYPE cnp_collector_last_failed_backup_timestamp gauge +cnp_collector_last_failed_backup_timestamp 0 + +# HELP cnp_collector_last_available_backup_timestamp The last available backup as a unix timestamp +# TYPE cnp_collector_last_available_backup_timestamp gauge +cnp_collector_last_available_backup_timestamp 1.63238406e+09 # HELP cnp_collector_first_recoverability_point The first point of recoverability for the cluster as a unix timestamp # TYPE cnp_collector_first_recoverability_point gauge @@ -175,6 +195,42 @@ cnp_collector_first_recoverability_point 1.63238406e+09 cnp_collector_lo_pages{datname="app"} 0 cnp_collector_lo_pages{datname="postgres"} 78 +# HELP cnp_collector_wal_buffers_full Number of times WAL data was written to disk because WAL buffers became full. Only available on PG 14+ +# TYPE cnp_collector_wal_buffers_full gauge +cnp_collector_wal_buffers_full{stats_reset="2023-06-19T10:51:27.473259Z"} 6472 + +# HELP cnp_collector_wal_bytes Total amount of WAL generated in bytes. 
Only available on PG 14+ +# TYPE cnp_collector_wal_bytes gauge +cnp_collector_wal_bytes{stats_reset="2023-06-19T10:51:27.473259Z"} 1.0035147e+07 + +# HELP cnp_collector_wal_fpi Total number of WAL full page images generated. Only available on PG 14+ +# TYPE cnp_collector_wal_fpi gauge +cnp_collector_wal_fpi{stats_reset="2023-06-19T10:51:27.473259Z"} 1474 + +# HELP cnp_collector_wal_records Total number of WAL records generated. Only available on PG 14+ +# TYPE cnp_collector_wal_records gauge +cnp_collector_wal_records{stats_reset="2023-06-19T10:51:27.473259Z"} 26178 + +# HELP cnp_collector_wal_sync Number of times WAL files were synced to disk via issue_xlog_fsync request (if fsync is on and wal_sync_method is either fdatasync, fsync or fsync_writethrough, otherwise zero). Only available on PG 14+ +# TYPE cnp_collector_wal_sync gauge +cnp_collector_wal_sync{stats_reset="2023-06-19T10:51:27.473259Z"} 37 + +# HELP cnp_collector_wal_sync_time Total amount of time spent syncing WAL files to disk via issue_xlog_fsync request, in milliseconds (if track_wal_io_timing is enabled, fsync is on, and wal_sync_method is either fdatasync, fsync or fsync_writethrough, otherwise zero). Only available on PG 14+ +# TYPE cnp_collector_wal_sync_time gauge +cnp_collector_wal_sync_time{stats_reset="2023-06-19T10:51:27.473259Z"} 0 + +# HELP cnp_collector_wal_write Number of times WAL buffers were written out to disk via XLogWrite request. Only available on PG 14+ +# TYPE cnp_collector_wal_write gauge +cnp_collector_wal_write{stats_reset="2023-06-19T10:51:27.473259Z"} 7243 + +# HELP cnp_collector_wal_write_time Total amount of time spent writing WAL buffers to disk via XLogWrite request, in milliseconds (if track_wal_io_timing is enabled, otherwise zero). This includes the sync time when wal_sync_method is either open_datasync or open_sync. Only available on PG 14+ +# TYPE cnp_collector_wal_write_time gauge +cnp_collector_wal_write_time{stats_reset="2023-06-19T10:51:27.473259Z"} 0 + +# HELP cnp_last_error 1 if the last collection ended with error, 0 otherwise. +# TYPE cnp_last_error gauge +cnp_last_error 0 + # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 5.01e-05 @@ -191,7 +247,7 @@ go_goroutines 25 # HELP go_info Information about the Go environment. # TYPE go_info gauge -go_info{version="go1.17.1"} 1 +go_info{version="go1.20.5"} 1 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge @@ -301,9 +357,8 @@ go_threads 18 field named `full`. !!! Note - `cnp_collector_first_recoverability_point` will be zero until - your first backup to the object store. This is separate from - the WAL archival. + `cnp_collector_first_recoverability_point` and `cnp_collector_last_available_backup_timestamp` + will be zero until your first backup to the object store. This is separate from the WAL archival. ### User defined metrics @@ -387,7 +442,7 @@ data: ``` A list of basic monitoring queries can be found in the -[`default-monitoring.yaml` file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/config/manager/default-monitoring.yaml) +[`default-monitoring.yaml` file](https://github.com/EnterpriseDB/cloud-native-postgres/blob/main/config/manager/default-monitoring.yaml) that is already installed in your EDB Postgres for Kubernetes deployment (see ["Default set of metrics"](#default-set-of-metrics)). 
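To verify what an instance actually exposes, both the default set and any user defined metrics, a quick non-invasive check is to port-forward to the metrics port of one of the pods. This is a sketch that assumes the usual `cluster-example-1` pod name and the standard metrics port `9187`.

```shell
# Forward the metrics port of an instance pod to the local machine...
kubectl port-forward cluster-example-1 9187:9187 &

# ...then fetch the exposition format and filter the operator-provided metrics.
curl -s http://localhost:9187/metrics | grep '^cnp_'
```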
#### Example of a user defined metric running on multiple databases @@ -682,7 +737,8 @@ kubectl delete -f curl.yaml These resources are provided for illustration and experimentation, and do not represent any kind of recommendation for your production system -In the [`samples/monitoring/`](https://github.com/EnterpriseDB/docs/tree/main/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring) directory you will find a series of sample files for observability. +In the [`doc/src/samples/monitoring/`](https://github.com/EnterpriseDB/cloud-native-postgres/tree/main/docs/src/samples/monitoring) +directory you will find a series of sample files for observability. Please refer to [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana) section for context: diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx index fb6b3352a2f..8a3cc8edb00 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx @@ -487,7 +487,7 @@ keeping the database PVCs. This feature simulates scaling to 0 instances. ### Hibernation (imperative) -EDB Postgres for Kubernetes supports [hibernation of a running PostgreSQL cluster](cnp-plugin.md#cluster-hibernation) +EDB Postgres for Kubernetes supports [hibernation of a running PostgreSQL cluster](kubectl-plugin.md#cluster-hibernation) via the `cnp` plugin. Hibernation shuts down all Postgres instances in the High Availability cluster, and keeps a static copy of the PVC group of the primary, containing `PGDATA` and WALs. The plugin enables to exit the diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx index 7fd90a71f79..2e3f713276e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx @@ -304,10 +304,11 @@ connect to the primary. For example, suppose that you want to use the `app` database with `pg_failover_slots`, you need to add this entry in the `pg_hba` section: -````yaml +```yaml postgresql: pg_hba: - hostssl app streaming_replica all cert +``` ## The `pg_hba` section @@ -317,10 +318,10 @@ used to create the `pg_hba.conf` used by the pods. Since the first matching rule is used for authentication, the `pg_hba.conf` file generated by the operator can be seen as composed of four sections: -1. Fixed rules -2. User-defined rules -3. Optional LDAP section -4. Default rules +1. Fixed rules +2. User-defined rules +3. Optional LDAP section +4. 
Default rules Fixed rules: @@ -329,7 +330,7 @@ local all all peer hostssl postgres streaming_replica all cert hostssl replication streaming_replica all cert -```` +``` Default rules: diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx index b678864609c..35647edd10c 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx @@ -246,7 +246,7 @@ helm repo add prometheus-community \ https://prometheus-community.github.io/helm-charts helm upgrade --install \ - -f https://raw.githubusercontent.com/EnterpriseDB/docs/main/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/kube-stack-config.yaml \ + -f https://raw.githubusercontent.com/EnterpriseDB/cloud-native-postgres/main/docs/src/samples/monitoring/kube-stack-config.yaml \ prometheus-community \ prometheus-community/kube-prometheus-stack ``` diff --git a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx index dc5d7efc75b..c6f79ab0085 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx @@ -93,4 +93,4 @@ You can trigger a restart with: kubectl cnp restart [cluster] [current_primary] ``` -You can find more information in the [`cnp` plugin page](cnp-plugin.md). \ No newline at end of file +You can find more information in the [`cnp` plugin page](kubectl-plugin.md). \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-roles.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-roles.yaml index bfdf314fe5d..b98d18622cd 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-roles.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-roles.yaml @@ -11,6 +11,7 @@ spec: roles: - name: app createdb: true + login: true - name: dante ensure: present comment: my database-side comment diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml index 07ef94ebc0a..7e1987b1521 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml @@ -40,9 +40,9 @@ groups: - alert: LastFailedArchiveTime annotations: description: Archiving failed for {{ $labels.pod }} - summary: Checks the last time archiving failed. Will be -1 when it has not failed. + summary: Checks the last time archiving failed. Will be < 0 when it has not failed. expr: |- - cnp_pg_stat_archiver_last_failed_time > 1 + (cnp_pg_stat_archiver_last_failed_time - cnp_pg_stat_archiver_last_archived_time) > 1 for: 1m labels: severity: warning diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/cnp-prometheusrule.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/cnp-prometheusrule.yaml index 797b1272f41..2eaaec978cd 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/cnp-prometheusrule.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/cnp-prometheusrule.yaml @@ -45,9 +45,9 @@ spec: - alert: LastFailedArchiveTime annotations: description: Archiving failed for {{ $labels.pod }} - summary: Checks the last time archiving failed. 
Will be -1 when it has not failed. + summary: Checks the last time archiving failed. Will be < 0 when it has not failed. expr: |- - cnp_pg_stat_archiver_last_failed_time > 1 + (cnp_pg_stat_archiver_last_failed_time - cnp_pg_stat_archiver_last_archived_time) > 1 for: 1m labels: severity: warning diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml index f9454969b3a..1e14184ae1f 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml @@ -919,7 +919,7 @@ data: { "datasource": "${DS_PROMETHEUS}", "editorMode": "code", - "expr": "max(cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"})", + "expr": "max(cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})", "legendFormat": "__auto", "range": true, "refId": "A" @@ -987,7 +987,7 @@ data: { "datasource": "${DS_PROMETHEUS}", "editorMode": "code", - "expr": "max(cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"})", + "expr": "max(cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})", "legendFormat": "__auto", "range": true, "refId": "A" @@ -4894,7 +4894,7 @@ data: { "datasource": "${DS_PROMETHEUS}", "exemplar": true, - "expr": "cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "instant": false, "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", @@ -4981,7 +4981,7 @@ data: { "datasource": "${DS_PROMETHEUS}", "exemplar": true, - "expr": "cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "instant": false, "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", @@ -5069,7 +5069,7 @@ data: { "datasource": "${DS_PROMETHEUS}", "exemplar": true, - "expr": "cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", "refId": "A" @@ -5740,14 +5740,14 @@ data: "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "definition": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "hide": 0, "includeAll": true, "multi": true, "name": "instances", "options": [], "query": { - "query": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "query": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "refId": "StandardVariableQuery" }, "refresh": 1, diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json index 6cf641f6675..f389574ea43 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json +++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json @@ -1052,7 +1052,7 @@ "uid": 
"${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "max(cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"})", + "expr": "max(cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})", "legendFormat": "__auto", "range": true, "refId": "A" @@ -1126,7 +1126,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "max(cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"})", + "expr": "max(cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})", "legendFormat": "__auto", "range": true, "refId": "A" @@ -5414,7 +5414,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "instant": false, "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", @@ -5507,7 +5507,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "instant": false, "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", @@ -5601,7 +5601,7 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "expr": "cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "interval": "", "legendFormat": "{{pod}} -> {{application_name}}", "refId": "A" @@ -6292,14 +6292,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "definition": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "hide": 0, "includeAll": true, "multi": true, "name": "instances", "options": [], "query": { - "query": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-.*\"}", + "query": "cnp_collector_up{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}", "refId": "StandardVariableQuery" }, "refresh": 1, diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx index 33a0ba3b2e2..ca9bba80492 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx @@ -21,7 +21,7 @@ configuration in the `bootstrap` section). ## Issuing a new certificate !!! Seealso "About CNP plugin for kubectl" - Please refer to the ["Certificates" section in the "EDB Postgres for Kubernetes Plugin"](cnp-plugin.md#certificates) + Please refer to the ["Certificates" section in the "EDB Postgres for Kubernetes Plugin"](kubectl-plugin.md#certificates) page for details on how to use the plugin for `kubectl`. 
You can create a certificate for the `app` user in the `cluster-example` PostgreSQL cluster as follows: diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx index 4e4a8f7d8c7..296d88870cc 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx @@ -35,7 +35,7 @@ Make sure you know: On top of the mandatory `kubectl` utility, for troubleshooting, we recommend the following plugins/utilities to be available in your system: -- [`cnp` plugin](cnp-plugin.md) for `kubectl` +- [`cnp` plugin](kubectl-plugin.md) for `kubectl` - [`jq`](https://stedolan.github.io/jq/), a lightweight and flexible command-line JSON processor - [`grep`](https://www.gnu.org/software/grep/), searches one or more input files for lines containing a match to a specified pattern. It is already available in most \*nix distros. @@ -248,7 +248,7 @@ kubectl cnp status -n Manage certificates.
Trigger a rollout restart of the cluster to apply configuration changes.
Trigger a reconciliation loop to reload and apply configuration changes.
- For more information, please see [`cnp` plugin](cnp-plugin.md) documentation. + For more information, please see [`cnp` plugin](kubectl-plugin.md) documentation. Get EDB PostgreSQL Advanced Server (EPAS) / PostgreSQL container image version: diff --git a/product_docs/docs/tpa/23/rel_notes/index.mdx b/product_docs/docs/tpa/23/rel_notes/index.mdx index 68d9b69aa38..8ac6a64dc9b 100644 --- a/product_docs/docs/tpa/23/rel_notes/index.mdx +++ b/product_docs/docs/tpa/23/rel_notes/index.mdx @@ -10,6 +10,7 @@ navigation: - tpa_23.14_rel_notes - tpa_23.13_rel_notes - tpa_23.12_rel_notes + - tpa_23.1-11_rel_notes --- The Trusted Postgres Architect documentation describes the latest version of Trusted Postgres Architect 23. @@ -24,4 +25,5 @@ The Trusted Postgres Architect documentation describes the latest version of Tru | [23.14](tpa_23.14_rel_notes) | 23 Feb 2023 | | [23.13](tpa_23.13_rel_notes) | 22 Feb 2023 | | [23.12](tpa_23.12_rel_notes) | 21 Feb 2023 | +| [23.1-11](tpa_23.1-11_rel_notes)| - | diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.1-11_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.1-11_rel_notes.mdx new file mode 100644 index 00000000000..cba6ad8945b --- /dev/null +++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.1-11_rel_notes.mdx @@ -0,0 +1,175 @@ +--- +title: Trusted Postgres Architect 23.1 to 23.11 release notes +navTitle: "Version 23.1 to 23.11" +--- + +## TPA 23.11 +Released: 2023-01-31 +### Notable changes +* TPA-180 Introduce experimental support for PGD-Always-ON architecture (to be released later this year). +PGD-Always-ON architecture will use the upcoming BDR version 5. Initial support has been added for internal purposes and will be improved in upcoming releases. +### Minor changes +* TPA-349 Bump dependency versions +Bump cryptography version from 38.0.4 to 39.0.0 +Bump jq version from 1.3.0 to 1.4.0 +* TPA-345 Change TPAexec references to TPA in documentation. +Update the documentation to use 'TPA' instead of 'TPAexec' when referring to the product. + +## TPA 23.10 +Released: 2023-01-04 +### Minor changes +* TPA-161 Introduce `harp_manager_restart_on_failure` setting (defaults to false) to enable process restart on failure for the harp-manager systemd service +### Bug Fixes +* TPA-281 Delete FMS security groups when deprovisioning an AWS cluster +Fixes a failure to deprovision a cluster's VPC because of unremoved dependencies. +* TPA-305 Add enterprisedb_password to pre-generated secrets for Tower +* TPA-306 Prefer PEM_PYTHON_EXECUTABLE, if present, to /usr/bin/python3 +Fixes a Python module import error during deployment with PEM 9.0. +* TPA-219 Make pem-agent monitor the bdr_database by default on BDR instances + +## TPA 23.9 +Released: 2022-12-12 +### Bugfixes +* TPA-301 Fix auto-detection of cluster_dir for Tower clusters +When setting cluster_dir based on the Tower project directory, we now correctly check for the existence of the directory on the controller, and not on the instances being deployed to. +* TPA-283 Add dependency on psutil, required for Ansible Tower. +* TPA-278 Remove "umask 0" directive from rsyslog configuration, which previously resulted in the creation of world-readable files such as rsyslogd.pid . +* TPA-291 Respect the postgres_package_version setting when installing the Postgres server package to obtain pg_receivewal on Barman instances. 
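To make the TPA-291 entry above concrete, the following is a minimal, hedged sketch of pinning the Postgres server package version in a cluster's `config.yml`. The placement under `cluster_vars` and the wildcard version string are illustrative assumptions (wildcards in `package_version` options are discussed under TPA 23.3 below).

```yaml
# Illustrative sketch only: pin the Postgres server package version so that
# Barman instances install the matching package to obtain pg_receivewal (TPA-291).
cluster_vars:
  postgres_package_version: "14*"
```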
+ + +## TPA 23.8 +Released: 2022-11-30 +### Notable changes +* TPA-18 Support Ansible Tower 3.8 +This release supports execution of `deploy.yml` (only) on a `bare` cluster (i.e., with existing servers) through Ansible Tower 3.8. +Install TPAexec on the Tower server and run `tpaexec setup` to create a virtual environment which can be used in Tower Templates to run TPAexec playbooks. +Use the `--use-ansible-tower` and `--tower-git-repository` configure options to generate a Tower-compatible cluster configuration. +For details, see [Ansible Tower](https://techsupport.enterprisedb.com/customer_portal/sw/tpa/trunk/238/#). +### Minor changes +* TPA-238 Initialise the cluster directory as a git repository +If git is available on the system where you run TPAexec, `tpaexec configure` will now initialise a git repository within the cluster directory by default. If git is not available, it will continue as before. +To avoid creating the repository (for example, if you want to store the cluster directory within an existing repository), use the `--no-git` option. + +## TPA 23.7 +Released: 2022-11-09 +### Notable changes +* TPA-234 Support the community release of Ansible 2.9 +TPAexec used to require the 2ndQuadrant/ansible fork of Ansible 2.9. In this release, you may instead choose to use the community release of Ansible with the `tpaexec setup --use-community-ansible`. +For now, the default continues to be to use 2ndQuadrant/ansible. This will change in a future release; support for 2ndQuadrant/ansible will be dropped, and Ansible will become the new default. +### Minor changes +* TPA-209 Accept `--postgres-version 15` as a valid `tpaexec configure` option, subsequent to the release of Postgres 15 +* TPA-226 Accept IP addresses in the `--hostnames-from` file +Formerly, the file passed to `tpaexec configure` was expected to contain one hostname per line. Now it may also contain an optional IP address after each hostname. If present, this address will be set as the `ip_address` for the corresponding instance in config.yml. +(If you specify your own `--hostnames-from` file, the hostnames will no longer be randomised by default.) +* TPA-231 Add a new bdr-pre-group-join hook +This hook is executed before each node joins the BDR node group. It may be used to change the default replication set configuration that TPAexec provides. +* TPA-130 Use the postgresql_user module from community.postgresql +The updated module from the community.postgresql collection is needed in order to correctly report the task status when using a SCRAM password (the default module always reports `changed`). +* TPA-250 Upgrade to the latest versions of various Python dependencies +### Bugfixes +* TPA-220 Ensure LD_LIBRARY_PATH in .bashrc does not start with ":" +* TPA-82 Avoid removing BDR-internal ${group_name}_ext replication sets +* TPA-247 Fix "'str object' has no attribute 'node_dsn'" errors on AWS +The code no longer assigns `hostvars[hostname]` to an intermediate variable and expects it to behave like a normal dict later (which works only sometimes). This fixes a regression in 23.6 reported for AWS clusters with PEM enabled, but also fixes other similar errors throughout the codebase. 
+* TPA-232 Eliminate a race condition in creating a symlink to generated secrets in the inventory that resulted in "Error while linking: [Errno 17] File exists" errors +* TPA-252 Restore code to make all BDR nodes publish to the witness-only replication set +This code block was inadvertently removed in the v23.6 release as part of the refactoring work done for TPA-193. + + +## TPA 23.6 +Released: 2022-09-28 +### Notable changes +* TPA-21 Use boto3 (instead of the unmaintained boto2) AWS client library for AWS deployments. This enables SSO login and other useful features. +* TPA-202 Add harp-config hook. This deploy-time hook executes after HARP is installed and configured and before it is started on all nodes where HARP is installed. +### Bugfixes +* TPA-181 Set default python version to 2 on RHEL 7. Formerly, tpaexec could generate a config.yml with the unsupported combination of RHEL 7 and python 3. +* TPA-210 Fix aws deployments using existing security groups. Such a deployment used to fail at provision-time but will now work as expected. +* TPA-189 Remove group_vars directory on deprovision. This fixes a problem that caused a subsequent provision to fail because of a dangling symlink. +* TPA-175 Correctly configure systemd to leave shared memory segments alone. This only affects source builds. +* TPA-160 Allow version setting for haproxy and PEM. This fixes a bug whereby latest versions of packages would be installed even if a specific version was specified. +* TPA-172 Install EFM on the correct set of hosts. EFM should be installed only on postgres servers that are members of the cluster, not servers which have postgres installed for other reasons, such as PEM servers. +* TPA-113 Serialize PEM agent registration. This avoids a race condition when several hosts try to run pemworker --register-agent at the same time. + + +## TPA 23.5 +Released: 2022-08-23 +### Notable changes +* TPA-81 Publish tpaexec and tpaexec-deps packages for Ubuntu 22.04 Jammy +* TPA-26 Support harp-proxy and harp-manager installation on a single node. It is now possible to have both harp-proxy and harp-manager service running on the same target node in a cluster. + + +## TPA 23.4 +Released: 2022-08-03 +### Bugfixes +* TPA-152 fix an issue with locale detection during first boot of Debian instances in AWS Hosts would fail to complete first boot which would manifest as SSH key negotiation issues and errors with disks not found during deployment. This issue was introduced in 23.3 and is related to TPA-38 + + +## TPA 23.3 +Released: 2022-08-03 +### Notable changes +* TPA-118 Exposed two new options in harp-manager configuration. The first sets HARP `harp_db_request_timeout` similar to dcs request_timeout but for database connections and the second `harp_ssl_password_command` specifies a command used to de-obfuscate sslpassword used to decrypt the sslkey in SSL enabled database connection +### Minor changes +* TPA-117 Add documentation update on the use of wildcards in `package_version` options in tpaexec config.yml. This introduces a warning that unexpected package upgrades can occur during a `deploy` operation. See documentation in `tpaexec-configure.md` for more info +* TPA-38 Add locale files for all versions of Debian, and RHEL 8 and above. Some EDB software, such as Barman, has a requirement to set the user locale to `en_US.UTF-8`. Some users may wish to also change the locale, character set or language to a local region. 
This change ensures that OS files provided by libc are installed on AWS instances during firstboot using user-data scripts. The default locale is `en_US.UTF-8`. See `platform_aws.md` documentation for more info +* TPA-23 Add log config for syslog for cluster services Barman, HARP, repmgr, PgBouncer and EFM. The designated log server will store log files received in `/var/log/hosts` directories for these services +* TPA-109 Minor refactoring of the code in pgbench role around choosing lock timeout syntax based on a given version of BDR +### Bugfixes +* TPA-147 For clusters that use the source install method some missing packages for Debian and Rocky Linux were observed. Debian receives library headers for krb5 and lz4. On RedHat derived OSs the mandatory packages from the "Development Tools" package group and the libcurl headers have been added +* TPA-146 Small fix to the method of package selection for clusters installing Postgres 9.6 +* TPA-138 Addresses a warning message on clusters that use the "bare" platform that enable the local-repo configure options. As the OS is not managed by TPAexec in the bare platform we need to inform the user to create the local-repo structure. This previously caused an unhandled error halting the configure progress +* TPA-135 When using `--use-local-repo-only` with the "docker" platform and the Rocky Linux image initial removal of existing yum repository configuration on nodes would fail due to the missing commands `find` and `xargs`. This change ensures that if the `findutils` package exists in the source repo it will be installed first +* TPA-111 Remove a redundant additional argument on the command used to register agents with the PEM server when `--enable-pem` option is given. Previously, this would have caused no problems as the first argument, the one now removed, would be overridden by the second +* TPA-108 Restore SELinux file context for postmaster symlink when Postgres is installed from source. Previously, a cluster using a SELinux enabled OS that is installing postgres from source would fail to restart Postgres as the systemd daemon would be unable to read the symlink stored in the Postgres data bin directory. This was discovered in tests using a recently adopted Rocky Linux image in AWS that has SELinux enabled and in enforcing mode by default + + +## TPA 23.2 +Released: 2022-07-13 +### Notable changes +* Add support for Postgres Backup API for use with Barman and PEM. Accessible through the `--enable-pg-backup-api` option. +* SSL certificates can now be created on a per-service basis, for example the server certificate for Postgres Backup API proxy service. Certificates will be placed in `/etc/tpa/<service>/<hostname>.cert` These certificates can also be signed by a CA certificate generated for the cluster. +* Placement of Etcd for the BDR-Always-ON architecture When using 'harp_consensus_protocol: etcd', explicitly add 'etcd' to the role for each of the following instances: + * BDR Primary ('bdr' role) + * BDR Logical Standby ('bdr' + 'readonly' roles) + * only for the Bronze layout: BDR Witness ('bdr' + 'witness' roles) + * only for the Gold layout: Barman ('barman' role) Credit: Gianni Ciolli gianni.ciolli@enterprisedb.com +### Minor changes +* Replace configure argument `--2q` with `--pgextended` to reflect product branding changes. Existing configuration will retain expected behaviour. +* Improve error reporting on Docker platform compatibility checks when using version 18 of docker, which comes with Debian old stable. 
+* Add some missing commands to CLI help documentation. +* Improved error reporting of configure command. +* Add initial support for building BDR 5 from source. Credit: Florin Irion florin.irion@enterprisedb.com +* Changes to ensure ongoing compatibility for migration from older versions of Postgres with EDB products. +### Bugfixes +* Fixed an issue which meant packages for etcd were missing when using the download-packages command to populate the local-repo. +* Fixed an issue affecting the use of efm failover manager and the selection of its package dependencies + + +## TPA 23.1 +Released: 2022-06-21 + +This release requires you to run `tpaexec setup` after upgrading (and will fail with an error otherwise) + +### Changes to package installation behavior +In earlier versions, running `tpaexec deploy` could potentially upgrade installed packages, unless an exact version was explicitly specified (e.g., by setting postgres_package_version). However, this was never a safe, supported, or recommended way to upgrade. In particular, services may not have been safely and correctly restarted after a package upgrade during deploy. + +With this release onwards, `tpaexec deploy` will never upgrade installed packages. The first deploy will install all required packages (either a specific version, if set, or the latest available), and subsequent runs will see that the package is installed, and do nothing further. This is a predictable and safe new default behavior. + +If you need to update components, use `tpaexec update-postgres`. In this release, the command can update Postgres and Postgres-related packages such as BDR or pglogical, as well as certain other components, such as HARP, pgbouncer, and etcd (if applicable to a particular cluster). Future releases will safely support upgrades of more components. + +### Notable changes +* Run "harpctl apply" only if the HARP bootstrap config is changed +WARNING: This will trigger a single harp service restart on existing clusters when you run `tpaexec deploy`, because config.yml is changed to ensure that lists are consistently ordered, to avoid unintended changes in future deploys +* Add `tpaexec download-packages` command to download all packages required by a cluster into a local-repo directory, so that they can be copied to cluster instances in airgapped/disconnected environments. See air-gapped.md and local-repo.md for details +* Require `--harp-consensus-protocol <etcd|bdr>` configure option for new BDR-Always-ON clusters +TPAexec no longer supplies a default value here because the choice of consensus protocol can negatively affect failover performance, depending on network latency between data centres/locations, so the user is in a better position to select the protocol most suitable for a given cluster. 
+This affects the configuration of newly-generated clusters, but does not affect existing clusters that use the former default of `etcd` without setting harp_consensus_protocol explicitly +### Minor changes +* Install openjdk-11 instead of openjdk-8 for EFM on distributions where the older version is not available +* Accept `harp_log_level` setting (e.g., under cluster_vars) to override the default harp-manager and harp-proxy log level (info) +* Configure harp-proxy to use a single multi-host BDR DCS endpoint DSN instead of a list of individual endpoint DSNs, to improve resilience +* Omit extra connection attributes (e.g., ssl*) from the local (Unix socket) DSN for the BDR DCS for harp-manager +### Bugfixes +* Ensure that harp-manager and harp-proxy are restarted if their config changes +* Fix harp-proxy errors by granting additional (new) permissions required by the readonly harp_dcs_user +* Disable BDR4 transaction streaming when CAMO is enabled +If bdr.enable_camo is set, we must disable bdr.default_streaming_mode, which is not compatible with CAMO-protected transactions in BDR4. This will cause a server restart on CAMO-enabled BDR4 clusters (which could not work with streaming enabled anyway). \ No newline at end of file diff --git a/src/pages/index.js b/src/pages/index.js index af55ae6df99..1abfb30bc02 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -57,22 +57,22 @@ const Page = () => (

- Multi-region extreme high availability in preview in - BigAnimal + BigAnimal's no-commitment free trial

- You can now create multi-writer clusters that span BigAnimal - regions to maximize availability, performance, and resiliency - to zonal or regional failures. + You can now get $300 of credits to try out fully managed + Postgres with BigAnimal's cloud account and no commitment. + Then you can move your concept to production with just the + swipe of a credit card.

Find out more → @@ -93,20 +93,20 @@ const Page = () => (

- EDB Postgres Distributed quick start topics + EDB Postgres Distributed's ready reference

- Use these topics to quickly evaluate and deploy EDB Postgres - Distributed. They'll get you up and running with a fully - configured EDB Postgres Distributed cluster using the same - tools that you'll use to deploy to production. + Use the new reference section in EDB Postgres Distributed to + quickly look up views, catalogs, functions, and variables. It's + a new view of the documentation designed to centralize essential + information and speed up your development.

- + Find out more →