From d11668c2864c328a8b17d53cf60733c7223c9b72 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Tue, 22 Mar 2022 07:16:38 -0400 Subject: [PATCH 01/12] EFM: removed v3 as it is EOL --- .../03_installing_efm/01_efm3_rhel_8_x86.mdx | 50 - .../02_efm3_other_linux8_x86.mdx | 50 - .../3/03_installing_efm/03_efm3_rhel7_x86.mdx | 47 - .../03_installing_efm/04_efm3_centos7_x86.mdx | 41 - .../03_installing_efm/06_efm3_sles12_x86.mdx | 51 - .../07_efm3_ubuntu18_deb9_x86.mdx | 34 - .../03_installing_efm/08_efm3_rhel7_ppcle.mdx | 56 - .../03_installing_efm/11_install_details.mdx | 20 - .../3/03_installing_efm/12_initial_config.mdx | 16 - .../docs/efm/3/03_installing_efm/index.mdx | 33 - .../3/efm_pgpool_ha_guide/01_introduction.mdx | 62 - .../3/efm_pgpool_ha_guide/02_architecture.mdx | 34 - .../03_components_ha_pgpool.mdx | 163 --- .../3/efm_pgpool_ha_guide/04_appendix_a.mdx | 67 - .../3/efm_pgpool_ha_guide/05_appendix_b.mdx | 14 - .../3/efm_pgpool_ha_guide/images/EDB_logo.png | 3 - .../images/EFM_PgPool_Azure.png | 3 - .../images/backend_pools.png | 3 - .../images/edb_ha_architecture.png | 3 - .../images/edb_ha_architecture1.png | 3 - .../edb_ha_architecture_separate_VM.png | 3 - .../3/efm_pgpool_ha_guide/images/edb_logo.svg | 56 - .../images/failover_manager_overview.png | 3 - .../images/health_probes.png | 3 - .../images/load_balancing_rules.png | 3 - .../images/placeholder.png | 3 - .../images/rule_port_9898.png | 3 - .../images/rule_port_9999.png | 3 - .../docs/efm/3/efm_pgpool_ha_guide/index.mdx | 32 - .../efm/3/efm_quick_start/images/edb_logo.png | 3 - .../efm/3/efm_quick_start/images/edb_logo.svg | 56 - .../images/failover_manager_overview.png | 3 - .../3/efm_quick_start/images/placeholder.png | 3 - .../docs/efm/3/efm_quick_start/index.mdx | 144 -- .../efm/3/efm_rel_notes/01_310_rel_notes.mdx | 14 - .../efm/3/efm_rel_notes/02_39_rel_notes.mdx | 20 - .../efm/3/efm_rel_notes/03_38_rel_notes.mdx | 12 - .../docs/efm/3/efm_rel_notes/index.mdx | 13 - .../01_prerequisites.mdx | 104 -- .../02_failover_manager_overview/index.mdx | 43 - .../efm/3/efm_user/03_installing_efm-old.mdx | 267 ---- .../01_encrypting_database_password.mdx | 85 -- .../01_cluster_properties/index.mdx | 1154 ----------------- .../04_configuring_efm/02_cluster_members.mdx | 41 - .../03_extending_efm_permissions.mdx | 119 -- .../04_using_vip_addresses.mdx | 154 --- .../3/efm_user/04_configuring_efm/index.mdx | 26 - .../docs/efm/3/efm_user/05_using_efm.mdx | 326 ----- .../3/efm_user/06_monitoring_efm_cluster.mdx | 151 --- .../efm/3/efm_user/07_using_efm_utility.mdx | 219 ---- .../3/efm_user/08_controlling_efm_service.mdx | 61 - .../efm/3/efm_user/09_controlling_logging.mdx | 98 -- .../docs/efm/3/efm_user/10_notifications.mdx | 165 --- .../efm/3/efm_user/11_supported_scenarios.mdx | 137 -- .../12_upgrading_existing_cluster.mdx | 108 -- .../efm/3/efm_user/13_troubleshooting.mdx | 53 - .../14_configuring_streaming_replication.mdx | 50 - .../15_configuring_ssl_authentication.mdx | 73 -- .../efm_user/images/cascading_replication.png | 3 - .../images/cascading_replication1.png | 3 - .../docs/efm/3/efm_user/images/edb_logo.png | 3 - .../images/failover_manager_overview.png | 3 - .../efm/3/efm_user/images/placeholder.png | 3 - .../str_replication_dashboard_master.png | 3 - .../str_replication_dashboard_standby.png | 3 - ...supported_scenarios_master_agent_exits.png | 3 - .../supported_scenarios_master_db_down.png | 3 - ...ported_scenarios_node_becomes_isolated.png | 3 - ...upported_scenarios_standby_agent_exits.png | 3 - 
.../supported_scenarios_standby_db_down.png | 3 - ...upported_scenarios_witness_agent_exits.png | 3 - product_docs/docs/efm/3/efm_user/index.mdx | 33 - product_docs/docs/efm/3/index.mdx | 30 - static/_redirects | 1 + 74 files changed, 1 insertion(+), 4666 deletions(-) delete mode 100644 product_docs/docs/efm/3/03_installing_efm/01_efm3_rhel_8_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/02_efm3_other_linux8_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/03_efm3_rhel7_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/04_efm3_centos7_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/06_efm3_sles12_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/07_efm3_ubuntu18_deb9_x86.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/08_efm3_rhel7_ppcle.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/11_install_details.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/12_initial_config.mdx delete mode 100644 product_docs/docs/efm/3/03_installing_efm/index.mdx delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/01_introduction.mdx delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/02_architecture.mdx delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/04_appendix_a.mdx delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/05_appendix_b.mdx delete mode 100755 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EDB_logo.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/backend_pools.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture.png delete mode 100755 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture1.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png delete mode 100755 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_logo.svg delete mode 100755 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/failover_manager_overview.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/health_probes.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/load_balancing_rules.png delete mode 100755 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/placeholder.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9898.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9999.png delete mode 100644 product_docs/docs/efm/3/efm_pgpool_ha_guide/index.mdx delete mode 100755 product_docs/docs/efm/3/efm_quick_start/images/edb_logo.png delete mode 100755 product_docs/docs/efm/3/efm_quick_start/images/edb_logo.svg delete mode 100755 product_docs/docs/efm/3/efm_quick_start/images/failover_manager_overview.png delete mode 100755 product_docs/docs/efm/3/efm_quick_start/images/placeholder.png delete mode 100644 product_docs/docs/efm/3/efm_quick_start/index.mdx delete mode 100644 product_docs/docs/efm/3/efm_rel_notes/01_310_rel_notes.mdx delete mode 100644 product_docs/docs/efm/3/efm_rel_notes/02_39_rel_notes.mdx delete mode 100644 product_docs/docs/efm/3/efm_rel_notes/03_38_rel_notes.mdx delete mode 100644 product_docs/docs/efm/3/efm_rel_notes/index.mdx delete mode 100644 
product_docs/docs/efm/3/efm_user/02_failover_manager_overview/01_prerequisites.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/02_failover_manager_overview/index.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/03_installing_efm-old.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/index.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/02_cluster_members.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/04_using_vip_addresses.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/04_configuring_efm/index.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/05_using_efm.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/06_monitoring_efm_cluster.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/07_using_efm_utility.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/08_controlling_efm_service.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/09_controlling_logging.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/10_notifications.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/11_supported_scenarios.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/12_upgrading_existing_cluster.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/13_troubleshooting.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/14_configuring_streaming_replication.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/15_configuring_ssl_authentication.mdx delete mode 100644 product_docs/docs/efm/3/efm_user/images/cascading_replication.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/cascading_replication1.png delete mode 100755 product_docs/docs/efm/3/efm_user/images/edb_logo.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/failover_manager_overview.png delete mode 100755 product_docs/docs/efm/3/efm_user/images/placeholder.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_master.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_standby.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_agent_exits.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_db_down.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_node_becomes_isolated.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_agent_exits.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_db_down.png delete mode 100644 product_docs/docs/efm/3/efm_user/images/supported_scenarios_witness_agent_exits.png delete mode 100644 product_docs/docs/efm/3/efm_user/index.mdx delete mode 100644 product_docs/docs/efm/3/index.mdx diff --git a/product_docs/docs/efm/3/03_installing_efm/01_efm3_rhel_8_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/01_efm3_rhel_8_x86.mdx deleted file mode 100644 index 34f32950387..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/01_efm3_rhel_8_x86.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "RHEL 8/OL 8 on x86_64" ---- - -To request credentials that allow you to access an EnterpriseDB repository, see the [EDB Repository Access 
instructions](https://info.enterprisedb.com/rs/069-ALB-339/images/Repository%20Access%2004-09-2019.pdf). - -When you install an RPM package that is signed by a source that isn't recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press **Return** to continue. - -During the installation, yum might encounter a dependency that it can't resolve. If it does, it provides a list of the required dependencies to manually resolve. - -Failover Manager must be installed by root. During the installation process, the installer also creates a user named efm that has privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. - -After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster and then modify the file to enable access. - -## Installing - -1. To create the repository configuration file, assume superuser privileges and invoke the following command: - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -4. Enable the additional repositories to resolve dependencies: - - ```text - ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` - -5. Disable the built-in PostgreSQL module: - - ```text - dnf -qy module disable postgresql - ``` -6. Install the selected package: - ```text - dnf -y install edb-efm310 - ``` - - diff --git a/product_docs/docs/efm/3/03_installing_efm/02_efm3_other_linux8_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/02_efm3_other_linux8_x86.mdx deleted file mode 100644 index 87f46edf882..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/02_efm3_other_linux8_x86.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "Rocky Linux 8/AlmaLinux 8 on x86_64" ---- - -To request credentials that allow you to access an EnterpriseDB repository, see the [EDB Repository Access instructions](https://info.enterprisedb.com/rs/069-ALB-339/images/Repository%20Access%2004-09-2019.pdf). - -When you install an RPM package that is signed by a source that isn't recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press **Return** to continue. - -During the installation, yum might encounter a dependency that it can't resolve. If it does, it provides a list of the required dependencies to manually resolve. - -Failover Manager must be installed by root. During the installation process, the installer also creates a user named efm that has privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. - -After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster and then modify the file to enable access. - -## Installing - -1.
To create the repository configuration file, assume superuser privileges and invoke the following command: - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - dnf -y install epel-release - ``` - -4. Enable the additional repositories to resolve dependencies: - - ```text - dnf config-manager --set-enabled PowerTools - ``` - -5. Disable the built-in PostgreSQL module: - - ```text - dnf -qy module disable postgresql - ``` -6. Install the selected package: - ```text - dnf -y install edb-efm310 - ``` - - diff --git a/product_docs/docs/efm/3/03_installing_efm/03_efm3_rhel7_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/03_efm3_rhel7_x86.mdx deleted file mode 100644 index 7afb1afd161..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/03_efm3_rhel7_x86.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "RHEL 7/OL 7 on x86_64" ---- - -To request credentials that allow you to access an EnterpriseDB repository, see the [EDB Repository Access instructions](https://info.enterprisedb.com/rs/069-ALB-339/images/Repository%20Access%2004-09-2019.pdf). - -When you install an RPM package that is signed by a source that isn't recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press **Return** to continue. - -During the installation, yum might encounter a dependency that it can't resolve. If it does, it provides a list of the required dependencies to manually resolve. - -Failover Manager must be installed by root. During the installation process, the installer also creates a user named efm that has privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. - -After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster and then modify the file to enable access. - - -## Installing - -1. To create the repository configuration file, assume superuser privileges and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -4. Enable the additional repositories to resolve dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -5.
Install the selected package: - - ```text - yum -y install edb-efm310 - ``` - diff --git a/product_docs/docs/efm/3/03_installing_efm/04_efm3_centos7_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/04_efm3_centos7_x86.mdx deleted file mode 100644 index 97c102bfd33..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/04_efm3_centos7_x86.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "CentOS 7 on x86_64" ---- - -To request credentials that allow you to access an EnterpriseDB repository, see the [EDB Repository Access instructions](https://info.enterprisedb.com/rs/069-ALB-339/images/Repository%20Access%2004-09-2019.pdf). - -When you install an RPM package that is signed by a source that isn't recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press **Return** to continue. - -During the installation, yum might encounter a dependency that it can't resolve. If it does, it provides a list of the required dependencies to manually resolve. - -Failover Manager must be installed by root. During the installation process, the installer also creates a user named efm that has privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. - -After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster and then modify the file to enable access. - - -## Installing - -1. To create the repository configuration file, assume superuser privileges and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -4. Install the selected package: - - ```text - yum -y install edb-efm310 - ``` - diff --git a/product_docs/docs/efm/3/03_installing_efm/06_efm3_sles12_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/06_efm3_sles12_x86.mdx deleted file mode 100644 index 2b1cdcd69c0..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/06_efm3_sles12_x86.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "SLES 12 on x86_64" ---- - -To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, see the instructions to [access EDB software repositories](https://www.enterprisedb.com/repository-access-request). - -You can use the `zypper` package manager to install a Failover Manager agent on an SLES 12 host. `zypper` attempts to satisfy package dependencies as it installs a package but requires access to specific repositories that are not hosted at EnterpriseDB. - -## Installing - -1. Assume superuser privileges and stop any firewalls before installing Failover Manager. Then, use the following commands to add EnterpriseDB repositories to your system: - - ```text - zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo - ``` - -2. The commands create the repository configuration files in the `/etc/zypp/repos.d` directory.
Use the following command to refresh the metadata on your SLES host to include the EnterpriseDB repository: - - ```text - zypper refresh - ``` - - When prompted, provide credentials for the repository, specify to always trust the provided key, and update the metadata to include the EnterpriseDB repository. - -3. Add SUSEConnect and the SUSE Package Hub extension to the SLES host and register the host with SUSE, allowing access to SUSE repositories: - - ```text - zypper install SUSEConnect - SUSEConnect -r <registration_code> -e <email_address> - SUSEConnect -p PackageHub/12.4/x86_64 - SUSEConnect -p sle-sdk/12.4/x86_64 - ``` - -4. Add the OpenSUSE Apache Modules repository to resolve additional dependencies: - - ```text - zypper addrepo https://download.opensuse.org/repositories/Apache:/Modules/SLE_12_SP4/Apache:Modules.repo - ``` - -5. Install OpenJDK (version 1.8) for Java-based components: - - ```text - zypper -n install java-1_8_0-openjdk - ``` - -6. Use the `zypper` utility to install a Failover Manager agent: - - ```text - zypper -n install edb-efm310 - ``` - diff --git a/product_docs/docs/efm/3/03_installing_efm/07_efm3_ubuntu18_deb9_x86.mdx b/product_docs/docs/efm/3/03_installing_efm/07_efm3_ubuntu18_deb9_x86.mdx deleted file mode 100644 index 1b386787bbd..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/07_efm3_ubuntu18_deb9_x86.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Ubuntu 18.04/Debian 9 on x86_64" ---- - -To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, see the [EnterpriseDB website](https://www.enterprisedb.com/user/login). - -Use the EnterpriseDB APT repository to install Failover Manager. - -### Installing - -1. Assume superuser privileges: - ```text - sudo su - - ``` -2. Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: - ```text - sh -c 'echo "deb https://<username>:<password>@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` -3. Add support to your system for secure APT repositories: - ```text - apt-get install apt-transport-https - ``` -4. Add the EDB signing key: - ```text - wget -q -O - https://<username>:<password>@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` -5. Update the repository metadata: - ```text - apt-get update - ``` -6. Install Failover Manager: - ```text - apt-get -y install edb-efm310 - ``` diff --git a/product_docs/docs/efm/3/03_installing_efm/08_efm3_rhel7_ppcle.mdx b/product_docs/docs/efm/3/03_installing_efm/08_efm3_rhel7_ppcle.mdx deleted file mode 100644 index 2f3947efa72..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/08_efm3_rhel7_ppcle.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "RHEL 7 on IBM Power (ppc64le)" ---- - -To request credentials that allow you to access an EnterpriseDB repository, see the [EDB Repository Access instructions](https://info.enterprisedb.com/rs/069-ALB-339/images/Repository%20Access%2004-09-2019.pdf). - - -## Installing - -1. Create a configuration file and install Advance Toolchain: - - ```text - rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - - cat > /etc/yum.repos.d/advance-toolchain.repo <<EOF - [advance-toolchain] - name=Advance Toolchain IBM FTP - baseurl=https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7 - enabled=1 - gpgcheck=1 - gpgkey=https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - EOF - ``` - -2. To create the repository configuration file, assume superuser privileges and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -3. Replace ‘USERNAME:PASSWORD’ with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -4.
Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -5. On RHEL 7 ppc64le, enable the additional repositories to resolve EPEL dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -6. Install the selected package: - - ```text - yum -y install edb-efm310 - ``` diff --git a/product_docs/docs/efm/3/03_installing_efm/11_install_details.mdx b/product_docs/docs/efm/3/03_installing_efm/11_install_details.mdx deleted file mode 100644 index ef1045daae7..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/11_install_details.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Installation Details" - ---- - - - -Components are installed in the following locations, where `3.x` indicates a minor release: - -| Component | Location | -| --------------------------------- | ----------------------------- | -| Executables | `/usr/edb/efm-3.x/bin` | -| Libraries | `/usr/edb/efm-3.x/lib` | -| Cluster configuration files | `/etc/edb/efm-3.x` | -| Logs | `/var/log/efm-3.x` | -| Lock files | `/var/lock/efm-3.x` | -| Log rotation file | `/etc/logrotate.d/efm-3.x` | -| sudo configuration file | `/etc/sudoers.d/efm-3.x` | -| Binary to access VIP without sudo | `/usr/edb/efm-3.x/bin/secure` | - diff --git a/product_docs/docs/efm/3/03_installing_efm/12_initial_config.mdx b/product_docs/docs/efm/3/03_installing_efm/12_initial_config.mdx deleted file mode 100644 index 00fb362a352..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/12_initial_config.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Initial Configuration" ---- - - - -If you are using Failover Manager to monitor a cluster owned by a user other than enterprisedb or postgres, see [Extending Failover Manager permissions](../efm_user/04_configuring_efm/03_extending_efm_permissions/#extending_efm_permissions). - -After installing on each node of the cluster: - -1. Modify the [cluster properties file](../efm_user/04_configuring_efm/01_cluster_properties/#cluster_properties) on each node. -2. Modify the [cluster members file](../efm_user/04_configuring_efm/02_cluster_members/#cluster_members) on each node. -3. If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file. -4. Start the agent on each node of the cluster. For more information, see [Controlling the Failover Manager service](../efm_user/08_controlling_efm_service/#controlling-the-failover-manager-service). - - diff --git a/product_docs/docs/efm/3/03_installing_efm/index.mdx b/product_docs/docs/efm/3/03_installing_efm/index.mdx deleted file mode 100644 index 32759bea46a..00000000000 --- a/product_docs/docs/efm/3/03_installing_efm/index.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Installing Failover Manager" -redirects: - - ../efm_user/03_installing_efm -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.0/installing_efm.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/4.1/installing_efm.html" ---- - - - - - -For information about the platforms and versions supported by Failover Manager, see [Platform Compatibility](https://www.enterprisedb.com/platform-compatibility#efm).
- -For platform-specific install instructions, including accessing the repo, see: - -- Linux x86-64 (amd64): - - - [RHEL 8/OL 8](01_efm3_rhel_8_x86) - - - [Rocky Linux 8/AlmaLinux 8](02_efm3_other_linux8_x86) - - [RHEL 7/OL 7](03_efm3_rhel7_x86) - - [CentOS 7](04_efm3_centos7_x86) - - [SLES 12](06_efm3_sles12_x86) - - [Ubuntu 18.04/Debian 9](07_efm3_ubuntu18_deb9_x86) - -- Linux on IBM Power (ppc64le): - - - [RHEL 7](08_efm3_rhel7_ppcle) - -After you complete the installation, see [Initial Configuration](12_initial_config). \ No newline at end of file diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/01_introduction.mdx deleted file mode 100644 index 1bc423b5d81..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/01_introduction.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Architecture Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/introduction.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/introduction.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" ---- - -This guide explains how best to configure Failover Manager and Pgpool to leverage the benefits they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). - -The architecture described in this document has been developed and tested for EFM 3.10, EDB Pgpool, and Advanced Server 12. - -Documentation for Advanced Server and Failover Manager is available from EnterpriseDB at <https://www.enterprisedb.com/docs>. - -Documentation for Pgpool-II can be found at <https://www.pgpool.net>. - -## Failover Manager Overview - -Failover Manager is a high-availability module that monitors the health of a Postgres streaming replication cluster and verifies failures quickly. When a database failure occurs, Failover Manager can automatically promote a streaming replication Standby node into a writable Primary node to ensure continued performance and protect against data loss with minimal service interruption. - -**Basic EFM Architecture Terminology** - -A Failover Manager cluster comprises EFM processes that reside on the following hosts on a network: - -- A **Primary** node is the Primary database server that is servicing database clients. -- One or more **Standby nodes** are streaming replication servers associated with the Primary node. -- The **Witness node** confirms assertions of either the Primary or a Standby in a failover scenario. If, during a failure situation, the Primary finds itself in a partition with half or more of the nodes, it will stay Primary. As such, EFM supports running in a cluster with an even number of agents. - -## Pgpool-II Overview - -Pgpool-II (Pgpool) is an open-source application that provides connection pooling and load balancing for horizontal scalability of SELECT queries on multiple Standbys in EPAS and community Postgres clusters.
For every backend, a backend_weight parameter can set the ratio of read traffic to be directed to the backend node. To prevent read traffic on the Primary node, the backend_weight parameter can be set to 0. In such cases, data manipulation language (DML) queries (i.e., INSERT, UPDATE, and DELETE) will still be sent to the Primary node, while read queries are load-balanced to the Standbys, providing scalability with mixed and read-intensive workloads. - -EnterpriseDB supports the following Pgpool functionality: - -- Load balancing -- Connection pooling -- High availability -- Connection limits - -### PCP Overview - -Pgpool provides an interface called PCP for administrators that performs management operations such as retrieving the status of Pgpool or terminating Pgpool processes remotely. PCP commands are UNIX commands that manipulate Pgpool via the network. - -### Pgpool Watchdog - -`watchdog` is an optional subprocess of Pgpool that provides a high availability feature. Features added by `watchdog` include: - -- Health checking of the pgpool service -- Mutual monitoring of other watchdog processes -- Changing leader/Standby state if certain faults are detected -- Automatic virtual IP address assignment synchronized with server switching -- Automatic registration of a server as a Standby during recovery - -More information about the `Pgpool watchdog` component can be found in the Pgpool-II documentation at <https://www.pgpool.net>. - diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/02_architecture.mdx deleted file mode 100644 index 95ab1a8d8db..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/02_architecture.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Architecture" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/architecture.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/architecture.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/architecture.html" ---- - -![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png) - -The sample architecture diagram shows four nodes as described in the table below: - -| **Systems** | **Components** | -| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Primary Pgpool/EFM witness node | The Primary Pgpool node runs only Pgpool and an EFM witness, leaving as many resources available to Pgpool as possible. During normal operation (no Pgpool failovers), the Primary Pgpool node holds the Virtual IP address, and all applications connect through the Virtual IP address to Pgpool.
Pgpool will forward all write traffic to the Primary Database node, and will balance all reads across all Standby nodes. On the Primary Pgpool node, the EFM witness process ensures that a minimum quota of three EFM agents remains available even if one of the database nodes fails. Some examples are when a node is already unavailable due to maintenance, or failure, and another failure occurs. | | Primary Database node | The Primary Database node will only run Postgres (Primary) and EFM, leaving all resources to Postgres. Read/Write traffic (i.e., INSERT, UPDATE, DELETE) is forwarded to this node by the Primary Pgpool node. | | Standby nodes | The Standby nodes are running Postgres (Standby), EFM and an inactive Pgpool process. In case of a Primary database failure, EFM will promote Postgres on one of these Standby nodes to handle read-write traffic. In case of a Primary Pgpool failure, the Pgpool watchdog will activate Pgpool on one of the Standby nodes, which will attach the VIP and handle the forwarding of the application connections to the Database nodes. Note that in a double failure situation (both the Primary Pgpool node and the Primary Database node are in failure), both of these Primary processes might end up on the same node. | - -This architecture: - -- Achieves high availability by providing two Standbys that can be promoted in case of a Primary Postgres node failure. -- Achieves high availability by providing at least three Pgpool processes in a watchdog configuration. -- Increases performance with mixed and read-intensive workloads by introducing increased read scalability with more than one Standby for load balancing. -- Reduces load on the Primary database node by redirecting read-only traffic through the Primary Pgpool node. -- Prevents resource contention between Pgpool and Postgres on the Primary Database node. By not running Pgpool on the Primary database node, the Primary Postgres process can utilize as many resources as possible. -- Prevents resource contention between Pgpool and Postgres on the Primary Pgpool node. By not running Standby databases on the Primary Pgpool node, Pgpool can utilize as many resources as possible. -- Optionally, synchronous replication can be set up to achieve near-zero data loss in a failure event. - -!!! Note - The architecture also allows you to completely separate 3 virtual machines running Postgres from 3 virtual machines running Pgpool. This kind of setup requires 2 extra virtual machines, but it is a better choice if you want to prevent resource contention between Pgpool and Postgres in failover scenarios. In this setup, the architecture can run without an extra 7th node running the EFM witness process. To increase failure resolution, EFM witness agents could be deployed on the Pgpool servers. - -![Deployment of EFM and Pgpool on separate virtual machines](images/edb_ha_architecture_separate_VM.png) diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx deleted file mode 100644 index e94dc2cd4cc..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: "Implementing High Availability with Pgpool" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/components_ha_pgpool.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/components_ha_pgpool.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/components_ha_pgpool.html" ---- - -Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes. - -## Configuring Failover Manager - -Failover Manager provides functionality that will remove failed database nodes from Pgpool load balancing; it can also re-attach nodes to Pgpool when returned to the Failover Manager cluster. To configure EFM for high availability using Pgpool, you must set the following properties in the cluster properties file: - -pgpool.enable =<true/false> - -'pcp.user' = <User that would be invoking PCP commands> - -'pcp.host' = <Virtual IP that would be used by pgpool. Same as pgpool parameter 'delegate_IP’> - -'pcp.port' = <The port on which pgpool listens for pcp commands> - -'pcp.pass.file' = <Absolute path of PCPPASSFILE> - -'pgpool.bin' = <Absolute path of pgpool bin directory> - -## Configuring Pgpool - -The section lists the configuration of some important parameters in the `pgpool.conf` file to integrate the Pgpool-II with EFM. - -**Backend node setting** - -There are three PostgreSQL backend nodes, one Primary and two Standby nodes. Configure using `backend_*` configuration parameters in `pgpool.conf`, and use the equal backend weights for all nodes. This will make the read queries to be distributed equally among all nodes. - -```text -backend_hostname0 = ‘server1_IP' -backend_port0 = 5444 -backend_weight0 = 1 -backend_flag0 = 'DISALLOW_TO_FAILOVER' - -backend_hostname1 = ‘server2_IP' -backend_port1 = 5444 -backend_weight1 = 1 -backend_flag1 = 'DISALLOW_TO_FAILOVER' - -backend_hostname2 = ‘server3_IP' -backend_port2 = 5444 -backend_weight2 = 1 -backend_flag2 = 'DISALLOW_TO_FAILOVER' -``` - -**Enable Load-balancing and streaming replication mode** - -Set the following configuration parameter in the `pgpool.conf` file to enable load balancing and streaming replication mode - -```text -master_slave_mode = on -master_slave_sub_mode = 'stream' -load_balance_mode = on -``` - -**Disable health-checking and failover** - -Health-checking and failover must be handled by EFM and hence, these must be disabled on Pgpool-II side. To disable the health-check and failover on pgpool-II side, assign the following values: - -```text -health_check_period = 0 -fail_over_on_backend_error = off -failover_if_affected_tuples_mismatch = off -failover_command = ‘’ -failback_command = ‘’ -``` - -Ensure the following while setting up the values in the `pgpool.conf` file: - -- Keep the value of wd_priority in pgpool.conf different on each node. The node with the highest value gets the highest priority. -- The properties backend_hostname0 , backend_hostname1, backend_hostname2 and so on are shared properties (in EFM terms) and should hold the same value for all the nodes in pgpool.conf file. -- Update the correct interface value in *if\_* \* and arping cmd props in the pgpool.conf file. -- Add the properties heartbeat_destination0, heartbeat_destination1, heartbeat_destination2 etc. 
according to the number of nodes, in the pgpool.conf file on every node. Here, heartbeat_destination0 should be the IP address or hostname of the local node. - -**Setting up PCP** - -Failover Manager uses the PCP interface to attach and detach backend nodes, so PCP and the PCPPASS file must be set up to allow PCP connections without a password prompt. See the Pgpool-II documentation for instructions on setting up PCP and the PCPPASS file. - -Note that the load-balancing is turned on to ensure read scalability by distributing read traffic across the standby nodes. - -The health checking and error-triggered backend failover have been turned off, as Failover Manager will be responsible for performing health checks and triggering failover. It is not advisable for Pgpool to perform health checking in this case, so as not to create a conflict with Failover Manager, or prematurely perform failover. - -Finally, `search_primary_node_timeout` has been set to a low value to ensure prompt recovery of Pgpool services upon a Failover Manager-triggered failover. - -## Virtual IP Addresses - -Both Pgpool-II and Failover Manager provide functionality to employ a virtual IP for seamless failover. While both provide this capability, in this design the Pgpool-II leader is the process that receives the application connections through the Virtual IP, and Virtual IP management is performed by the Pgpool-II watchdog system. The EFM VIP feature has no beneficial effect in this design and must be disabled. - -Note that in a failure situation of the active instance of Pgpool (the Primary Pgpool server in our sample architecture), the next available Standby Pgpool instance (according to watchdog priority) is activated and takes charge as the leader Pgpool instance. - -## Configuring Pgpool-II Watchdog - -Watchdog provides high availability for Pgpool-II nodes. This section lists the configuration required for watchdog on each Pgpool-II node. - -**Common watchdog configurations on all Pgpool nodes** - -The following configuration parameters enable and configure the watchdog. The interval and retry values can be adjusted depending upon the requirements and testing results. - -```text -use_watchdog = on # enable watchdog -wd_port = 9000 # watchdog port, can be changed -delegate_IP = 'Virtual IP address' -wd_lifecheck_method = 'heartbeat' -wd_interval = 10 # we can lower this value for quick detection -wd_life_point = 3 -# virtual IP control -ifconfig_path = '/sbin' # ifconfig command path -if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0' - # startup delegate IP command -if_down_cmd = 'ifconfig eth0:0 down' # shutdown delegate IP command -arping_path = '/usr/sbin' # arping command path -``` - -!!! Note - Replace the value of eth0 with the network interface on your system. See [Chapter 5](05_appendix_b/#configuration-for-number-of-connections-and-pooling) for tuning the number of connections and the pooling configuration.
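Once the per-server watchdog settings below are in place and Pgpool-II has been restarted on every node, the state of the watchdog cluster can be verified over the PCP interface. A minimal sketch, assuming a PCP user named `pcpuser` and the default PCP port 9898 (both are illustrative placeholders, not values mandated by this guide):

```text
# Ask any Pgpool node for watchdog status; -v prints per-node detail,
# including which node currently holds the leader role and the delegate VIP.
pcp_watchdog_info -h <pgpool_node_IP_or_hostname> -p 9898 -U pcpuser -v
```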
- -**Watchdog configurations on server 2** - -```text -other_pgpool_hostname0 = 'server 3 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 4 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 1 -``` - -**Watchdog configurations on server 3** - -```text -other_pgpool_hostname0 = 'server 2 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 4 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 3 -``` - -**Watchdog configurations on server 4** - -```text -other_pgpool_hostname0 = 'server 2 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 3 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 5 # use high watchdog priority on server 4 -``` diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/04_appendix_a.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/04_appendix_a.mdx deleted file mode 100644 index 3f741c7a272..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/04_appendix_a.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "EFM Pgpool Integration Using Azure Network Load Balancer" - ---- - - - -This section describes a specific use case for EFM Pgpool integration, where the database, EFM, and Pgpool are installed on Rocky Linux 8 Virtual Machines in Azure. For this specific use case, Azure Network Load Balancer (NLB) has been used to distribute the traffic amongst all the active Pgpool instances instead of directing the traffic using the Pgpool VIP. - -![Architecture diagram for EFM and Pgpool integration using Azure Load Balancer](images/EFM_PgPool_Azure.png) - -**Step 1 (Installation)**: - -Install and configure the Advanced Server database, EFM, and Pgpool on Azure Virtual Machines as follows: - -| **Systems** | **Components** | -| ----------- | ------------------------------------------------------------------------------ | -| Primary | Primary node running Advanced Server 13 and Failover Manager 4.1 | -| Standby 1 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. | -| Standby 2 | Standby node running Advanced Server 13, Failover Manager 4.1, and Pgpool 4.1. | -| Witness | Witness node running Failover Manager 4.1 and Pgpool 4.1. | - -**Step 2 (Pgpool configuration)**: - -Configure Pgpool per the steps given in Chapter 3 (except for delegate_ip, which should be left empty in this architecture). - -**Step 3 (Azure Load Balancer configuration)**: - -Configure the following to use Azure NLB: - -**Networking**: Ensure the following settings for the Network Load Balancer and for each of the virtual machines: assign a public IP as well as a private IP to the NLB, and only a private IP to the virtual machines. The application server should connect to the NLB over the public IP, and the NLB in turn should connect to the virtual machines over the private IPs. - -In the current scenario, the following IP addresses are assigned to each component: - -- Public IP of NLB: 40.76.240.33 (pcp.host) -- Private IP of Primary db: 172.16.1.3 (note that this is not part of the backend pool of the Load Balancer) -- Private IP of Standby 1: 172.16.1.4 -- Private IP of Standby 2: 172.16.1.5 -- Private IP of witness node: 172.16.1.6 - -Ensure that the ports required to run the database, EFM, and Pgpool are open for communication.
Following is the list of default ports for each of these components (you can customize the ports for your environment): - -- Database: 5444 -- EFM: 7800 (bind.address) -- Pgpool: 9000, 9694, 9898, 9999 - -**Backend pool**: Create a Backend pool consisting of the 3 virtual machines running Pgpool instances. Use the private IPs of the virtual machines to create the Backend pool. - -![Backend pool in Azure console](images/backend_pools.png) - -**Health Probe**: Add a health probe to check if the Pgpool instance is available on the virtual machines. The health probe periodically pings the virtual machines of the Backend pool on port 9999. If it does not receive a response from a virtual machine, it assumes that the Pgpool instance is not available and stops sending traffic towards that particular machine. - -![Health probes in Azure console](images/health_probes.png) - -**Load balancing rules**: Add two Load balancing rules, one each for port 9898 and port 9999. These rules should ensure that the network traffic coming towards that particular port gets distributed evenly among all the virtual machines present in the Backend pool. - -![Load balancing rules in Azure console](images/load_balancing_rules.png) - -1. Rule created for port 9898 (i.e., PCP port) - -![Load balancing rule for port 9898](images/rule_port_9898.png) - -2. Rule created for port 9999 (i.e., Pgpool port) - -![Load balancing rule for port 9999](images/rule_port_9999.png) - -After configuring the setup described above, you can connect to Postgres on the IP address of the Network Load Balancer on port 9999. If a failure occurs on the Primary database server, EFM will promote a new Primary and then reconfigure Pgpool to redistribute traffic. If any one of the Pgpool processes is not available to accept traffic anymore, the Network Load Balancer will redistribute all the traffic to the remaining two Pgpool processes. Make sure that `listen_backlog_multiplier` is tuned to compensate for the higher number of connections in case of failover. diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/05_appendix_b.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/05_appendix_b.mdx deleted file mode 100644 index bbf483f973f..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/05_appendix_b.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "Configuration for Number of Connections and Pooling" - ---- - -Pgpool has configuration parameters to tune pooling and connection processing. Depending on this configuration, the Postgres `max_connections` setting should also be set to make sure all connections can be accepted as required. Furthermore, note that the Cloud architecture works with active/active instances, which requires spreading `num_init_children` over all Pgpool instances (divide the normally used value by the number of active instances). The text below describes the effect of changing the configuration and advises values for both the on-premise and the Cloud architecture. - -**max_pool**: Generally, it is advised to set `max_pool` to 1. Alternatively, for applications with a lot of reconnects, `max_pool` can be set to the number of distinct combinations of users, databases and connection options for the application connections. All but one connection in the pool would be stale connections, which consume connection slots from Postgres without adding to performance. It is therefore advised not to configure `max_pool` beyond 4 to preserve a healthy ratio between active and stale connections.
As an example, for an application which constantly reconnects and uses 2 distinct users, both connecting to their own database, set it to 2. If both users can connect to both databases, set it to 4. Note that increasing `max_pool` requires tuning down `num_init_children` in Pgpool, or tuning up `max_connections` in Postgres. - -**num_init_children**: It is advised to set `num_init_children` to the number of connections that could be running active in parallel, but the value should be divided by the number of active Pgpool-II instances (one with the on-premise architecture, and all instances for the cloud architecture). As an example: in an architecture with 3 Pgpool instances, to allow the application to have 100 active connections in parallel, set `num_init_children` to 100 for the on-premise architecture, and set `num_init_children` to 33 for the cloud architecture. Note that increasing `num_init_children` generally requires tuning up `max_connections` in Postgres. - -**listen_backlog_multiplier**: Can be set to multiply the number of open connections (as perceived by the application) with the number of active connections (`num_init_children`). As an example, when the application might open 500 connections of which 100 should be active in parallel, with the on-premise architecture, `num_init_children` should be set to 100, and `listen_backlog_multiplier` should be set to 4. This setup can process 100 connections active in parallel, and another 400 (`listen_backlog_multiplier*num_init_children`) connections will be queued before connections will be blocked. The application would perceive a total of 500 open connections, and Postgres would process the load of 100 connections maximum at all times. Note that increasing `listen_backlog_multiplier` only causes the application to perceive more connections, but will not increase the number of parallel active connections (which is determined by `num_init_children`). - -**max_connections**: It is advised to set `max_connections` in Postgres higher than `[number of active pgpool instances]*[max_pool]*[num_init_children] + [superuser_reserved_connections] (Postgres)`. As an example: in the on-premise setup with 3 instances active/passive, `max_pool` set to 2, `num_init_children` set to 100, and `superuser_reserved_connections (Postgres)` set to 5, Postgres `max_connections` should be set equal to or higher than `[1*2*100+5]`, which is 205 connections or higher. A similar setup in the cloud would run with 3 active instances, `max_pool` set to 2, `num_init_children` set to 33, and `superuser_reserved_connections (Postgres)` set to 5, in which case Postgres `max_connections` should be set equal to or higher than `[3*2*33+5]`, which is 203 or higher. Note that configuring below the advised setting can cause issues opening new connections, and in combination with `max_pool` can cause unexpected behaviour (low or no active connections but still connection issues due to stale pooled connections using connection slots from Postgres). For more information on the relation between `num_init_children`, `max_pool` and `max_connections`, see the background information in the Pgpool-II documentation.
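To make the sizing rule above easy to check, the following sketch collects the illustrative values from the cloud example with three active Pgpool-II instances; the numbers are taken from the worked example above and are not prescriptive:

```text
# pgpool.conf on each of the 3 active Pgpool-II instances
num_init_children = 33   # 100 parallel active connections / 3 instances
max_pool = 2

# postgresql.conf on each Postgres node
superuser_reserved_connections = 5
max_connections = 210    # >= 3 * 2 * 33 + 5 = 203
```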
diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EDB_logo.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EDB_logo.png deleted file mode 100755 index 9ec76139f63..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EDB_logo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5d3f95f25c7493174f25102604b286ceb5116b7b41c15a0dc232c8fd852536de -size 13356 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png deleted file mode 100644 index 5bde6798c07..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/EFM_PgPool_Azure.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f96dc8dad8fb1514127e410dbe6bd668691a0138b731e150afb8b5cffb2f9e65 -size 38838 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/backend_pools.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/backend_pools.png deleted file mode 100644 index 927dbdbc997..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/backend_pools.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6674dda03b836ac7e5e06cb059a15650f966f3d816263a04ddbb7fba4ec74436 -size 147475 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture.png deleted file mode 100644 index cd42278ac4d..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9a08834d26e39190da4f533032ad9f78ec5f253c97167f504aee92da9ec9ce76 -size 35314 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture1.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture1.png deleted file mode 100755 index 547cbf01a6e..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:950a1df9ad74895e52417a738a64014eed2203d7d98a1ee95c5aa86ba3078577 -size 116023 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png deleted file mode 100644 index 826dfbabc8b..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_ha_architecture_separate_VM.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c7ad7caf3ea611ac0d56dbdfdc3c67513863e0efd1b88dec306a77caa8d127c -size 39576 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_logo.svg b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_logo.svg deleted file mode 100755 index 74babf2f8da..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/edb_logo.svg +++ /dev/null @@ -1,56 +0,0 @@ - - - - -logo - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/failover_manager_overview.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/failover_manager_overview.png deleted file mode 100755 index 0a3389950c6..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/failover_manager_overview.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 -size 87850 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/health_probes.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/health_probes.png deleted file mode 100644 index d68d6e41fd9..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/health_probes.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:16026cb626476565b516885fd5dadc3dbceb933964d0189bb22a992cb4de8229 -size 114669 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/load_balancing_rules.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/load_balancing_rules.png deleted file mode 100644 index 081db02c30e..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/load_balancing_rules.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f26fa44740e64aed35044635b87629b4561f083dd6ce950a88ba6a38c3008daa -size 138639 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/placeholder.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/placeholder.png deleted file mode 100755 index 3c3bf2a4365..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/placeholder.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 -size 16849 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9898.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9898.png deleted file mode 100644 index 290825aeeb3..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9898.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:593aa7ddebe937d7fb837b4784658abfa1733389cd09873a150b5ea66778a2d4 -size 118143 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9999.png b/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9999.png deleted file mode 100644 index 8d19389dd7a..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/images/rule_port_9999.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:738e8fad910a66ce32c087cd410c6b9b06a7eff0b9388bc553b021b08f085301 -size 117221 diff --git a/product_docs/docs/efm/3/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/3/efm_pgpool_ha_guide/index.mdx deleted file mode 100644 index c04599f4806..00000000000 --- a/product_docs/docs/efm/3/efm_pgpool_ha_guide/index.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "High Availability & Scalability Guide" - -#legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/conclusion.html" - #- "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/index.html" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/introduction.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.8/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/introduction.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.9/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/introduction.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/high-availability-scalability-guide/3.10/conclusion.html" ---- - -Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. - -
- -introduction architecture components_ha_pgpool appendix_a appendix_b conclusion - -
diff --git a/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.png b/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.png deleted file mode 100755 index 3c3bf2a4365..00000000000 --- a/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 -size 16849 diff --git a/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.svg b/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.svg deleted file mode 100755 index 74babf2f8da..00000000000 --- a/product_docs/docs/efm/3/efm_quick_start/images/edb_logo.svg +++ /dev/null @@ -1,56 +0,0 @@ - - - - -logo - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/product_docs/docs/efm/3/efm_quick_start/images/failover_manager_overview.png b/product_docs/docs/efm/3/efm_quick_start/images/failover_manager_overview.png deleted file mode 100755 index 0a3389950c6..00000000000 --- a/product_docs/docs/efm/3/efm_quick_start/images/failover_manager_overview.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a5784411bf1d038252baba457c643c00d59a9ea67d3eaaab73b04b8025a62249 -size 87850 diff --git a/product_docs/docs/efm/3/efm_quick_start/images/placeholder.png b/product_docs/docs/efm/3/efm_quick_start/images/placeholder.png deleted file mode 100755 index 3c3bf2a4365..00000000000 --- a/product_docs/docs/efm/3/efm_quick_start/images/placeholder.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 -size 16849 diff --git a/product_docs/docs/efm/3/efm_quick_start/index.mdx b/product_docs/docs/efm/3/efm_quick_start/index.mdx deleted file mode 100644 index f2346efa378..00000000000 --- a/product_docs/docs/efm/3/efm_quick_start/index.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: "Creating a Failover Manager Cluster" - -#legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/genindex.html" - #- "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/index.html" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/installation-getting-started/quick-start/3.10/index.html" ---- - -EDB Postgres Failover Manager (Failover Manager) is a high-availability module from EnterpriseDB that enables a Postgres Primary node to automatically failover to a Standby node in the event of a software or hardware failure on the Primary. - -This quick start guide describes configuring a Failover Manager cluster in a test environment. You should read and understand the [EDB Failover Manager User's Guide](/efm/latest/) before configuring Failover Manager for a production deployment. 
- -You must perform some basic installation and configuration steps before starting this tutorial: - -- You must install and initialize a database server on one primary and one or two standby nodes; for information about installing Advanced Server, visit: - - [https://www.enterprisedb.com/docs/p/edb-postgres-advanced-server](/epas/latest/) - -- Postgres streaming replication must be configured and running between the primary and standby nodes. For detailed information about configuring streaming replication, see the PostgreSQL documentation. - -- You must also install Failover Manager on each primary and standby node. During Advanced Server installation, you configured an EnterpriseDB repository on each database host. You can use the EnterpriseDB repository and the `yum install` command to install Failover Manager on each node of the cluster: - - ```text - yum install edb-efm310 - ``` - -During the installation process, the installer will create a user named `efm` that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by `enterprisedb` or `postgres`. The example that follows creates a cluster named `efm`. - -Start the configuration process on a primary or standby node. Then, copy the configuration files to other nodes to save time. - -**Step 1: Create Working Configuration Files** - -Copy the provided sample files to create EFM configuration files, and correct the ownership: - -```text -cd /etc/edb/efm-3.10 - -cp efm.properties.in efm.properties - -cp efm.nodes.in efm.nodes - -chown efm:efm efm.properties - -chown efm:efm efm.nodes -``` - -**Step 2: Create an Encrypted Password** - -Create the [encrypted password](/efm/latest/efm_user/04_configuring_efm/02_encrypting_database_password/) needed for the properties file: - -```text -/usr/edb/efm-3.10/bin/efm encrypt efm -``` - -Follow the onscreen instructions to produce the encrypted version of your database password. - -**Step 3: Update the efm.properties File** - -The `.properties` file (efm.properties file in this example) contains parameters that specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings are applied when Failover Manager starts. - -The properties mentioned in this tutorial are the minimal properties required to configure a Failover Manager cluster. If you are configuring a production system, please review the *EDB Failover Manager Guide* for detailed information about Failover Manager options. - -Provide values for the following properties on all cluster nodes: - -| Property | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -| `db.user` | The name of the database user. | -| `db.password.encrypted` | The encrypted password of the database user. | -| `db.port` | The port monitored by the database. | -| `db.database` | The name of the database. | -| `db.service.owner` | The owner of the `data` directory (usually `postgres` or `enterprisedb`). Required only if the database is running as a service. | -| `db.service.name` | The name of the database service (used to restart the server). Required only if the database is running as a service. | -| `db.bin` | The path to the `bin` directory (used for calls to `pg_ctl`). | -| `db.data.dir` | The `data` directory in which EFM will find or create the `recovery.conf` file or the `standby.signal` file.
| -| `user.email` | An email address at which to receive email notifications (notification text is also in the agent log file). | -| `bind.address` | The local address of the node and the port to use for EFM. The format is: `bind.address=1.2.3.4:7800` | -| `is.witness` | `true` on a witness node and `false` if it is a primary or standby. | -| `ping.server.ip` | If you are running on a network without Internet access, set `ping.server.ip` to an address that is available on your network. | -| `auto.allow.hosts` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. | -| `stable.nodes.file` | On a test cluster, set to `true` to simplify startup; for production usage, consult the user's guide. | - -**Step 4: Update the efm.nodes File** - -The `.nodes` file (efm.nodes file in this example) is read at startup to tell an agent how to find the rest of the cluster or, in the case of the first node started, can be used to simplify authorization of subsequent nodes. Add the addresses and ports of each node in the cluster to this file. One node will act as the membership coordinator; the list should include at least the membership coordinator's address. For example: - - `1.2.3.4:7800` - - `1.2.3.5:7800` - - `1.2.3.6:7800` - -Please note that the Failover Manager agent will not verify the content of the `efm.nodes` file; the agent expects that some of the addresses in the file cannot be reached (e.g. that another agent hasn’t been started yet). - -**Step 5: Configure the Other Nodes** - -Copy the `efm.properties` and `efm.nodes` files to the `/etc/edb/efm-3.10` directory on the other nodes in your sample cluster. After copying the files, change the file ownership so the files are owned by `efm:efm`. The `efm.properties` file can be the same on every node, except for the following properties: - -- Modify the `bind.address` property to use the node’s local address. -- Set `is.witness` to `true` if the node is a witness node. If the node is a witness node, the properties relating to a local database installation will be ignored. - -**Step 6: Start the EFM Cluster** - -On any node, start the Failover Manager agent. The agent is named `edb-efm-3.10`; you can use your platform-specific service command to control the service. For example, on a CentOS/RHEL 7.x or Rocky Linux/AlmaLinux/RHEL 8.x host use the command: - -```text -systemctl start edb-efm-3.10 -``` - -After the agent starts, run the following command to see the status of the single-node cluster. You should see the addresses of the other nodes in the `Allowed node host` list. - -```text -/usr/edb/efm-3.10/bin/efm cluster-status efm -``` - -Start the agent on the other nodes. Run the `efm cluster-status efm` command on any node to see the cluster status. - -If any agent fails to start, see the startup log for information about what went wrong: - -```text -cat /var/log/efm-3.10/startup-efm.log -``` - -**Performing a Switchover** - -If the cluster status output shows that the primary and standby(s) are in sync, you can perform a switchover with the following command: - -```text -/usr/edb/efm-3.10/bin/efm promote efm -switchover -``` - -The command will promote a standby and reconfigure the primary database as a new standby in the cluster. To switch back, run the command again. 
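Putting the pieces together, a typical check-switch-verify sequence on any cluster node looks like the following (using the cluster name `efm` from this tutorial; the commands are the same ones introduced above):

```text
# confirm that the primary and standby(s) are in sync
/usr/edb/efm-3.10/bin/efm cluster-status efm

# promote a standby; the old primary is reconfigured as a new standby
/usr/edb/efm-3.10/bin/efm promote efm -switchover

# verify the new cluster roles
/usr/edb/efm-3.10/bin/efm cluster-status efm
```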
- -For quick access to online help, you can invoke the following command: - -```text -/usr/edb/efm-3.10/bin/efm --help -``` diff --git a/product_docs/docs/efm/3/efm_rel_notes/01_310_rel_notes.mdx b/product_docs/docs/efm/3/efm_rel_notes/01_310_rel_notes.mdx deleted file mode 100644 index a3c68627e23..00000000000 --- a/product_docs/docs/efm/3/efm_rel_notes/01_310_rel_notes.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "Version 3.10" ---- - -Enhancements, bug fixes, and other changes in EFM 3.10 include: - -| Type | Description | -| ---- | ----------- | -| Enhancement | Added physical replication slot support for PostgreSQL 12 and EDB Postgres Advanced Server 12. EFM now creates and advances slots on standby instances. | -| Enhancement | Added Network Address Translation (NAT) support, enabling EFM agents to communicate through public IP addresses. [Support Ticket #945799] | -| Enhancement | Replaced the trigger file with pg_ctl to promote the standby to master. | -| Enhancement | Added a new command to check the local efm status: `efm node-status-json <cluster name>`. | - -See [The Cluster Properties File](../efm_user/04_configuring_efm/01_cluster_properties) for more information on the new properties. \ No newline at end of file diff --git a/product_docs/docs/efm/3/efm_rel_notes/02_39_rel_notes.mdx b/product_docs/docs/efm/3/efm_rel_notes/02_39_rel_notes.mdx deleted file mode 100644 index 139adfeb71b..00000000000 --- a/product_docs/docs/efm/3/efm_rel_notes/02_39_rel_notes.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Version 3.9" ---- - -Enhancements, bug fixes, and other changes in EFM 3.9 include: - -| Type | Description | -| ---- | ----------- | -| Enhancement | Properties have been renamed. See the table below for details. If you use the upgrade utility to upgrade your Failover Manager installation, they are automatically updated. | -| Enhancement | The prefix of the Failover Manager service name has changed. The new service name is edb-efm-3.9. | -| Enhancement | EFM can now reduce the `num_sync` value for synchronous replicas when standbys leave the cluster so that the primary can still accept writes. | -| Enhancement | EFM now includes features to work with the new EDB replication server, allowing clusters with a mix of physical and logical replication. | -| Enhancement | When there are multiple standbys, EFM can now promote based on the current replay value instead of standby priority. | -| Enhancement | EFM now handles database file layouts on Debian/Ubuntu installations. | -| Bug Fix | A fix for SSL connections requiring hostname validation. [Support Ticket: #957322] | -| Bug Fix | Fixed handling of equals signs in recovery conf information. [Support Ticket: #963294] | - -See [The Cluster Properties File](../efm_user/04_configuring_efm/01_cluster_properties) for more information on the new and renamed properties. - - diff --git a/product_docs/docs/efm/3/efm_rel_notes/03_38_rel_notes.mdx b/product_docs/docs/efm/3/efm_rel_notes/03_38_rel_notes.mdx deleted file mode 100644 index 853a42ede4a..00000000000 --- a/product_docs/docs/efm/3/efm_rel_notes/03_38_rel_notes.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "Version 3.8" ---- - -Enhancements, bug fixes, and other changes in EFM 3.8 include: - -| Type | Description | -| ---- | ----------- | -| Enhancement | You can use the restore.command property to update the restore_command parameter if a new master is promoted.
| -| Enhancement | You can use the reconfigure.sync.master property to specify Failover Manager behavior if the number of synchronous standbys drops below the required level. | -| Enhancement | New notifications inform you if the state of a synchronous replication master changes, if a recovery file exists on the master node, or if the trigger file path is not writable. | -| Bug Fix | Handle WAL log backup removal on standbys during promotion for database version 9.X. | \ No newline at end of file diff --git a/product_docs/docs/efm/3/efm_rel_notes/index.mdx b/product_docs/docs/efm/3/efm_rel_notes/index.mdx deleted file mode 100644 index f5f8e14ee98..00000000000 --- a/product_docs/docs/efm/3/efm_rel_notes/index.mdx +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Release Notes" ---- -The Failover Manager documentation describes the latest version of EFM 3, including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about which release introduced the feature. - -| Version | Release Date | -| ------- | ------------ | -| [3.10](01_310_rel_notes) | 2020 Jun 11 | -| [3.9](02_39_rel_notes) | 2020 Mar 20 | -| [3.8](03_38_rel_notes) | 2020 Jan 9 | - - - diff --git a/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/01_prerequisites.mdx b/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/01_prerequisites.mdx deleted file mode 100644 index d19c2f4c474..00000000000 --- a/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/01_prerequisites.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Prerequisites" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/prerequisites.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/prerequisites.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/prerequisites.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/prerequisites.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/prerequisites.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.07.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.07.html" ---- - - - -Before configuring a Failover Manager cluster, you must satisfy the prerequisites described below. - -**Install Java 1.8 (or later)** - -Before using Failover Manager, you must first install Java (version 1.8 or later). Failover Manager is tested with OpenJDK, and we strongly recommend installing that version of Java. [Installation instructions for Java](https://openjdk.java.net/install/) are platform specific. - -**Provide an SMTP Server** - -You can receive notifications from Failover Manager as specified by a user-defined notification script, by email, or both. - -- If you are using email notifications, an SMTP server must be running on each node of the Failover Manager scenario. -- If you provide a value in the script.notification property, you can leave the user.email field blank; an SMTP server is not required (a sample notification script appears below).
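If you rely on a notification script, it can be as simple as the sketch below. The script path and log location are illustrative, and the assumption that EFM passes the notification subject and body as command-line arguments should be verified against the comments in the cluster properties file template:

```text
#!/bin/bash
# Illustrative target for the script.notification property.
# Assumes EFM invokes the script with the notification subject and body
# as arguments; each notification is appended to a local log file.
echo "$(date '+%Y-%m-%d %H:%M:%S') $*" >> /var/log/efm-notifications.log
```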
- -If an event occurs, Failover Manager invokes the script (if provided), and can also send a notification email to any email addresses specified in the user.email parameter of the cluster properties file. For more information about using an SMTP server, visit: - -[https://access.redhat.com/site/documentation](https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/s1-email-mta.html) - -**Configure Streaming Replication** - -Failover Manager requires that PostgreSQL streaming replication be configured between the Primary node and the Standby node or nodes. Failover Manager does not support other types of replication. - -On database versions 11 (or prior), unless specified with the `-sourcenode` option, a `recovery.conf` file is copied from a random standby node to the stopped primary during switchover. You should ensure that the paths within the `recovery.conf` file on your standby nodes are consistent before performing a switchover. For more information about the `-sourcenode` option, please see [Promoting a Failover Manager Node](../05_using_efm/#promote_node). - -On database version 12, the `primary_conninfo` and `restore_command` properties are copied from a random standby node to the stopped primary during switchover (unless otherwise specified with the `-sourcenode` option). - -**Modify the pg_hba.conf File** - -You must modify the `pg_hba.conf` file on the Primary and Standby nodes, adding entries that allow communication among all of the nodes in the cluster. The following example demonstrates entries that might be made to the pg_hba.conf file on the Primary node: - -```text -# access for itself -host fmdb efm 127.0.0.1/32 md5 -# access for standby -host fmdb efm 192.168.27.1/32 md5 -# access for witness -host fmdb efm 192.168.27.34/32 md5 -``` - -Where: - - `efm` specifies the name of a valid database user. - - `fmdb` specifies the name of a database to which the efm user may connect. - -By default, the `pg_hba.conf` file resides in the `data` directory, under your Postgres installation. After modifying the `pg_hba.conf` file, you must reload the configuration file on each node for the changes to take effect. You can use the following command: - - `# systemctl reload edb-as-x` - -Where `x` specifies the Postgres version. - -**Using Autostart for the Database Servers** - -If a Primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file, the file that prevents the database server from restarting as a primary. As a result, the rebooted Primary node will return to the cluster as a second Primary node. - -To prevent this, ensure that the Failover Manager agent starts automatically before the database server. The agent will start in idle mode and check whether there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary. - -**Ensure Communication Through Firewalls** - -If a Linux firewall (e.g., iptables) is enabled on the host of a Failover Manager node, you may need to add rules to the firewall configuration that allow TCP communication between the Failover Manager processes in the cluster.
For example: - -```text -# iptables -I INPUT -p tcp --dport 7800:7810 -j ACCEPT -/sbin/service iptables save -``` - -The command shown above opens a small range of ports (7800 through 7810). Failover Manager will connect via the port that corresponds to the port specified in the cluster properties file. - -**Ensure that the Database user has Sufficient Privileges** - -The database user specified by the `db.user` property in the `efm.properties` file must have sufficient privileges to invoke the following functions on behalf of Failover Manager: - - `pg_current_wal_lsn()` - - `pg_last_wal_replay_lsn()` - - `pg_wal_replay_resume()` - - `pg_reload_conf()` - -For detailed information about each of these functions, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/index.html). - -The user must also have permissions to read the values of configuration variables; a database superuser can use the PostgreSQL `GRANT` command to provide the permissions needed: - -```text -GRANT pg_read_all_settings TO user_name; -``` - -For more information about `pg_read_all_settings`, please see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/default-roles.html). diff --git a/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/index.mdx b/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/index.mdx deleted file mode 100644 index 9c7fe8a2a92..00000000000 --- a/product_docs/docs/efm/3/efm_user/02_failover_manager_overview/index.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Failover Manager Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/failover_manager_overview.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/failover_manager_overview.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/failover_manager_overview.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/failover_manager_overview.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/failover_manager_overview.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.05.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.05.html" ---- - - - -An EDB Postgres Failover Manager (EFM) cluster is comprised of Failover Manager processes that reside on the following hosts on a network: - -- A Primary node - The Primary node is the primary database server that is servicing database clients. -- One or more Standby nodes - A Standby node is a streaming replication server associated with the Primary node. -- A Witness node - The Witness node confirms assertions of either the Primary or a Standby in a failover scenario. A cluster does not need a dedicated witness node if the cluster contains three or more nodes. If you do not have a third cluster member that is a database host, you can add a dedicated Witness node. A cluster may include more than one witness node. - -Traditionally, a *cluster* is a single instance of Postgres managing multiple databases. In this document, the term cluster refers to a Failover Manager cluster. 
A Failover Manager cluster consists of a Primary agent, one or more Standby agents, and an optional Witness agent that reside on servers in a cloud or on a traditional network and communicate using the JGroups toolkit. - -![An EFM scenario employing a Virtual IP address.](../images/failover_manager_overview.png) - -When a non-witness agent starts, it connects to the local database and checks the state of the database: - -- If the agent cannot reach the database, it will start in idle mode. -- If it finds that the database is in recovery, the agent assumes the role of standby. -- If the database is not in recovery, the agent assumes the role of primary. - -In the event of a failover, Failover Manager attempts to ensure that the promoted standby is the most up-to-date standby in the cluster; please note that data loss is possible if the standby node is not in sync with the primary node. - -[JGroups](http://www.jgroups.org/) provides technology that allows Failover Manager to create clusters whose member nodes can communicate with each other and detect node failures. - -The figure shown above illustrates a Failover Manager cluster that employs a virtual IP address. You can use a load balancer in place of a [virtual IP address](../04_configuring_efm/05_using_vip_addresses/#using_vip_addresses) if you provide your own [script](../04_configuring_efm/01_cluster_properties/#cluster_properties) to re-configure the load balancer whenever databases are added or removed. You can also choose to enable native EFM-Pgpool integration for high availability. - -
- -prerequisites - -
diff --git a/product_docs/docs/efm/3/efm_user/03_installing_efm-old.mdx b/product_docs/docs/efm/3/efm_user/03_installing_efm-old.mdx deleted file mode 100644 index 6b25fe242f5..00000000000 --- a/product_docs/docs/efm/3/efm_user/03_installing_efm-old.mdx +++ /dev/null @@ -1,267 +0,0 @@ ---- -title: "Installing Failover Manager" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/installing_efm.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/installing_efm.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/installing_efm.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/installing_efm.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/installing_efm.html" ---- - - - - - -To request credentials that allow you to access an EnterpriseDB repository, visit the EDB website at: - - - -## RedHat/CentOS/Rocky Linux/AlmaLinux Host - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter y, and press Return to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. - -Failover Manager must be installed by root. During the installation process, the installer will also create a user named efm that has sufficient privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. - -After receiving your credentials, you must create the EnterpriseDB repository configuration file on each node of the cluster, and then modify the file to enable access. The following steps provide detailed information about accessing the EnterpriseDB repository; the steps must be performed on each node of the cluster. - -### RHEL or CentOS 7 PPCLE Host - -1. Use the following command to create a configuration file and install Advance Toolchain: - - ```text - rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - - cat > /etc/yum.repos.d/advance-toolchain.repo <<EOF - ... - EOF - ``` - -2. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -3. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -4. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -6. Install the selected package: - - ```text - yum -y install edb-efm310 - ``` - -### RHEL or CentOS 7 Host - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3.
Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -4. On RHEL 7, enable the additional repositories to resolve dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -5. Install the selected package: - - ```text - yum -y install edb-efm310 - ``` - -### RHEL or Rocky Linux or AlmaLinux 8 Host - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - -- On Rocky Linux 8 or AlmaLinux 8 - ```text - dnf -y install epel-release - ``` -- On RHEL 8 - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -4. Enable the additional repositories to resolve dependencies: - -- On Rocky Linux 8 or AlmaLinux 8 - ```text - dnf config-manager --set-enabled PowerTools - ``` -- On RHEL 8 - - ```text - ARCH=$( /bin/arch ) - subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` - -5. Disable the built-in PostgreSQL module: - - ```text - dnf -qy module disable postgresql - ``` -6. Install the selected package: - ```text - dnf -y install edb-efm310 - ``` - -## Debian or Ubuntu Host - -To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: - - - -The following steps will walk you through using the EnterpriseDB apt repository to install Failover Manager. - -1. Assume superuser privileges: - ```text - sudo su - - ``` -2. Configure the EnterpriseDB repository by substituting your EnterpriseDB credentials for the username and password placeholders in the following commands: - - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` -3. Add support to your system for secure APT repositories: - ```text - apt-get install apt-transport-https - ``` -4. Add the EDB signing key: - ```text - wget -q -O - https://username:password@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` -5. Update the repository metadata: - ```text - apt-get update - ``` -6. Install Failover Manager: - ```text - apt-get -y install edb-efm310 - ``` - -## SLES Host - -To install Failover Manager, you must have credentials that allow access to the EnterpriseDB repository. To request credentials for the repository, visit the EnterpriseDB website at: - - - -You can use the zypper package manager to install a Failover Manager agent on an SLES 12 host. zypper will attempt to satisfy package dependencies as it installs a package, but requires access to specific repositories that are not hosted at EnterpriseDB. - -1. You must assume superuser privileges and stop any firewalls before installing Failover Manager. Then, use the following command to add the EnterpriseDB repository to your system: - - ```text - zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo - ``` - -2. The command creates the repository configuration file in the /etc/zypp/repos.d directory.
Then, use the following command to refresh the metadata on your SLES host to include the EnterpriseDB repository: - - ```text - zypper refresh - ``` - - When prompted, provide credentials for the repository, enter `a` to always trust the provided key, and update the metadata to include the EnterpriseDB repository. - -3. You must also add SUSEConnect and the SUSE Package Hub extension to the SLES host, and register the host with SUSE, allowing access to SUSE repositories. Use the commands: - - ```text - zypper install SUSEConnect - SUSEConnect -r 'REGISTRATION_CODE' -e 'EMAIL_ID' - SUSEConnect -p PackageHub/12.4/x86_64 - SUSEConnect -p sle-sdk/12.4/x86_64 - ``` - -4. Add the Apache Modules repository to resolve additional dependencies: - - ```text - zypper addrepo https://download.opensuse.org/repositories/Apache:/Modules/SLE_12_SP4/Apache:Modules.repo - ``` - -5. Install OpenJDK (version 1.8) for Java-based components: - - ```text - zypper -n install java-1_8_0-openjdk - ``` - -6. Now you can use the zypper utility to install a Failover Manager agent: - - ```text - zypper -n install edb-efm310 - ``` - -## Performing post-installation tasks - -If you are using Failover Manager to monitor a cluster owned by a user other than `enterprisedb` or `postgres`, see [Extending Failover Manager Permissions](04_configuring_efm/04_extending_efm_permissions/#extending_efm_permissions). - -After installing on each node of the cluster, you must: - -1. Modify the [cluster properties file](04_configuring_efm/01_cluster_properties/#cluster_properties) on each node. -2. Modify the [cluster members file](04_configuring_efm/03_cluster_members/#cluster_members) on each node. -3. If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file. -4. Start the agent on each node of the cluster. For more information about controlling the service, see [Controlling the Failover Manager Service](08_controlling_efm_service/#controlling-the-failover-manager-service). - -### Installation Locations - -Failover Manager components are installed in the following locations: - -| Component | Location | -| --------------------------------- | --------------------------- | -| Executables | /usr/edb/efm-3.10/bin | -| Libraries | /usr/edb/efm-3.10/lib | -| Cluster configuration files | /etc/edb/efm-3.10 | -| Logs | /var/log/efm-3.10 | -| Lock files | /var/lock/efm-3.10 | -| Log rotation file | /etc/logrotate.d/efm-3.10 | -| sudo configuration file | /etc/sudoers.d/efm-310 | -| Binary to access VIP without sudo | /usr/edb/efm-3.10/bin/secure | diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx deleted file mode 100644 index 523b6d2d285..00000000000 --- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/01_encrypting_database_password.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Encrypting Your Database Password" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/encrypting_database_password.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/encrypting_database_password.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/encrypting_database_password.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/encrypting_database_password.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/encrypting_database_password.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.15.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.15.html" ---- - - -Failover Manager requires you to encrypt your database password before including it in the cluster properties file. Use the [efm utility](../../07_using_efm_utility/#efm_encrypt) (located in the `/usr/edb/efm-3.10/bin` directory) to encrypt the password. When encrypting a password, you can either pass the password on the command line when you invoke the utility, or use the `EFMPASS` environment variable. - -To encrypt a password, use the command: - -```text -# efm encrypt [ --from-env ] -``` - -Where `` specifies the name of the Failover Manager cluster. - -If you include the `--from-env` option, you must export the value you wish to encrypt before invoking the encryption utility. For example: - -```text -export EFMPASS=password -``` - -If you do not include the `--from-env` option, Failover Manager will prompt you to enter the database password twice before generating an encrypted password for you to place in your cluster property file. When the utility shares the encrypted password, copy and paste the encrypted password into the cluster property files. - -!!! Note - Many Java vendors ship their version of Java with full-strength encryption included, but not enabled due to export restrictions. If you encounter an error that refers to an illegal key size when attempting to encrypt the database password, you should download and enable a Java Cryptography Extension (JCE) that provides an unlimited policy for your platform. - -The following example demonstrates using the encrypt utility to encrypt a password for the `acctg` cluster: - -```text -# efm encrypt acctg -This utility will generate an encrypted password for you to place in - your EFM cluster property file: -/etc/edb/efm-3.10/acctg.properties -Please enter the password and hit enter: -Please enter the password again to confirm: -The encrypted password is: 516b36fb8031da17cfbc010f7d09359c -Please paste this into your acctg.properties file -db.password.encrypted=516b36fb8031da17cfbc010f7d09359c -``` - -!!! Note - The utility will notify you if a properties file does not exist. - -After receiving your encrypted password, paste the password into the properties file and start the Failover Manager service. If there is a problem with the encrypted password, the Failover Manager service will not start: - -```text -[witness@localhost ~]# systemctl start edb-efm-3.10 -Job for edb-efm-3.10.service failed because the control process exited with error code. See "systemctl status edb-efm-3.10.service" and "journalctl -xe" for details. -``` - -If you receive this message when starting the Failover Manager service, please see the startup log (located in `/var/log/efm-3.10/startup-efm.log`) for more information. 
- -If you are using RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, startup information is also available with the following command: - -```text -systemctl status edb-efm-3.10 -``` - -To prevent a cluster from inadvertently connecting to the database of another cluster, the cluster name is incorporated into the encrypted password. If you modify the cluster name, you will need to re-encrypt the database password and update the cluster properties file. - -**Using the EFMPASS Environment Variable** - -The following example demonstrates using the `EFMPASS` environment variable and the `--from-env` option when encrypting a password. Before invoking the `efm encrypt` command, set the value of `EFMPASS` to the password (`1safepassword`): - -```text -# export EFMPASS=1safepassword -``` - -Then, invoke `efm encrypt`, specifying the `--from-env` option: - -```text -# efm encrypt acctg --from-env -# 7ceecd8965fa7a5c330eaa9e43696f83 -``` - -The encrypted password (`7ceecd8965fa7a5c330eaa9e43696f83`) is returned as a text value; when using a script, you can check the exit code of the command to confirm that the command succeeded. A successful execution returns `0`. diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/index.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/index.mdx deleted file mode 100644 index b947e99feec..00000000000 --- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/01_cluster_properties/index.mdx +++ /dev/null @@ -1,1154 +0,0 @@ ---- -title: "The Cluster Properties File" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/cluster_properties.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/cluster_properties.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/cluster_properties.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/cluster_properties.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/cluster_properties.html" ---- - - - -Each node in a Failover Manager cluster has a properties file (by default, named `efm.properties`) that contains the properties of the individual node on which it resides. The Failover Manager installer creates a file template for the properties file named `efm.properties.in` in the `/etc/edb/efm-3.10` directory. - -After completing the Failover Manager installation, you must make a working copy of the template before modifying the file contents: - -```text -# cp /etc/edb/efm-3.10/efm.properties.in /etc/edb/efm-3.10/efm.properties -``` - -After copying the template file, change the owner of the file to `efm`: - -```text -# chown efm:efm efm.properties -``` - -!!! Note - By default, Failover Manager expects the cluster properties file to be named `efm.properties`. If you name the properties file something other than `efm.properties`, you must modify the service script or unit file to instruct Failover Manager to use a different name. - -After creating the cluster properties file, add (or modify) configuration parameter values as required. For detailed information about each property, see [Specifying Cluster Properties](#specifying-cluster-properties). - -The property files are owned by `root`. The Failover Manager service script expects to find the files in the `/etc/edb/efm-3.10` directory.
If you move the property file to another location, you must create a symbolic link that specifies the new location. - -!!! Note - All user scripts referenced in the properties file will be invoked as the Failover Manager user. - - - -## Specifying Cluster Properties - -You can use the properties listed in the cluster properties file to specify connection properties and behaviors for your Failover Manager cluster. Modifications to property settings will be applied when Failover Manager starts. If you modify a property value you must restart Failover Manager to apply the changes. - -Property values are case-sensitive. Note that while Postgres uses quoted strings in parameter values, Failover Manager does not allow quoted strings in property values. For example, while you might specify an IP address in a Postgres configuration parameter as: - -> `listen_addresses='192.168.2.47'` - -Failover Manager requires that the value *not* be enclosed in quotes: - -> `bind.address=192.168.2.54:7800` - -Use the properties in the `efm.properties` file to specify connection, administrative, and operational details for Failover Manager. - -**Legends**: In the following table: - -- `A`: Required on Primary or Standby node -- `W`: Required on Witness node -- `Y` : Yes - -| **Property Name** | **A** | **W** | **Default Value** | **Comments** | -| ------------------------------------------------------------- | ----- | ----- | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [db.user](#db_user) | Y | Y | | Username for the database. | -| [db.password.encrypted](#db_password_encrypted) | Y | Y | | Password encrypted using 'efm encrypt'. | -| [db.port](#db_port) | Y | Y | | This value must be same for all the agents. | -| [db.database](#db_database) | Y | Y | | Database name. | -| [db.service.owner](#db_service_owner) | Y | | | Owner of $PGDATA dir for db.database. | -| [db.service.name](#db_service_name) | | | | Required if running the database as a service. | -| [db.bin](#db_bin) | Y | | | Directory containing the pg_controldata/pg_ctl commands such as '/usr/edb/as12/bin'. | -| [db.data.dir](#db_data_dir) | Y | | | Same as the output of query 'show data_directory;' **Name changed from db.recovery.dir in EFM 3.9.** | -| [db.config.dir](#db_config_dir) | | | | Same as the output of query 'show config_file;'. Should be specified if it is not same as *db.data.dir*. **Text changed in EFM Version 3.8 and 3.9**. | -| [jdbc.sslmode](#jdbc_sslmode) | Y | Y | disable | See the [note](#jdbc_note). | -| [user.email](#user_email) | | | | This value must be same for all the agents; can be left blank if using a notification script. | -| [from.email](#from_email) | | | [efm@localhost](mailto:efm@localhost) | Leave blank to use the default [efm@localhost](mailto:efm@localhost). | -| [notification.level](#notification_level) | Y | Y | INFO | See the [list of notifications](../../10_notifications/#notifications). | -| [script.notification](#script_notification) | | | | Required if user.email property is not used; both parameters can be used together. | -| [bind.address](#bind_address) | Y | Y | | Example: <ip_address>:<port> | -| [external.address](#external_address) | | | | Example: <ip_address/hostname> **Available in EFM 3.10 and later.** | -| [admin.port](#admin_port) | Y | Y | 7809 | Modify if the default port is already in use. 
| -| [is.witness](#is_witness) | Y | Y | | See description. | -| [local.period](#local_period) | Y | | 10 | | -| [local.timeout](#local_timeout) | Y | | 60 | | -| [local.timeout.final](#local_timeout_final) | Y | | 10 | | -| [remote.timeout](#remote_timeout) | Y | Y | 10 | | -| [node.timeout](#node_timeout) | Y | Y | 50 | This value must be same for all the agents. | -| [stop.isolated.primary](#stop_isolated_primary) | Y | | true | | -| [stop.failed.primary](#stop_failed_primary) | Y | | true | | -| [primary.shutdown.as.failure](#primary_shutdown_as_failure) | Y | Y | false | | -| [update.physical.slots.period](#update_physical_slots_period) | Y | | 0 |**Available in EFM 3.10 and later. **| | -| [ping.server.ip](#ping_server_ip) | Y | Y | 8.8.8.8 | **Name changed from pingServerIp in EFM 3.9.** | -| [ping.server.command](#ping_server_command) | Y | Y | /bin/ping -q -c3 -w5 | **Name changed from pingServerCommand in EFM 3.9.** | -| [auto.allow.hosts](#auto_allow_hosts) | Y | Y | false | | -| [stable.nodes.file](#stable_nodes_file) | Y | Y | false | | -| [db.reuse.connection.count](#db_reuse_connection_count) | Y | | 0 | | -| [auto.failover](#auto_failover) | Y | Y | true | | -| [auto.reconfigure](#auto_reconfigure) | Y | | true | This value must be same for all the agents. | -| [promotable](#promotable) | Y | | true | | -| [use.replay.tiebreaker](#use_replay_tiebreaker) | Y | Y | true | This value must be same for all the agents. **Available in EFM 3.9 and later.** - | -| [application.name](#application_name) | | | | Set to replace the application_name portion of the primary_conninfo entry with this property value before starting the original primary database as a standby. | -| [restore.command](#restore_command) | | | | Example: restore.command=scp <db_service_owner>@%h: <archive_path>/%f %p | -| [reconfigure.num.sync](#reconfigure_num_sync) | Y | | false | **Available in EFM 3.9 and later.** | | -| [reconfigure.sync.primary](#reconfigure_sync_primary) | Y | | false | **Text changed in EFM 3.9.** | -| [minimum.standbys](#minimum_standbys) | Y | Y | 0 | This value must be same for all the nodes. | -| [recovery.check.period](#recovery_check_period) | Y | | 1 | | -| [restart.connection.timeout](#restart_connection_timeout) | | | 60 | | -| [auto.resume.period](#auto_resume_period) | Y | | 0 | | -| [virtual.ip](#virtual_ip) | | | (see virtual.ip.single) | Leave blank if you do not specify a VIP. **Name changed from virtualIp in EFM 3.9.** | -| [virtual.ip.interface](#virtual_ip) | | | | Required if you specify a VIP. **Name changed from virtualIp.interface in EFM 3.9.** | -| [virtual.ip.prefix](#virtual_ip) | | | | Required if you specify a VIP. **Name changed from virtualIp.prefix in EFM 3.9.** | -| [virtual.ip.single](#virtual_ip) | Y | Y | Yes | This value must be same for all the nodes. 
**Name changed from virtualIp.single in EFM 3.9.** | -| [check.vip.before.promotion](#check_vip_before_promotion) | Y | Y | Yes | | -| [script.load.balancer.attach](#script_load_balancer) | | | | Example: script.load.balancer.attach= /<path>/<attach_script> %h %t | -| [script.load.balancer.detach](#script_load_balancer) | | | | Example: script.load.balancer.detach= /<path>/<detach_script> %h %t | -| [script.fence](#script_fence) | | | | Example: script.fence= /<path>/<script_name> %p %f | -| [script.post.promotion](#script_post_promotion) | | | | Example: script.post.promotion= /<path>/<script_name> %f %p | -| [script.resumed](#script_resumed) | | | | Example: script.resumed= /<path>/<script_name> | -| [script.db.failure](#script_db_failure) | | | | Example: script.db.failure= /<path>/<script_name> | -| [script.primary.isolated](#script_primary_isolated) | | | | Example: script.primary.isolated= /<path>/<script_name> | -| [script.remote.pre.promotion](#script_remote_pre_promotion) | | | | Example: script.remote.pre.promotion= /<path>/<script_name> %p | -| [script.remote.post.promotion](#script_remote_post_promotion) | | | | Example: script.remote.post.promotion= /<path>/<script_name> %p | -| [script.custom.monitor](#script_custom_monitor) | | | | Example: script.custom.monitor= /<path>/<script_name> | -| [custom.monitor.interval](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | -| [custom.monitor.timeout](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | -| [custom.monitor.safe.mode](#script_custom_monitor) | | | | Required if a custom monitoring script is specified | -| [sudo.command](#sudo_command) | Y | Y | sudo | | -| [sudo.user.command](#sudo_command) | Y | Y | sudo -u %u | | -| [lock.dir](#lock_dir) | | | | If not specified, defaults to '/var/lock/efm-<version>' | -| [log.dir](#log_dir) | | | | If not specified, defaults to '/var/log/efm-<version>' | -| [syslog.host](#syslog_logging) | | | localhost | | -| [syslog.port](#syslog_logging) | | | 514 | | -| [syslog.protocol](#syslog_logging) | | | | | -| [syslog.facility](#syslog_logging) | | | UDP | | -| [file.log.enabled](#logtype_enabled) | Y | Y | true | | -| [syslog.enabled](#logtype_enabled) | Y | Y | false | | -| [jgroups.loglevel](#loglevel) | | | info | | -| [efm.loglevel](#loglevel) | | | info | | -| [jvm.options](#jvm_options) | | | -Xmx128m | | - -**Cluster Properties** - - - - - - - - - -Use the following properties to specify connection details for the Failover Manager cluster: - -```text -# The value for the password property should be the output from -# 'efm encrypt' -- do not include a cleartext password here. To -# prevent accidental sharing of passwords among clusters, the -# cluster name is incorporated into the encrypted password. If -# you change the cluster name (the name of this file), you must -# encrypt the password again with the new name. -# The db.port property must be the same for all nodes. -db.user= -db.password.encrypted= -db.port= -db.database= -``` - -The `db.user` specified must have sufficient privileges to invoke selected PostgreSQL commands on behalf of Failover Manager. For more information, please see [Prerequisites](../../02_failover_manager_overview/01_prerequisites/#prerequisites). - -For information about encrypting the password for the database user, see [Encrypting Your Database Password](01_encrypting_database_password/#encrypting_database_password). 
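As an illustration only, a filled-in connection block might look like the following; the port and database name shown are the EDB Postgres Advanced Server defaults, and the encrypted value is the sample output produced by `efm encrypt` earlier in this document:

```text
db.user=efm
db.password.encrypted=516b36fb8031da17cfbc010f7d09359c
db.port=5444
db.database=edb
```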
-
-
-Use the `db.service.owner` property to specify the name of the operating system user that owns the cluster that is being managed by Failover Manager. This property is not required on a dedicated witness node.
-
-```text
-# This property tells EFM which OS user owns the $PGDATA dir for
-# the 'db.database'. By default, the owner is either 'postgres'
-# for PostgreSQL or 'enterprisedb' for EDB Postgres Advanced
-# Server. However, if you have configured your db to run as a
-# different user, you will need to copy the /etc/sudoers.d/efm-XX
-# conf file to grant the necessary permissions to your db owner.
-#
-# This username must have write permission to the
-# 'db.data.dir' specified below.
-db.service.owner=
-```
-
-Specify the name of the database service in the `db.service.name` property if you use the service or systemctl command when starting or stopping the service.
-
-```text
-# Specify the proper service name in order to use service commands
-# rather than pg_ctl to start/stop/restart a database. For example, if
-# this property is set, then 'service <name> restart' or 'systemctl
-# restart <name>'
-# (depending on OS version) will be used to restart the database rather
-# than pg_ctl.
-# This property is required if running the database as a service.
-db.service.name=
-```
-
-You should use the same service control mechanism (pg_ctl, service, or systemctl) each time you start or stop the database service. If you use the `pg_ctl` program to control the service, specify the location of the `pg_ctl` program in the `db.bin` property.
-
-```text
-# Specify the directory containing the pg_controldata/pg_ctl commands,
-# for example:
-# /usr/edb/as11/bin. Unless the db.service.name property is used, the
-# pg_ctl command is used to start/stop/restart databases as needed
-# after a failover or switchover. This property is required.
-db.bin=
-```
-
-Use the `db.data.dir` property to specify the location to which a recovery file will be written on the Primary node of the cluster during promotion. This property is required on primary and standby nodes; it is not required on a dedicated witness node.
-
-```text
-# For database version 12 and up, this is the directory where a
-# standby.signal file will exist for a standby node. For previous
-# versions, this is the location of the db recovery.conf file on
-# the node.
-# After a failover, the recovery.conf files on remaining standbys are
-# changed to point to the new primary db (a copy of the original is made
-# first). On a primary node, a recovery.conf file will be written during
-# failover and promotion to ensure that the primary node can not be
-# restarted as the primary database.
-# This corresponds to database environment variable PGDATA and should
-# be same as the output of query 'show data_directory;' on respective
-# database.
-db.data.dir=
-```
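-
-Putting the database properties together, a sketch for a hypothetical EDB Postgres Advanced Server 12 installation might look like the following; the service name and paths are assumptions, so substitute the values for your own installation:
-
-```text
-db.service.owner=enterprisedb
-db.service.name=edb-as-12
-db.bin=/usr/edb/as12/bin
-db.data.dir=/var/lib/edb/as12/data
-```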
-
-Use the `db.config.dir` property to specify the location of database configuration files if they are not stored in the same directory as the `recovery.conf` or `standby.signal` file. This should be the directory specified by the `config_file` parameter of your Advanced Server or PostgreSQL installation. This value will be used as the location of the Postgres `data` directory when stopping, starting, or restarting the database.
-
-```text
-# Specify the location of database configuration files if they are
-# not contained in the same location as the recovery.conf or
-# standby.signal file. This is most likely the case for Debian
-# installations. The location specified will be used as the -D value
-# (the location of the data directory for the cluster) when calling
-# pg_ctl to start or stop the database. If this property is blank,
-# the db.data.dir location specified by the db.data.dir property will
-# be used. This corresponds to the output of query 'show config_file;'
-# on respective database.
-db.config.dir=
-```
-
-For more information about database configuration files, visit the [PostgreSQL website](https://www.postgresql.org/docs/current/runtime-config-file-locations.html).
-
-Use the `jdbc.sslmode` property to instruct Failover Manager to use SSL connections; by default, SSL is disabled.
-
-```text
-# Use the jdbc.sslmode property to enable ssl for EFM
-# connections. Setting this property to anything but 'disable'
-# will force the agents to use 'ssl=true' for all JDBC database
-# connections (to both local and remote databases).
-# Valid values are:
-#
-# disable - Do not use ssl for connections.
-# verify-ca - EFM will perform CA verification before allowing
-# the certificate.
-# require - Verification will not be performed on the server
-# certificate.
-jdbc.sslmode=disable
-```
-
-!!! Note
-    If you set the value of `jdbc.sslmode` to `verify-ca` and you want to use Java trust store for certificate validation, you need to set the following value:
-
-    `jdbc.properties=sslfactory=org.postgresql.ssl.DefaultJavaSSLFactory`
-
-For information about configuring and using SSL, please see the PostgreSQL documentation on secure TCP/IP connections with SSL and the SSL documentation for the PostgreSQL JDBC driver.
-
-Use the `user.email` property to specify an email address (or multiple email addresses) that will receive any notifications sent by Failover Manager.
-
-```text
-# Email address(es) for notifications. The value of this
-# property must be the same across all agents. Multiple email
-# addresses must be separated by space. If using a notification
-# script instead, this property can be left blank.
-user.email=
-```
-
-The `from.email` property specifies the value that will be used as the sender's address on any email notifications from Failover Manager. You can:
-
-- leave `from.email` blank to use the default value (`efm@localhost`).
-- specify a custom value for the email address.
-- specify a custom email address, using the `%h` placeholder to represent the name of the node host (e.g., [example@%h](mailto:example@%h)). The placeholder will be replaced with the name of the host as returned by the Linux hostname utility.
-
-For more information about notifications, see [Notifications](../../10_notifications/#notifications).
-
-```text
-# Use the from.email property to specify the from email address that
-# will be used for email notifications. Use the %h placeholder to
-# represent the name of the node host (e.g. example@%h). The
-# placeholder will be replaced with the name of the host as returned
-# by the hostname command.
-# Leave blank to use the default, efm@localhost.
-from.email=
-```
-
-Use the `notification.level` property to specify the minimum severity level at which Failover Manager will send user notifications or when a notification script is called. For a complete list of notifications, please see [Notifications](../../10_notifications/#notifications).
-
-```text
-# Minimum severity level of notifications that will be sent by
-# the agent. The minimum level also applies to the notification
-# script (below). Valid values are INFO, WARNING, and SEVERE.
-# A list of notifications is grouped by severity in the user's
-# guide.
-notification.level=INFO
-```
-
-Use the `script.notification` property to specify the path to a user-supplied script that acts as a notification service; the script will be passed a message subject and a message body. The script will be invoked each time Failover Manager generates a user notification.
-
-```text
-# Absolute path to script run for user notifications.
-#
-# This is an optional user-supplied script that can be used for
-# notifications instead of email. This is required if not using
-# email notifications. Either/both can be used. The script will
-# be passed two parameters: the message subject and the message
-# body.
-script.notification=
-```
-
-The `bind.address` property specifies the IP address and port number of the agent on the current node of the Failover Manager cluster.
-
-```text
-# This property specifies the ip address and port that jgroups
-# will bind to on this node. The value is of the form
-# <ip>:<port>.
-# Note that the port specified here is used for communicating
-# with other nodes, and is not the same as the admin.port below,
-# used only to communicate with the local agent to send control
-# signals.
-# For example, <ip_address>:7800
-bind.address=
-```
-
-Use the `external.address` property to specify the IP address or hostname that should be used for communication with all other Failover Manager agents in a NAT environment.
-
-```text
-# This is the ip address/hostname to be used for communication with all
-# other Failover Manager agents. All traffic towards this address
-# should be routed by the network to the bind.address of the node.
-# The value is in the ip/hostname format only. This address will be
-# used in scenarios where nodes are on different networks and broadcast
-# an IP address other than the bind.address to the external world.
-external.address=
-```
-
-Use the `admin.port` property to specify a port on which Failover Manager listens for administrative commands.
-
-```text
-# This property controls the port binding of the administration
-# server which is used for some commands (ie cluster-status). The
-# default is 7809; you can modify this value if the port is
-# already in use.
-admin.port=7809
-```
-
-Set the `is.witness` property to `true` to indicate that the current node is a witness node. If `is.witness` is true, the local agent will not check to see if a local database is running.
-
-```text
-# Specifies whether or not this is a witness node. Witness nodes
-# do not have local databases running.
-is.witness=
-```
-
-The Postgres `pg_is_in_recovery()` function is a boolean function that reports the recovery state of a database. The function returns `true` if the database is in recovery, or `false` if the database is not in recovery. When an agent starts, it connects to the local database and invokes the `pg_is_in_recovery()` function. If the server responds true, the agent assumes the role of standby; if the server responds false, the agent assumes the role of primary. If there is no local database, the agent will assume an idle state.
-
-!!! Note
-    If `is.witness` is `true`, Failover Manager will not check the recovery state.
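-
-You can run the same check the agent performs by querying the database directly; a sketch, assuming a local EDB Postgres Advanced Server instance on port 5444:
-
-```text
-$ psql -p 5444 -U enterprisedb -d edb -c "SELECT pg_is_in_recovery();"
-# returns 'f' on a primary, 't' on a standby that is in recovery
-```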
-
-The following properties apply to the local server:
-
-- The `local.period` property specifies the number of seconds between attempts to contact the database server.
-- The `local.timeout` property specifies how long an agent will wait for a positive response from the local database server.
-- The `local.timeout.final` property specifies how long an agent will wait after the previous checks have failed to contact the database server on the current node. If a response is not received from the database within the number of seconds specified by the `local.timeout.final` property, the database is assumed to have failed.
-
-For example, given the default values of these properties, a check of the local database happens once every 10 seconds. If an attempt to contact the local database does not come back positive within 60 seconds, Failover Manager makes a final attempt to contact the database. If a response is not received within 10 seconds, Failover Manager declares database failure and notifies the administrator listed in the user.email property. These properties are not required on a dedicated witness node.
-
-```text
-# These properties apply to the connection(s) EFM uses to monitor
-# the local database. Every 'local.period' seconds, a database
-# check is made in a background thread. If the main monitoring
-# thread does not see that any checks were successful in
-# 'local.timeout' seconds, then the main thread makes a final
-# check with a timeout value specified by the
-# 'local.timeout.final' value. All values are in seconds.
-# Whether EFM uses single or multiple connections for database
-# checks is controlled by the 'db.reuse.connection.count'
-# property.
-local.period=10
-local.timeout=60
-local.timeout.final=10
-```
-
-If necessary, you should modify these values to suit your business model.
-
-Use the `remote.timeout` property to specify how many seconds an agent waits for a response from a remote database server (i.e., how long a standby agent waits to verify that the primary database is actually down before performing failover). The `remote.timeout` property value specifies a timeout value for agent-to-agent communication; other timeout properties in the cluster properties file specify values for agent-to-database communication.
-
-```text
-# Timeout for a call to check if a remote database is responsive.
-# For example, this is how long a standby would wait for a
-# DB ping request from itself and the witness to the primary DB
-# before performing failover.
-remote.timeout=10
-```
-
-Use the `node.timeout` property to specify the number of seconds that an agent will wait for a response from a node when determining if a node has failed.
-
-```text
-# The total amount of time in seconds to wait before determining
-# that a node has failed or been disconnected from this node.
-#
-# The value of this property must be the same across all agents.
-node.timeout=50
-```
-
-Use the `stop.isolated.primary` property to instruct Failover Manager to shut down the database if a primary agent detects that it is isolated. When true (the default), Failover Manager will stop the database before invoking the script specified in the `script.primary.isolated` property.
-
-```text
-# Shut down the database after a primary agent detects that it has
-# been isolated from the majority of the efm cluster. If set to
-# true, efm will stop the database before running the
-# 'script.primary.isolated' script, if a script is specified.
-stop.isolated.primary=true
-```
-
-Use the `stop.failed.primary` property to instruct Failover Manager to attempt to shut down a primary database if it cannot reach the database. If `true`, Failover Manager will run the script specified in the `script.db.failure` property after attempting to shut down the database.
-
-```text
-# Attempt to shut down a failed primary database after EFM can no
-# longer connect to it. This can be used for added safety in the
-# case a failover is caused by a failure of the network on the
-# primary node.
-# If specified, a 'script.db.failure' script is run after this attempt.
-stop.failed.primary=true
-```
-
-Use the `primary.shutdown.as.failure` parameter to indicate that any shutdown of the Failover Manager agent on the primary node should be treated as a failure. If this parameter is set to `true` and the primary agent stops (for any reason), the cluster will attempt to confirm if the database on the primary node is running:
-
-- If the database is reached, a notification will be sent informing you of the agent status.
-- If the database is not reached, a failover will occur.
-
-```text
-# Treat a primary agent shutdown as an agent failure. This can be set
-# to true to treat a primary agent shutdown as a failure situation,
-# e.g. during the shutdown of a node, accidental or otherwise.
-# Caution should be used when using this feature, as it could
-# cause an unwanted promotion in the case of performing primary
-# database maintenance.
-# Please see the user's guide for more information.
-primary.shutdown.as.failure=false
-```
-
-The `primary.shutdown.as.failure` property is meant to catch user error, rather than failures, such as the accidental shutdown of a primary node. The proper shutdown of a node can appear to the rest of the cluster like a user has stopped the primary Failover Manager agent (for example, to perform maintenance on the primary database). If you set the `primary.shutdown.as.failure` property to `true`, care must be taken when performing maintenance.
-
-To perform maintenance on the primary database when `primary.shutdown.as.failure` is `true`, you should stop the primary agent and wait to receive a notification that the primary agent has failed but the database is still running. Then it is safe to stop the primary database. Alternatively, you can use the `efm stop-cluster` command to stop all of the agents without failure checks being performed.
-
-Use the `update.physical.slots.period` property to define the slot advance frequency for database version 12 and above. When `update.physical.slots.period` is set to a non-zero value, the primary agent will read the current `restart_lsn` of the physical replication slots after every `update.physical.slots.period` seconds, and send this information with its `pg_current_wal_lsn` and `primary_slot_name` (if it is set in the postgresql.conf file) to the standbys. If physical slots do not already exist, setting this parameter to a non-zero value will create the slots and then update the `restart_lsn` parameter for these slots. A non-promotable standby will not create new slots but will update them if they exist.
-
-```text
-# Period in seconds between having the primary agent update promotable
-# standbys with physical replication slot information so that
-# the cluster will continue to use replication slots after a failover.
-# Set to zero to turn off.
-update.physical.slots.period=0
-```
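-
-To spot-check the slot information that this mechanism maintains, you can query the primary directly; a sketch (connection details are assumptions):
-
-```text
-$ psql -p 5444 -U enterprisedb -d edb \
-    -c "SELECT slot_name, active, restart_lsn FROM pg_replication_slots;"
-```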
-
-Use the `ping.server.ip` property to specify the IP address of a server that Failover Manager can use to confirm that network connectivity is not a problem.
-
-```text
-# This is the address of a well-known server that EFM can ping
-# in an effort to determine network reachability issues. It
-# might be the IP address of a nameserver within your corporate
-# firewall or another server that *should* always be reachable
-# via a 'ping' command from each of the EFM nodes.
-#
-# There are many reasons why this node might not be considered
-# reachable: firewalls might be blocking the request, ICMP might
-# be filtered out, etc.
-#
-# Do not use the IP address of any node in the EFM cluster
-# (primary, standby, or witness) because this ping server is meant
-# to provide an additional layer of information should the EFM
-# nodes lose sight of each other.
-#
-# The installation default is Google's DNS server.
-ping.server.ip=8.8.8.8
-```
-
-Use the `ping.server.command` property to specify the command used to test network connectivity.
-
-```text
-# This command will be used to test the reachability of certain
-# nodes.
-#
-# Do not include an IP address or hostname on the end of
-# this command - it will be added dynamically at runtime with the
-# values contained in 'virtual.ip' and 'ping.server.ip'.
-#
-# Make sure this command returns reasonably quickly - test it
-# from a shell command line first to make sure it works properly.
-ping.server.command=/bin/ping -q -c3 -w5
-```
-
-Use the `auto.allow.hosts` property to instruct the server to use the addresses specified in the .nodes file of the first node started to update the allowed host list. Enabling this property (setting `auto.allow.hosts` to true) can simplify cluster start-up.
-
-```text
-# Have the first node started automatically add the addresses
-# from its .nodes file to the allowed host list. This will make
-# it faster to start the cluster when the initial set of hosts
-# is already known.
-auto.allow.hosts=false
-```
-
-Use the `stable.nodes.file` property to instruct the server to not rewrite the nodes file when a node joins or leaves the cluster. This property is most useful in clusters with unchanging IP addresses.
-
-```text
-# When set to true, EFM will not rewrite the .nodes file whenever
-# new nodes join or leave the cluster. This can help starting a
-# cluster in the cases where it is expected for member addresses
-# to be mostly static, and combined with 'auto.allow.hosts' makes
-# startup easier when learning failover manager.
-stable.nodes.file=false
-```
-
-The `db.reuse.connection.count` property allows the administrator to specify the number of times Failover Manager reuses the same database connection to check the database health. The default value is 0, indicating that Failover Manager will create a fresh connection each time. This property is not required on a dedicated witness node.
-
-```text
-# This property controls how many times a database connection is
-# reused before creating a new one. If set to zero, a new
-# connection will be created every time an agent pings its local
-# database.
-db.reuse.connection.count=0
-```
-
-The `auto.failover` property enables automatic failover. By default, `auto.failover` is set to true.
-
-```text
-# Whether or not failover will happen automatically when the primary
-# fails. Set to false if you want to receive the failover notifications
-# but not have EFM actually perform the failover steps.
-# The value of this property must be the same across all agents.
-auto.failover=true
-```
-
-Use the `auto.reconfigure` property to instruct Failover Manager to enable or disable automatic reconfiguration of remaining standby servers after the primary standby is promoted to primary. Set the property to `true` to enable automatic reconfiguration (the default) or `false` to disable automatic reconfiguration. This property is not required on a dedicated witness node. If you are using Advanced Server or PostgreSQL version 11 or earlier, the `recovery.conf` file will be backed up during the reconfiguration process.
-
-```text
-# After a standby is promoted, Failover Manager will attempt to
-# update the remaining standbys to use the new primary. For database
-# versions before 12, Failover Manager will back up recovery.conf.
-# Then it will change the host parameter of the primary_conninfo entry
-# in recovery.conf or postgresql.auto.conf, and restart the database.
-# The restart command is contained in either the efm_db_functions or
-# efm_root_functions file; default when not running db as an os
-# service is: "pg_ctl restart -m fast -w -t <timeout> -D <directory>"
-# where the timeout is the local.timeout property value and the
-# directory is specified by db.data.dir. To turn off
-# automatic reconfiguration, set this property to false.
-auto.reconfigure=true
-```
-
-!!! Note
-    `primary_conninfo` is a space-delimited list of keyword=value pairs.
-
-To indicate that a node should not be promoted, set the `promotable` property to false. The `promotable` property is ignored when a primary agent is started. This simplifies switching back to the original primary after a switchover or failover. To override the setting, use the efm set-priority command at runtime; for more information about the efm set-priority command, see [Using the efm Utility](../../07_using_efm_utility/#using_efm_utility).
-
-```text
-# A standby with this set to false will not be added to the
-# failover priority list, and so will not be available for
-# promotion. The property will be used whenever an agent starts
-# as a standby or resumes as a standby after being idle. After
-# startup/resume, the node can still be added or removed from the
-# priority list with the 'efm set-priority' command. This
-# property is required for all non-witness nodes.
-promotable=true
-```
-
-If the same amount of data has been written to more than one standby node, and a failover occurs, the `use.replay.tiebreaker` value will determine how Failover Manager selects a replacement primary. Set the `use.replay.tiebreaker` property to `true` to instruct Failover Manager to fail over to the node that will come out of recovery faster, as determined by the log sequence number. To ignore the log sequence number and promote a node based on user preference, set `use.replay.tiebreaker` to `false`.
-
-```text
-# Use replay LSN value for tiebreaker when choosing a standby to
-# promote before using failover priority. Set this property to true to
-# consider replay location as more important than failover priority
-# (as seen in cluster-status command) when choosing the "most ahead"
-# standby to promote.
-use.replay.tiebreaker=true
-```
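-
-The replay position that the tiebreaker compares can be inspected on each standby (on database version 10 and later); connection details here are assumptions:
-
-```text
-$ psql -p 5444 -U enterprisedb -d edb -c "SELECT pg_last_wal_replay_lsn();"
-```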
-
-You can use the `application.name` property to provide the name of an application that will be copied to the `primary_conninfo` parameter before restarting an old primary node as a standby.
-
-```text
-# During a switchover, recovery settings are copied from a standby
-# to the original primary. If the application.name property is set,
-# Failover Manager will replace the application_name portion of the
-# primary_conninfo entry with this property value before starting
-# the original primary database as a standby. If this property is
-# not set, Failover Manager will remove the parameter value
-# from primary_conninfo.
-application.name=
-```
-
-!!! Note
-    You should set the `application.name` property on the primary and any promotable standby; in the event of a failover/switchover, the primary node could potentially become a standby node again.
-
-Use the `restore.command` property to instruct Failover Manager to update the `restore_command` when a new primary is promoted. `%h` represents the address of the new primary; Failover Manager will replace `%h` with the address of the new primary. `%f` and `%p` are placeholders used by the server. If the property is left blank, Failover Manager will not update the `restore_command` values on the standbys after a promotion.
-
-See the PostgreSQL documentation for more information about using a [restore_command](https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-ARCHIVE-RECOVERY).
-
-```text
-# If the restore_command on a standby restores directly from the
-# primary node, use this property to have Failover Manager change
-# the command when a new primary is promoted.
-#
-# Use the %h placeholder to represent the address of the new primary.
-# During promotion it will be replaced with the address of the new
-# primary.
-#
-# If not specified, failover manager will not change the
-# restore_command value, if any, on standby nodes.
-#
-# Example:
-# restore.command=scp <db_service_owner>@%h:/var/lib/edb/as12/data/archive/%f %p
-restore.command=
-```
-
-The database parameter `synchronous_standby_names` on the primary node specifies the names and count of the synchronous standby servers that will confirm receipt of data, to ensure that the primary node can accept write transactions. When the `reconfigure.num.sync` property is set to true, Failover Manager will reduce the number of synchronous standby servers and reload the configuration of the primary node to reflect the current value.
-
-```text
-# Reduce num_sync when the number of synchronous standbys drops below
-# the value required by the primary database. If set to true, Failover
-# Manager will reduce the number of standbys needed in the primary's
-# synchronous_standby_names property and reload the primary
-# configuration. Failover Manager will not reduce the number below 1,
-# taking the primary out of synchronous replication, unless the
-# reconfigure.sync.primary property is also set to true.
-# To raise num_sync, see the reconfigure.num.sync.max property below.
-reconfigure.num.sync=false
-```
-
-!!! Note
-
-    If you are using the `reconfigure.num.sync` property, ensure that the `wal_sender_timeout` in the primary database is set to at least ten seconds less than the `efm.node.timeout` value.
-
-Set the `reconfigure.sync.primary` property to `true` to take the primary database out of synchronous replication mode if the number of standby nodes drops below the level required. Set `reconfigure.sync.primary` to `false` to send a notification if the standby count drops, but not interrupt synchronous replication.
-
-```text
-# Take the primary database out of synchronous replication mode when
-# needed. If set to true, Failover Manager will clear the
-# synchronous_standby_names configuration parameter on the primary
-# if the number of synchronous standbys drops below the required
-# level for the primary to accept writes.
-# If set to false, Failover Manager will detect the situation but
-# will only send a notification if the standby count drops below the
-# required level.
-#
-# CAUTION: TAKING THE PRIMARY DATABASE OUT OF SYNCHRONOUS MODE MEANS
-# THERE MAY ONLY BE ONE COPY OF DATA. DO NOT MAKE THIS CHANGE UNLESS
-# YOU ARE SURE THIS IS OK.
-reconfigure.sync.primary=false
-```
-
-!!! Note
-
-    If you are using the `reconfigure.sync.primary` property, ensure that the `wal_sender_timeout` in the primary database is set to at least ten seconds less than the `efm.node.timeout` value.
-
-Use the `minimum.standbys` property to specify the minimum number of standby nodes that will be retained on a cluster; if the standby count drops to the specified minimum, a replica node will not be promoted in the event of a failure of the primary node.
-
-```text
-# Instead of setting specific standbys as being unavailable for
-# promotion, this property can be used to set a minimum number
-# of standbys that will not be promoted. Set to one, for
-# example, promotion will not happen if it will drop the number
-# of standbys below this value. This property must be the same on
-# each node.
-minimum.standbys=0
-```
-
-Use the `recovery.check.period` property to specify the number of seconds that Failover Manager will wait between checks to see if a database is out of recovery.
-
-```text
-# Time in seconds between checks to see if a promoting database
-# is out of recovery.
-recovery.check.period=1
-```
-
-Use the `restart.connection.timeout` property to specify the number of seconds that Failover Manager will attempt to connect to a newly reconfigured primary or standby node while the database on that node prepares to accept connections.
-
-```text
-# Time in seconds to keep trying to connect to a database after a
-# start or restart command returns successfully but the database
-# is not ready to accept connections yet (a rare occurrence). This
-# applies to standby databases that are restarted when being
-# reconfigured for a new primary, and to primary databases that
-# are stopped and started as standbys during a switchover.
-# This retry mechanism is unrelated to the auto.resume.period
-# parameter.
-restart.connection.timeout=60
-```
-
-Use the `auto.resume.period` property to specify the number of seconds (after a monitored database fails and an agent has assumed an idle state, or when starting in IDLE mode) during which an agent will attempt to resume monitoring that database.
-
-```text
-# Period in seconds for IDLE agents to try to resume monitoring
-# after a database failure or when starting in IDLE mode. Set to
-# 0 for agents to not try to resume (in which case the
-# 'efm resume <cluster>' command is used after bringing a
-# database back up).
-auto.resume.period=0
-```
-
-Failover Manager provides support for clusters that use a virtual IP. If your cluster uses a virtual IP, provide the host name or IP address in the `virtual.ip` property; specify the corresponding prefix in the `virtual.ip.prefix` property. If `virtual.ip` is left blank, virtual IP support is disabled.
-
-Use the `virtual.ip.interface` property to provide the network interface used by the VIP.
-
-The specified virtual IP address is assigned only to the primary node of the cluster. If you specify `virtual.ip.single=true`, the same VIP address will be used on the new primary in the event of a failover. Specify a value of false to provide a unique IP address for each node of the cluster.
-
-For information about using a virtual IP address, see [Using Failover Manager with Virtual IP Addresses](../04_using_vip_addresses/#using_vip_addresses).
-
-```text
-# These properties specify the IP and prefix length that will be
-# remapped during failover. If you do not use a VIP as part of
-# your failover solution, leave the virtual.ip property blank to
-# disable Failover Manager support for VIP processing (assigning,
-# releasing, testing reachability, etc).
-#
-# If you specify a VIP, the interface and prefix are required.
-#
-# If you specify a host name, it will be resolved to an IP address
-# when acquiring or releasing the VIP. If the host name resolves
-# to more than one IP address, there is no way to predict which
-# address Failover Manager will use.
-#
-# By default, the virtual.ip and virtual.ip.prefix values must be
-# the same across all agents. If you set virtual.ip.single to
-# false, you can specify unique values for virtual.ip and
-# virtual.ip.prefix on each node.
-#
-# If you are using an IPv4 address, the virtual.ip.interface value
-# should not contain a secondary virtual ip id (do not include
-# ":1", etc).
-virtual.ip=
-virtual.ip.interface=
-virtual.ip.prefix=
-virtual.ip.single=true
-```
-
-!!! Note
-    If a primary agent is started and the node does not currently have the VIP, the EFM agent will acquire it. Stopping a primary agent does not drop the VIP from the node.
-
-Set the `check.vip.before.promotion` property to false to indicate that Failover Manager will not check to see if a VIP is in use before assigning it to a new primary in the event of a failure. Note that this could result in multiple nodes broadcasting on the same VIP address; unless the primary node is isolated or can be shut down via another process, you should set this property to true.
-
-```text
-# Whether to check if the VIP (when used) is still in use before
-# promoting after a primary failure. Turning this off may allow
-# the new primary to have the VIP even though another node is also
-# broadcasting it. This should only be used in environments where
-# it is known that the failed primary node will be isolated or
-# shut down through other means.
-check.vip.before.promotion=true
-```
-
-Use the following properties to provide paths to scripts that reconfigure your load balancer in the event of a switchover or primary failure scenario. The scripts will also be invoked in the event of a standby failure. If you are using these properties, they should be provided on every node of the cluster (primary, standby, and witness) to ensure that if a database node fails, another node will call the detach script with the failed node's address.
-
-You do not need to set the below properties if you are using Pgpool as the load balancer solution and have set the Pgpool integration properties.
-
-Provide a script name after the `script.load.balancer.attach` property to identify a script that will be invoked when a node should be attached to the load balancer. Use the `script.load.balancer.detach` property to specify the name of a script that will be invoked when a node should be detached from the load balancer. Include the `%h` placeholder to represent the IP address of the node that is being attached or removed from the cluster. Include the `%t` placeholder to instruct Failover Manager to include an m (for a primary node) or an s (for a standby node) in the string.
-
-```text
-# Absolute path to load balancer scripts
-# The attach script is called when a node should be attached to
-# the load balancer, for example after a promotion. The detach
-# script is called when a node should be removed, for example
-# when a database has failed or is about to be stopped. Use %h to
-# represent the IP/hostname of the node that is being
-# attached/detached. Use %t to represent the type of node being
-# attached or detached: the letter m will be passed in for primary nodes
-# and the letter s for standby nodes.
-#
-# Example:
-# script.load.balancer.attach=/somepath/attachscript %h %t
-script.load.balancer.attach=
-script.load.balancer.detach=
-```
-
-`script.fence` specifies the path to an optional user-supplied script that will be invoked during the promotion of a standby node to primary node.
-
-```text
-# absolute path to fencing script run during promotion
-#
-# This is an optional user-supplied script that will be run
-# during failover on the standby database node. If left blank,
-# no action will be taken. If specified, EFM will execute this
-# script before promoting the standby.
-#
-# Parameters can be passed into this script for the failed primary
-# and new primary node addresses. Use %p for new primary and %f
-# for failed primary. On a node that has just been promoted, %p
-# should be the same as the node's efm binding address.
-#
-# Example:
-# script.fence=/somepath/myscript %p %f
-#
-# NOTE: FAILOVER WILL NOT OCCUR IF THIS SCRIPT RETURNS A NON-ZERO EXIT
-# CODE.
-script.fence=
-```
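-
-For illustration only, a minimal fencing script might isolate the failed primary before the promotion proceeds. This sketch is not shipped with Failover Manager; the stop command and paths are assumptions, and remember that a non-zero exit code will abort the failover:
-
-```text
-#!/bin/bash
-# Configured as: script.fence=/usr/local/bin/efm-fence.sh %p %f
-new_primary=$1
-failed_primary=$2
-# Replace with your own out-of-band fencing mechanism (PDU, IPMI, cloud API).
-ssh root@"$failed_primary" 'systemctl stop edb-as-12' || exit 1
-exit 0
-```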
-
-Use the `script.post.promotion` property to specify the path to an optional user-supplied script that will be invoked after a standby node has been promoted to primary.
-
-```text
-# Absolute path to fencing script run after promotion
-#
-# This is an optional user-supplied script that will be run after
-# failover on the standby node after it has been promoted and
-# is no longer in recovery. The exit code from this script has
-# no effect on failover manager, but will be included in a
-# notification sent after the script executes.
-#
-# Parameters can be passed into this script for the failed primary
-# and new primary node addresses. Use %p for new primary and %f
-# for failed primary. On a node that has just been promoted, %p
-# should be the same as the node's efm binding address.
-#
-# Example:
-# script.post.promotion=/somepath/myscript %f %p
-script.post.promotion=
-```
-
-Use the `script.resumed` property to specify the path to an optional user-supplied script that will be invoked when an agent resumes monitoring of a database.
-
-```text
-# Absolute path to resume script
-#
-# This script is run before an IDLE agent resumes
-# monitoring its local database.
-script.resumed=
-```
-
-Use the `script.db.failure` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if an agent detects that the database that it monitors has failed.
-
-```text
-# Absolute path to script run after database failure
-# This is an optional user-supplied script that will be run after
-# an agent detects that its local database has failed.
-script.db.failure=
-```
-
-Use the `script.primary.isolated` property to specify the complete path to an optional user-supplied script that Failover Manager will invoke if the agent monitoring the primary database detects that the primary is isolated from the majority of the Failover Manager cluster. This script is called immediately after the VIP is released (if a VIP is in use).
-
-```text
-# Absolute path to script run on isolated primary
-# This is an optional user-supplied script that will be run after
-# a primary agent detects that it has been isolated from the
-# majority of the efm cluster.
-script.primary.isolated=
-```
-
-Use the `script.remote.pre.promotion` property to specify the path and name of a script that will be invoked on any agent nodes not involved in the promotion when a node is about to promote its database to primary.
-
-Include the `%p` placeholder to identify the address of the new primary node.
-
-```text
-# Absolute path to script invoked on non-promoting agent nodes
-# before a promotion.
-#
-# This optional user-supplied script will be invoked on other
-# agents when a node is about to promote its database. The exit
-# code from this script has no effect on Failover Manager, but
-# will be included in a notification sent after the script
-# executes.
-#
-# Pass a parameter (%p) with the script to identify the new
-# primary node address.
-#
-# Example:
-# script.remote.pre.promotion=/path_name/script_name %p
-script.remote.pre.promotion=
-```
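-
-As an example of the kind of work a remote pre-promotion hook might do, the following sketch records the upcoming topology change for other tooling to observe; the flag file path is hypothetical:
-
-```text
-#!/bin/bash
-# Configured as: script.remote.pre.promotion=/usr/local/bin/efm-pre-promote.sh %p
-new_primary=$1
-logger -t efm "promotion in progress; new primary will be ${new_primary}"
-touch /var/run/efm-promotion-in-progress
-exit 0
-```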
-
-Use the `script.remote.post.promotion` property to specify the path and name of a script that will be invoked on any non-primary nodes after a promotion occurs.
-
-Include the `%p` placeholder to identify the address of the new primary node.
-
-```text
-# Absolute path to script invoked on non-primary agent nodes
-# after a promotion.
-#
-# This optional user-supplied script will be invoked on nodes
-# (except the new primary) after a promotion occurs. The exit code
-# from this script has no effect on Failover Manager, but will be
-# included in a notification sent after the script executes.
-#
-# Pass a parameter (%p) with the script to identify the new
-# primary node address.
-#
-# Example:
-# script.remote.post.promotion=/path_name/script_name %p
-script.remote.post.promotion=
-```
-
-Use the `script.custom.monitor` property to provide the name and location of an optional script that will be invoked at regular intervals (specified in seconds by the `custom.monitor.interval` property).
-
-Use `custom.monitor.timeout` to specify the maximum time that the script will be allowed to run; if script execution does not complete within the time specified, Failover Manager will send a notification.
-
-Set `custom.monitor.safe.mode` to `true` to instruct Failover Manager to report non-zero exit codes from the script, but not promote a standby as a result of an exit code.
-
-```text
-# Absolute path to a custom monitoring script.
-#
-# Use script.custom.monitor to specify the location and name of
-# an optional user-supplied script that will be invoked
-# periodically to perform custom monitoring tasks. A non-zero
-# exit value means that a check has failed; this will be treated
-# as a database failure. On a primary node, script failure will
-# cause a promotion. On a standby node, script failure will
-# generate a notification and the agent will become IDLE.
-#
-# The custom.monitor.* properties are required if a custom
-# monitoring script is specified:
-#
-# custom.monitor.interval is the time in seconds between executions
-# of the script.
-#
-# custom.monitor.timeout is a timeout value in seconds for how
-# long the script will be allowed to run. If script execution
-# exceeds the specified time, the task will be stopped and a
-# notification sent. Subsequent runs will continue.
-#
-# If custom.monitor.safe.mode is set to true, non-zero exit codes
-# from the script will be reported but will not cause a promotion
-# or be treated as a database failure. This allows testing of the
-# script without affecting EFM.
-#
-script.custom.monitor=
-custom.monitor.interval=
-custom.monitor.timeout=
-custom.monitor.safe.mode=
-```
-
-Use the `sudo.command` property to specify a command that will be invoked by Failover Manager when performing tasks that require extended permissions. Use this option to include command options that might be specific to your system authentication.
-
-Use the `sudo.user.command` property to specify a command that will be invoked by Failover Manager when executing commands that will be performed by the database owner.
-
-```text
-# Command to use in place of 'sudo' if desired when efm runs
-# the efm_db_functions or efm_root_functions, or efm_address
-# scripts.
-# Sudo is used in the following ways by efm:
-#
-# sudo /usr/edb/efm-<version>/bin/efm_address
-# sudo /usr/edb/efm-<version>/bin/efm_root_functions
-# sudo -u <db service owner> /usr/edb/efm-<version>/bin/efm_db_functions
-#
-# 'sudo' in the first two examples will be replaced by the value
-# of the sudo.command property. 'sudo -u <user>' will
-# be replaced by the value of the sudo.user.command property.
-# The '%u' field will be replaced with the db owner.
-sudo.command=sudo
-sudo.user.command=sudo -u %u
-```
-
-Use the `lock.dir` property to specify an alternate location for the Failover Manager lock file; the file prevents Failover Manager from starting multiple (potentially orphaned) agents for a single cluster on the node.
-
-```text
-# Specify the directory of lock file on the node. Failover
-# Manager creates a file named <cluster>.lock at this location to
-# avoid starting multiple agents for same cluster. If the path
-# does not exist, Failover Manager will attempt to create it. If
-# not specified defaults to '/var/lock/efm-<version>'
-lock.dir=
-```
-
-Use the `log.dir` property to specify the location to which agent log files will be written; Failover Manager will attempt to create the directory if the directory does not exist.
-
-```text
-# Specify the directory of agent logs on the node. If the path
-# does not exist, Failover Manager will attempt to create it. If
-# not specified defaults to '/var/log/efm-<version>'. (To store
-# Failover Manager startup logs in a custom location, modify the
-# path in the service script to point to an existing, writable
-# directory.)
-# If using a custom log directory, you must configure
-# logrotate separately. Use 'man logrotate' for more information.
-log.dir=
-```
-
-After enabling the UDP or TCP protocol on a Failover Manager host, you can enable logging to syslog. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between LOCAL0 and LOCAL7.
-
-```text
-# Syslog information. The syslog service must be listening on
-# the port for the given protocol, which can be UDP or TCP.
-# The facilities supported are LOCAL0 through LOCAL7.
-syslog.host=localhost
-syslog.port=514
-syslog.protocol=UDP
-syslog.facility=LOCAL1
-```
-
-Use the `file.log.enabled` and `syslog.enabled` properties to specify the type of logging that you wish to implement. Set `file.log.enabled` to `true` to enable logging to a file; enable the UDP protocol or TCP protocol and set `syslog.enabled` to `true` to enable logging to syslog. You can enable logging to both a file and syslog.
-
-```text
-# Which logging is enabled.
-file.log.enabled=true
-syslog.enabled=false
-```
-
-For more information about configuring syslog logging, see [Enabling syslog Log File Entries](../../09_controlling_logging/#enabling_syslog).
-
-Use the `jgroups.loglevel` and `efm.loglevel` parameters to specify the level of detail logged by Failover Manager. The default value is INFO. For more information about logging, see [Controlling Logging](../../09_controlling_logging/#controlling_logging).
-
-```text
-# Logging levels for JGroups and EFM.
-# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR
-# Default value: INFO
-# It is not necessary to increase these values unless debugging a
-# specific issue. If nodes are not discovering each other at
-# startup, increasing the jgroups level to DEBUG will show
-# information about the TCP connection attempts that may help
-# diagnose the connection failures.
-jgroups.loglevel=INFO
-efm.loglevel=INFO
-```
-
-Use the `jvm.options` property to pass JVM-related configuration information. The default setting specifies the amount of memory that the Failover Manager agent will be allowed to use.
-
-```text
-# Extra information that will be passed to the JVM when starting
-# the agent.
-jvm.options=-Xmx128m
-```
diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/02_cluster_members.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/02_cluster_members.mdx
deleted file mode 100644
index 77034d68ac9..00000000000
--- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/02_cluster_members.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: "The Cluster Members File"
-
-legacyRedirectsGenerated:
-  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/cluster_members.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/cluster_members.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/cluster_members.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/cluster_members.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/cluster_members.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.16.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.16.html"
----
-
-Each node in a Failover Manager cluster has a cluster members file (by default, named `efm.nodes`) that contains a list of the current Failover Manager cluster members. When an agent starts, it uses the file to locate other cluster members. The Failover Manager installer creates a file template for the cluster members file named `efm.nodes.in` in the `/etc/edb/efm-3.10` directory.
-
-After completing the Failover Manager installation, you must make a working copy of the template:
-
-```text
-cp /etc/edb/efm-3.10/efm.nodes.in /etc/edb/efm-3.10/efm.nodes
-```
-
-After copying the template file, change the owner of the file to `efm`:
-
-```text
-chown efm:efm efm.nodes
-```
-
-By default, Failover Manager expects the cluster members file to be named `efm.nodes`. If you name the cluster members file something other than `efm.nodes`, you must modify the Failover Manager service script to instruct Failover Manager to use the new name.
-
-The cluster members file on the first node started can be empty; this node will become the Membership Coordinator. On each subsequent node, the cluster members file must contain the address and port number of the Membership Coordinator. Each entry in the cluster members file must be listed in an address:port format, with multiple entries separated by white space.
-
-The agents will update the contents of the `efm.nodes` file to match the current members of the cluster. As agents join or leave the cluster, the `efm.nodes` files on other agents are updated to reflect the current cluster membership. If you invoke the [efm stop-cluster](../07_using_efm_utility/#efm_stop_cluster) command, Failover Manager does not modify the file.
-
-If the Membership Coordinator leaves the cluster, another node will assume the role. You can use the [efm cluster-status](../07_using_efm_utility/#efm_cluster_status) command to find the address of the Membership Coordinator. If a node joins or leaves a cluster while an agent is down, before starting that agent you must manually ensure that the file includes at least the current Membership Coordinator's address and port.
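-
-For illustration, if the Membership Coordinator listens at 172.16.1.10:7800, the cluster members file on a joining node might contain the following (the addresses are assumptions; entries for other known members are optional):
-
-```text
-172.16.1.10:7800 172.16.1.11:7800 172.16.1.12:7800
-```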
-
-If you know the addresses and ports of the nodes that will be joining the cluster, you can include the addresses in the cluster members file at any time. At startup, any addresses that do not identify cluster members will be ignored unless the `auto.allow.hosts` property (in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`.
-
-If the `stable.nodes.file` property (located in the [cluster properties file](01_cluster_properties/#auto_allow_hosts)) is set to `true`, the agent will not update the `.nodes` file when cluster members join or leave the cluster; this behavior is most useful when the IP addresses of cluster members do not change often.
diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx
deleted file mode 100644
index e8009c9ee1c..00000000000
--- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/03_extending_efm_permissions.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: "Extending Failover Manager Permissions"
-
-legacyRedirectsGenerated:
-  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/extending_efm_permissions.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/extending_efm_permissions.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/extending_efm_permissions.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/extending_efm_permissions.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/extending_efm_permissions.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.13.html"
-  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.13.html"
----
-
-During the Failover Manager installation, the installer creates a user named `efm`. `efm` does not have sufficient privileges to perform management functions that are normally limited to the database owner or operating system superuser.
-
-- When performing management functions requiring database superuser privileges, `efm` invokes the `efm_db_functions` script.
-- When performing management functions requiring operating system superuser privileges, `efm` invokes the `efm_root_functions` script.
-- When assigning or releasing a virtual IP address, `efm` invokes the `efm_address` script.
-- When enabling Pgpool integration, `efm` invokes the `efm_pgpool_functions` script.
-
-The `efm_db_functions` or `efm_root_functions` scripts perform management functions on behalf of the `efm` user.
-
-The sudoers file contains entries that allow the user `efm` to control the Failover Manager service for clusters owned by `postgres` or `enterprisedb`. You can modify a copy of the sudoers file to grant permission to manage Postgres clusters owned by other users to `efm`.
-
-The `efm-310` file is located in `/etc/sudoers.d`, and contains the following entries:
-
-```text
-# Copyright EnterpriseDB Corporation, 2014-2020. All Rights Reserved.
-#
-# Do not edit this file. Changes to the file may be overwritten
-# during an upgrade.
-#
-# This file assumes you are running your efm cluster as user 'efm'. If not,
-# then you will need to copy this file.
-
-# Allow user 'efm' to sudo efm_db_functions as either 'postgres' or 'enterprisedb'.
-# If you run your db service under a non-default account, you will need to copy
-# this file to grant the proper permissions and specify the account in your efm
-# cluster properties file by changing the 'db.service.owner' property.
-efm ALL=(postgres) NOPASSWD: /usr/edb/efm-3.10/bin/efm_db_functions
-efm ALL=(enterprisedb) NOPASSWD: /usr/edb/efm-3.10/bin/efm_db_functions
-
-# Allow user 'efm' to sudo efm_root_functions as 'root' to write/delete the PID file,
-# validate the db.service.owner property, etc.
-efm ALL=(ALL) NOPASSWD: /usr/edb/efm-3.10/bin/efm_root_functions
-
-# Allow user 'efm' to sudo efm_address as root for VIP tasks.
-efm ALL=(ALL) NOPASSWD: /usr/edb/efm-3.10/bin/efm_address
-
-# relax tty requirement for user 'efm'
-Defaults:efm !requiretty
-```
-
-If you are using Failover Manager to monitor clusters that are owned by users other than `postgres` or `enterprisedb`, make a copy of the `efm-310` file, and modify the content to allow the user to access the `efm_functions` script to manage their clusters.
-
-If an agent cannot start because of permission problems, make sure the default `/etc/sudoers` file contains the following line at the end of the file:
-
-```text
-## Read drop-in files from /etc/sudoers.d (the # here does not
-## mean a comment)
-
-#includedir /etc/sudoers.d
-```
-
-## Running Failover Manager without sudo
-
-By default, Failover Manager uses sudo to securely manage access to system functionality. If you choose to configure Failover Manager to run without sudo access, note that root access is still required to:
-
-- install the Failover Manager RPM.
-- perform Failover Manager setup tasks.
-
-To run Failover Manager without sudo, you must select a database process owner that will have privileges to perform management functions on behalf of Failover Manager. The user could be the default database superuser (for example, enterprisedb or postgres) or another privileged user. After selecting the user:
-
-1. Use the following command to add the user to the `efm` group:
-
-   ```text
-   usermod -a -G efm enterprisedb
-   ```
-
-   This should allow the user to write to `/var/run/efm-3.10` and `/var/lock/efm-3.10`.
-
-2. If you are reusing a cluster name, remove any previously created log files; the new user will not be able to write to log files created by the default (or other) owner.
-
-3. Copy the cluster properties template file and the nodes template file:
-
-   ```text
-   su - enterprisedb
-
-   cp /etc/edb/efm-3.10/efm.properties.in <directory>/<cluster>.properties
-
-   cp /etc/edb/efm-3.10/efm.nodes.in <directory>/<cluster>.nodes
-   ```
-
-Then, modify the cluster properties file, providing the name of the user in the `db.service.owner` property. You must also ensure that the `db.service.name` property is blank; without sudo, you cannot run services without root access.
-
-After modifying the configuration, the new user can control Failover Manager with the following command:
-
-```text
-/usr/edb/efm-3.10/bin/runefm.sh start|stop <cluster>.properties
-```
-
-Where `<cluster>.properties` specifies the full path of the cluster properties file. Note that the user must ensure that the full path to the properties file is provided whenever the non-default user is controlling agents or using the efm script.
-
-To allow the new user to manage Failover Manager as a service, you must provide a custom script or unit file.
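-
-A custom unit file for this configuration might look like the following sketch; the unit name, user, service type, and properties path are assumptions rather than part of the Failover Manager packaging:
-
-```text
-[Unit]
-Description=Failover Manager (without sudo) for cluster 'efm'
-After=network.target
-
-[Service]
-Type=forking
-User=enterprisedb
-ExecStart=/usr/edb/efm-3.10/bin/runefm.sh start /home/enterprisedb/efm.properties
-ExecStop=/usr/edb/efm-3.10/bin/runefm.sh stop /home/enterprisedb/efm.properties
-
-[Install]
-WantedBy=multi-user.target
-```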
Failover Manager uses a binary named `manage-vip` that resides in `/usr/edb/efm-3.10/bin/secure/` to perform VIP management operations without sudo privileges. This binary uses setuid to acquire the privileges needed to manage virtual IP addresses.

- This directory is only accessible to root and users in the `efm` group.
- The binary is only executable by root and the `efm` group.

For security reasons, we recommend against modifying the access privileges of the `/usr/edb/efm-3.10/bin/secure/` directory or the `manage-vip` script.

For more information about using Failover Manager without sudo, visit:

diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/04_using_vip_addresses.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/04_using_vip_addresses.mdx
deleted file mode 100644
index 7aec6fab8ee..00000000000
--- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/04_using_vip_addresses.mdx
+++ /dev/null
@@ -1,154 +0,0 @@
---
title: "Using Failover Manager with Virtual IP Addresses"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/using_vip_addresses.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/using_vip_addresses.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/using_vip_addresses.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/using_vip_addresses.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/using_vip_addresses.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.17.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.17.html"
---

Failover Manager uses the `efm_address` script to assign or release a virtual IP address.

!!! Note
    Virtual IP addresses are not supported by many cloud providers. In those environments, another mechanism should be used (such as an Elastic IP Address on AWS), which can be changed when needed by a fencing or post-promotion script.

By default, the script resides in:

 `/usr/edb/efm-3.10/bin/efm_address`

Failover Manager uses the following command variations to assign or release an IPv4 or IPv6 IP address.

To assign a virtual IPv4 IP address:

```text
# efm_address add4 <interface_name> <IPv4_address>/<prefix>
```

To assign a virtual IPv6 IP address:

```text
# efm_address add6 <interface_name> <IPv6_address>/<prefix>
```

To release a virtual address:

```text
# efm_address del <interface_name> <IP_address>/<prefix>
```

Where:

 `<interface_name>` matches the name specified in the `virtual.ip.interface` property in the cluster properties file.

 `<IPv4_address>` or `<IPv6_address>` matches the value specified in the `virtual.ip` property in the cluster properties file.

 `<prefix>` matches the value specified in the `virtual.ip.prefix` property in the cluster properties file.

For more information about properties that describe a virtual IP address, see [The Cluster Properties File](01_cluster_properties/#virtual_ip).

You must invoke the `efm_address` script as the root user. The `efm` user is created during the installation, and is granted privileges in the sudoers file to run the `efm_address` script. For more information about the sudoers file, see [Extending Failover Manager Permissions](03_extending_efm_permissions/#extending_efm_permissions).

!!! Note
    If a VIP address (or any address other than the `bind.address`) is assigned to a node, the operating system can choose the source address used when contacting the database. Be sure that you modify the `pg_hba.conf` file on all monitored databases to allow contact from all addresses within your replication scenario.
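For example (a sketch only; the network, users, and auth method below are assumptions for a cluster on the `172.24.38.0/24` subnet), entries such as the following would accept connections from whichever node address the operating system selects, including the VIP:

```text
# Allow EFM health checks and streaming replication from any node
# address in the replication scenario, including the VIP.
host    postgres       efm_user     172.24.38.0/24    md5
host    replication    repl_user    172.24.38.0/24    md5
```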
**Testing the VIP**

When using a virtual IP (VIP) address with Failover Manager, it is important to test the VIP functionality manually before starting Failover Manager. This will catch any network-related issues before they cause a problem during an actual failover. While testing the VIP, ensure that Failover Manager is not running.

The following steps test the actions that Failover Manager will take. The example uses the following property values:

```text
virtual.ip=172.24.38.239
virtual.ip.interface=eth0
virtual.ip.prefix=24
ping.server.command=/bin/ping -q -c3 -w5
```

!!! Note
    The `virtual.ip.prefix` specifies the number of significant bits in the virtual IP address.

When instructed to ping the VIP from a node, use the command defined by the `ping.server.command` property.

1. Ping the VIP from all nodes to confirm that the address is not already in use:

```text
# /bin/ping -q -c3 -w5 172.24.38.239
PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data.
--- 172.24.38.239 ping statistics ---
4 packets transmitted, 0 received, +3 errors, 100% packet loss,
 time 3000ms
```

You should see 100% packet loss.

2. Run the `efm_address add4` command on the Primary node to assign the VIP, and then confirm it with the `ip address` command:

```text
# efm_address add4 eth0 172.24.38.239/24
# ip address

eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40
inet addr:172.24.38.239 Bcast:172.24.38.255
...
```

3. Ping the VIP from the other nodes to verify that they can reach the VIP:

```text
# /bin/ping -q -c3 -w5 172.24.38.239
PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data.
--- 172.24.38.239 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.023/0.025/0.029/0.006 ms
```

You should see no packet loss.

4. Use the `efm_address del` command to release the address on the Primary node, and confirm the release with the `ip address` command:

```text
# efm_address del eth0 172.24.38.239/24
# ip address
eth0 Link encap:Ethernet HWaddr 22:00:0A:89:02:8E
inet addr:10.137.2.142 Bcast:10.137.2.191
...
```

The output from this step should no longer show the VIP address on the eth0 interface.

5. Repeat step 3, this time verifying that the Standby and Witness do not see the VIP in use:

```text
# /bin/ping -q -c3 -w5 172.24.38.239
PING 172.24.38.239 (172.24.38.239) 56(84) bytes of data.
--- 172.24.38.239 ping statistics ---
4 packets transmitted, 0 received, +3 errors, 100% packet loss,
 time 3000ms
```

You should see 100% packet loss. Repeat this step on all nodes.

6. Repeat step 2 on all Standby nodes to assign the VIP to every node. You can ping the VIP from any node to verify that it is in use.

```text
# efm_address add4 eth0 172.24.38.239/24
# ip address

eth0 Link encap:Ethernet HWaddr 36:AA:A4:F4:1C:40
inet addr:172.24.38.239 Bcast:172.24.38.255
...
```

After the test steps above, release the VIP from any non-Primary node before attempting to start Failover Manager.

!!! Note
    The network interface used for the VIP does not have to be the same interface used for the Failover Manager agent's `bind.address` value. The primary agent will drop the VIP as needed during a failover, and Failover Manager will verify that the VIP is no longer available before promoting a standby. A failure of the bind address network will lead to primary isolation and failover.

If the VIP uses a different interface, you may encounter a timing condition where the rest of the cluster checks for a reachable VIP before the primary agent has dropped it. In this case, EFM will retry the VIP check for the number of seconds specified in the `node.timeout` property to help ensure that a failover happens as expected.

diff --git a/product_docs/docs/efm/3/efm_user/04_configuring_efm/index.mdx b/product_docs/docs/efm/3/efm_user/04_configuring_efm/index.mdx
deleted file mode 100644
index 6ab9be37827..00000000000
--- a/product_docs/docs/efm/3/efm_user/04_configuring_efm/index.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
---
title: "Configuring Failover Manager"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/configuring_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/configuring_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/configuring_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/configuring_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/configuring_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.14.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.14.html"
---

Configurable Failover Manager properties are specified in two user-modifiable files:

- [efm.properties](01_cluster_properties/#cluster_properties)
- [efm.nodes](03_cluster_members/#cluster_members)

cluster_properties encrypting_database_password cluster_members extending_efm_permissions using_vip_addresses
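For orientation, here is a hypothetical excerpt of each file; the values are illustrative, not defaults (the addresses reuse the example cluster shown later in this guide):

```text
# efm.properties (excerpt)
db.user=efm_user
db.password.encrypted=7c801b32a05c0c5cb2ad4ffbda5e8f9a
db.port=5444
db.database=postgres
bind.address=172.19.14.9:7800

# efm.nodes -- a single line listing the address:port of each cluster member
172.19.14.9:7800 172.19.12.163:7800 172.19.10.2:7800
```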
diff --git a/product_docs/docs/efm/3/efm_user/05_using_efm.mdx b/product_docs/docs/efm/3/efm_user/05_using_efm.mdx
deleted file mode 100644
index e694ad7f39f..00000000000
--- a/product_docs/docs/efm/3/efm_user/05_using_efm.mdx
+++ /dev/null
@@ -1,326 +0,0 @@
---
title: "Using Failover Manager"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/using_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/using_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/using_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/using_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/using_efm.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.22.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.18.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.19.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.19.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.18.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.22.html"
---

Failover Manager offers support for monitoring and failover of clusters with one or more Standby servers. You can add or remove nodes from the cluster as your demand for resources grows or shrinks.

If a primary node reboots, Failover Manager may detect the database is down on the Primary node and promote a Standby node to the role of Primary. If this happens, the Failover Manager agent on the (rebooted) Primary node will not get a chance to write the `recovery.conf` file; the rebooted Primary node will return to the cluster as a second Primary node. To prevent this, start the Failover Manager agent before starting the database server. The agent will start in idle mode, and check to see if there is already a primary in the cluster. If there is a primary node, the agent will verify that a `recovery.conf` or `standby.signal` file exists, and the database will not start as a second primary.

## Managing a Failover Manager Cluster

Once configured, a Failover Manager cluster requires no regular maintenance. The following sections provide information about performing the management tasks that may occasionally be required by a Failover Manager Cluster.

By default, [some of the efm commands](07_using_efm_utility/#using_efm_utility) must be invoked by `efm` or an OS superuser; an administrator can selectively permit users to invoke these commands by adding the user to the `efm` group. The commands are:

- [efm allow-node](07_using_efm_utility/#efm_allow_node)
- [efm disallow-node](07_using_efm_utility/#efm_disallow_node)
- [efm promote](07_using_efm_utility/#efm_promote)
- [efm resume](07_using_efm_utility/#efm_resume)
- [efm set-priority](07_using_efm_utility/#efm_set_priority)
- [efm stop-cluster](07_using_efm_utility/#efm_stop_cluster)
- [efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)

### Starting the Failover Manager Cluster

You can start the nodes of a Failover Manager cluster in any order.

To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, assume superuser privileges, and invoke the command:

```text
systemctl start edb-efm-3.10
```

If the cluster properties file for the node specifies that `is.witness` is `true`, the node will start as a Witness node.

If the node is not a dedicated Witness node, Failover Manager will connect to the local database and invoke the `pg_is_in_recovery()` function. If the server responds `false`, the agent assumes the node is a Primary node, and assigns a virtual IP address to the node (if applicable). If the server responds `true`, the Failover Manager agent assumes that the node is a Standby server. If the server does not respond, the agent will start in an idle state.
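You can run the same check by hand to see what the agent will conclude about a node's role; a quick sketch, where the port and database name are assumptions for your installation:

```text
$ psql -p 5444 -d postgres -c "SELECT pg_is_in_recovery();"
 pg_is_in_recovery
-------------------
 f
(1 row)
```

A Primary returns `f` (false); a Standby returns `t` (true).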
After joining the cluster, the Failover Manager agent checks the supplied database credentials to ensure that it can connect to all of the databases within the cluster. If the agent cannot connect, the agent will shut down.

If a new primary or standby node joins a cluster, all of the existing nodes will also confirm that they can connect to the database on the new node.

!!! Note
    If you are running `/var/lock` or `/var/run` on `tmpfs` (Temporary File System), make sure that the systemd service file for Failover Manager has a dependency on `systemd-tmpfiles-setup.service`.

### Adding Nodes to a Cluster

You can add a node to a Failover Manager cluster at any time. When you add a node to a cluster, you must modify the cluster to allow the new node, and then tell the new node how to find the cluster. The following steps detail adding a node to a cluster:

1. Unless `auto.allow.hosts` is set to `true`, use the `efm allow-node` command to add the address of the new node to the Failover Manager Allowed node host list. When invoking the command, specify the cluster name and the address of the new node:

   ```text
   efm allow-node <cluster name> <address>
   ```

   For more information about using the `efm allow-node` command or controlling a Failover Manager service, see [Using the EFM Utility](07_using_efm_utility/#efm_allow_node).

2. Install a Failover Manager agent and configure the cluster properties file on the new node. For more information about modifying the properties file, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties).

3. Configure the cluster members file on the new node, adding an entry for the Membership Coordinator. For more information about modifying the cluster members file, see [The Cluster Members File](04_configuring_efm/03_cluster_members/#cluster_members).

4. Assume superuser privileges on the new node, and start the Failover Manager agent. To start the Failover Manager cluster on RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, invoke the command:

   ```text
   systemctl start edb-efm-3.10
   ```

When the new node joins the cluster, Failover Manager will send a notification to the administrator email provided in the `user.email` property, and/or will invoke the specified notification script.

!!! Note
    To be a useful Standby for the current node, the node must be a standby in the PostgreSQL Streaming Replication scenario.

### Changing the Priority of a Standby

If your Failover Manager cluster includes more than one Standby server, you can use the `efm set-priority` command to influence the promotion priority of a Standby node. Invoke the command on any existing member of the Failover Manager cluster, and specify a priority value after the IP address of the member.

For example, the following command instructs Failover Manager that the `acctg` cluster member that is monitoring `10.0.1.9` is the primary Standby `(1)`:

```text
efm set-priority acctg 10.0.1.9 1
```

You can set the priority of a standby to `0` to make the standby non-promotable. Setting the priority of a standby to a value greater than `0` overrides a property value of `promotable=false`.

For example, if the properties file on node `10.0.1.10` includes a setting of `promotable=false` and you use `efm set-priority` to set the promotion priority of `10.0.1.10` to be the standby used in the event of a failover, the value designated by the `efm set-priority` command will override the value in the property file:

```text
efm set-priority acctg 10.0.1.10 1
```

In the event of a failover, Failover Manager will first retrieve information from Postgres streaming replication to confirm which Standby node has the most recent data, and promote the node with the least chance of data loss. If two Standby nodes contain equally up-to-date data, the node with a higher user-specified priority value will be promoted to Primary unless [use.replay.tiebreaker](04_configuring_efm/01_cluster_properties/#use_replay_tiebreaker) is set to `false`. To check the priority value of your Standby nodes, use the command:

```text
efm cluster-status <cluster name>
```

!!! Note
    The promotion priority may change if a node becomes isolated from the cluster, and later re-joins the cluster.

### Promoting a Failover Manager Node

You can invoke `efm promote` on any node of a Failover Manager cluster to start a manual promotion of a Standby database to Primary database.

Manual promotion should only be performed during a maintenance window for your database cluster. If you do not have an up-to-date Standby database available, you will be prompted before continuing.
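Before starting, you can confirm readiness from any node; a quick check (assuming a cluster named `acctg`) is to look at the last line of the status report, which matches the sample report shown later in this guide:

```text
# efm cluster-status acctg | tail -n 1
Standby database(s) in sync with primary. It is safe to promote.
```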
To start a manual promotion, assume the identity of `efm` or the OS superuser, and invoke the command:

```text
efm promote <cluster name> [-switchover] [-sourcenode <address>] [-quiet] [-noscripts]
```

Where:

 `<cluster name>` is the name of the Failover Manager cluster.

 Include the `-switchover` option to reconfigure the original Primary as a Standby. If you include the `-switchover` keyword, the cluster must include a primary node and at least one standby, and the nodes must be in sync.

 Include the `-sourcenode` keyword to specify the node from which the recovery settings will be copied to the primary.

 Include the `-quiet` keyword to suppress notifications during switchover.

 Include the `-noscripts` keyword to instruct Failover Manager not to invoke fencing and post-promotion scripts.

During switchover:

- For server versions 11 and prior, the `recovery.conf` file is copied from an existing standby to the primary node. For server version 12 and later, the `primary_conninfo` and `restore_command` parameters are copied and stored in memory.
- The primary database is stopped.
- If you are using a VIP, the address is released from the primary node.
- A standby is promoted to replace the primary node, and acquires the VIP.
- The address of the new primary node is added to the `recovery.conf` file or the `primary_conninfo` details are stored in memory.
- If the `application.name` property is set for this node, the `application_name` property will be added to the `recovery.conf` file or the `primary_conninfo` information will be stored in memory.
- If you are using server version 12 or later, the recovery settings that have been stored in memory are written to the `postgresql.auto.conf` file. A `standby.signal` file is created.
- The old primary is started; the agent will resume monitoring it as a standby.

During a promotion, the Primary agent releases the virtual IP address. If it is not a switchover, a `recovery.conf` file is created in the directory specified by the `db.data.dir` property. The `recovery.conf` file is used to prevent the old primary database from starting until the file is removed, preventing the node from starting as a second primary in the cluster. If the promotion is part of a switchover, recovery settings are handled as described above.

The Primary agent remains running, and assumes a status of Idle.

The Standby agent confirms that the virtual IP address is no longer in use before pinging a well-known address to ensure that the agent is not isolated from the network. The Standby agent runs the fencing script and promotes the Standby database to Primary. The Standby agent then assigns the virtual IP address to the Standby node, and runs the post-promotion script (if applicable).

Note that this command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file.

To return a node to the role of primary, place the node first in the promotion list:

```text
efm set-priority <cluster name> <address> 1
```

Then, perform a manual promotion:

```text
efm promote <cluster name> -switchover
```

For more information about the efm utility, see [Using the EFM Utility](07_using_efm_utility/#using_efm_utility).

### Stopping a Failover Manager Agent

When you stop an agent, Failover Manager will remove the node's address from the cluster members list on all of the running nodes of the cluster, but will not remove the address from the Failover Manager Allowed node host list.

To stop the Failover Manager agent on RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, assume superuser privileges, and invoke the command:

```text
systemctl stop edb-efm-3.10
```

Until you invoke the `efm disallow-node` command (removing the node's address from the Allowed node host list), you can use the `service edb-efm-3.10 start` command to restart the node at a later date without first running the `efm allow-node` command again.

Note that stopping an agent does not signal the cluster that the agent has failed unless the [primary.shutdown.as.failure](04_configuring_efm/01_cluster_properties/#primary_shutdown_as_failure) property is set to `true`.

### Stopping a Failover Manager Cluster

To stop a Failover Manager cluster, connect to any node of a Failover Manager cluster, assume the identity of `efm` or the OS superuser, and invoke the command:

```text
efm stop-cluster <cluster name>
```

The command will cause *all* Failover Manager agents to exit. Terminating the Failover Manager agents completely disables all failover functionality.

!!! Note
    When you invoke the `efm stop-cluster` command, all authorized node information is lost from the Allowed node host list.

### Removing a Node from a Cluster

The `efm disallow-node` command removes the IP address of a node from the Failover Manager Allowed Node host list. Assume the identity of `efm` or the OS superuser on any existing node (that is currently part of the running cluster), and invoke the `efm disallow-node` command, specifying the cluster name and the IP address of the node:

```text
efm disallow-node <cluster name> <address>
```
The `efm disallow-node` command will not stop a running agent; the service will continue to run on the node until you [stop the agent](#stop_efm_agent). If the agent or cluster is subsequently stopped, the node will not be allowed to rejoin the cluster, and will be removed from the failover priority list (and will be ineligible for promotion).

After invoking the `efm disallow-node` command, you must use the [efm allow-node](07_using_efm_utility/#efm_allow_node) command to add the node to the cluster again.

## Running Multiple Agents on a Single Node

You can monitor multiple database clusters that reside on the same host by running multiple Primary or Standby agents on that Failover Manager node. You may also run multiple Witness agents on a single node. To configure Failover Manager to monitor more than one database cluster, while ensuring that Failover Manager agents from different clusters do not interfere with each other, you must:

1. Create a cluster properties file for each member of each cluster that defines a unique set of properties and the role of the node within the cluster.
2. Create a cluster members file for each member of each cluster that lists the members of the cluster.
3. Customize the unit file (on a RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x system) for each cluster to specify the names of the cluster properties and the cluster members files.
4. Start the services for each cluster.

The example that follows uses two database clusters (`acctg` and `sales`) running on the same node:

- Data for `acctg` resides in `/opt/pgdata1`; its server is listening on port `5444`.
- Data for `sales` resides in `/opt/pgdata2`; its server is listening on port `5445`.

To run a Failover Manager agent for both of these database clusters, use the `efm.properties.in` template to create two properties files. Each cluster properties file must have a unique name. For this example, we create `acctg.properties` and `sales.properties` to match the `acctg` and `sales` database clusters.

The following parameters must be unique in each cluster properties file:

 `admin.port`

 `bind.address`

 `db.port`

 `db.data.dir`

 `virtual.ip` (if used)

Within each cluster properties file, the `db.port` parameter should specify a unique value for each cluster, while the `db.user` and `db.database` parameters may have the same value or a unique value. For example, the `acctg.properties` file may specify:

 `db.user=efm_user`

 `db.password.encrypted=7c801b32a05c0c5cb2ad4ffbda5e8f9a`

 `db.port=5444`

 `db.database=acctg_db`

While the `sales.properties` file may specify:

 `db.user=efm_user`

 `db.password.encrypted=e003fea651a8b4a80fb248a22b36f334`

 `db.port=5445`

 `db.database=sales_db`

Some parameters require special attention when setting up more than one Failover Manager cluster agent on the same node. If multiple agents reside on the same node, each port must be unique. Any two ports will work, but it may be easier to keep the information clear if using ports that are not too close to each other.
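For example, the port-related settings for the two clusters might look like this (the port numbers and address are arbitrary illustrations, not defaults):

```text
# acctg.properties (excerpt)
admin.port=7809
bind.address=172.19.14.9:7800

# sales.properties (excerpt)
admin.port=7909
bind.address=172.19.14.9:7900
```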
When creating the cluster properties file for each cluster, the `db.data.dir` parameters must also specify values that are unique for each respective database cluster.

The following parameters are used when assigning the virtual IP address to a node. If your Failover Manager cluster does not use a virtual IP address, leave these parameters blank.

 `virtual.ip`

 `virtual.ip.interface`

 `virtual.ip.prefix`

These parameter values are determined by the virtual IP addresses being used and may or may not be the same for both `acctg.properties` and `sales.properties`.

After creating the `acctg.properties` and `sales.properties` files, create a service script or unit file for each cluster that points to the respective property files; this step is platform specific. If you are using RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, see [RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x](#rhelcentos-7x-or-rhelcentos-8x).

!!! Note
    If you are using a unit file, you must manually update the file to reflect the new service name when you upgrade Failover Manager.

### RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x

If you are using RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, you should copy the `edb-efm-3.10` unit file to a new file with a name that is unique for each cluster. For example, if you have two clusters (named `acctg` and `sales`), the unit file names might be:

```text
/usr/lib/systemd/system/efm-acctg.service

/usr/lib/systemd/system/efm-sales.service
```

Then, edit the `CLUSTER` variable within each unit file, changing the specified cluster name from `efm` to the new cluster name. For example, for a cluster named `acctg`, the value would specify:

```text
Environment=CLUSTER=acctg
```

You must also update the value of the `PIDfile` parameter to specify the new cluster name. For example:

```text
PIDFile=/var/run/efm-3.10/acctg.pid
```

After copying the service scripts, use the following commands to enable the services:

```text
# systemctl enable efm-acctg.service

# systemctl enable efm-sales.service
```

Then, use the new service scripts to start the agents. For example, you can start the `acctg` agent with the command:

```text
# systemctl start efm-acctg
```

For information about customizing a unit file, please visit:

diff --git a/product_docs/docs/efm/3/efm_user/06_monitoring_efm_cluster.mdx b/product_docs/docs/efm/3/efm_user/06_monitoring_efm_cluster.mdx
deleted file mode 100644
index 84c0a320935..00000000000
--- a/product_docs/docs/efm/3/efm_user/06_monitoring_efm_cluster.mdx
+++ /dev/null
@@ -1,151 +0,0 @@
---
title: "Monitoring a Failover Manager Cluster"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/monitoring_efm_cluster.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/monitoring_efm_cluster.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/monitoring_efm_cluster.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/monitoring_efm_cluster.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/monitoring_efm_cluster.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.21.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.20.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.21.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.20.html"
---

You can use either the Failover Manager `efm cluster-status` command or the PEM Client graphical interface to check the current status of a monitored node of a Failover Manager cluster.

## Reviewing the Cluster Status Report

The [efm cluster-status](07_using_efm_utility/#efm_cluster_status) command returns a report that contains information about the status of the Failover Manager cluster. To invoke the command, enter:

```text
# efm cluster-status <cluster name>
```

The following status report is for a cluster named `edb` that has three nodes running:

```text
Agent Type Address Agent DB VIP
-----------------------------------------------------------------------
Standby 172.19.10.2 UP UP 192.168.225.190
Standby 172.19.12.163 UP UP 192.168.225.190
Primary 172.19.14.9 UP UP 192.168.225.190*


Allowed node host list:
172.19.14.9 172.19.12.163 172.19.10.2


Membership coordinator: 172.19.14.9


Standby priority host list:
172.19.12.163 172.19.10.2

Promote Status:

DB Type Address WAL Received LSN WAL Replayed LSN Info
--------------------------------------------------------------------
Primary 172.19.14.9 0/4000638
Standby 172.19.12.163 0/4000638 0/4000638
Standby 172.19.10.2 0/4000638 0/4000638


Standby database(s) in sync with primary. It is safe to promote.
```

The cluster status section provides an overview of the status of the agents that reside on each node of the cluster:

```text
Agent Type Address Agent DB VIP
-----------------------------------------------------------------------
Standby 172.19.10.2 UP UP 192.168.225.190
Standby 172.19.12.163 UP UP 192.168.225.190
Primary 172.19.14.9 UP UP 192.168.225.190*
```

The asterisk (\*) after the VIP address indicates that the address is available for connections. If a VIP address is not followed by an asterisk, the address has been associated with the node (in the properties file), but the address is not currently in use.

Failover Manager agents provide the information displayed in the Cluster Status section.

The `Allowed node host list` and `Standby priority host list` provide an easy way to tell which nodes are allowed to join the cluster, and the promotion order of the nodes. The IP address of the Membership coordinator is also displayed in the report:

```text
Allowed node host list:
172.19.14.9 172.19.12.163 172.19.10.2
Membership coordinator: 172.19.14.9
Standby priority host list:
172.19.12.163 172.19.10.2
```

The `Promote Status` section of the report is the result of a direct query from the node on which you are invoking the cluster-status command to each database in the cluster; the query also returns the transaction log location of each database. Because the queries to each database return at different points in time, the LSNs may not match even if streaming replication is working normally for the cluster.

```text
Promote Status:

DB Type Address WAL Received LSN WAL Replayed LSN Info
-------------------------------------------------------------------
Primary 172.19.14.9 0/4000638
Standby 172.19.12.163 0/4000638 0/4000638
Standby 172.19.10.2 0/4000638 0/4000638
```

If a database is down (or if the database has been restarted, but the resume command has not yet been invoked), the state of the agent that resides on that host will be Idle. If an agent is idle, the cluster status report will include a summary of the condition of the idle node. For example:

```text
Agent Type Address Agent DB VIP
-----------------------------------------------------
Idle 172.19.18.105 UP UP 172.19.13.105
```

**Exit Codes**

The cluster status process returns an exit code that is based on the state of the cluster:

- An exit code of `0` indicates that all agents are running, and the databases on the Primary and Standby nodes are running and in sync.

- A non-zero exit code indicates that there is a problem. The following problems can trigger a non-zero exit code:

  - A database is down or unknown (or has an idle agent).
  - Failover Manager cannot decrypt the provided database password.
  - There is a problem contacting the databases to get WAL locations.
  - There is no Primary agent.
  - There are no Standby agents.
  - One or more Standby nodes are not in sync with the Primary.
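Because of this, the command drops easily into a scripted health check. A minimal sketch, where the cluster name, install path, and alert address are assumptions:

```text
#!/bin/bash
# Hypothetical cron job: mail the status report when the exit code is non-zero.
STATUS=$(/usr/edb/efm-3.10/bin/efm cluster-status acctg 2>&1)
if [ $? -ne 0 ]; then
    echo "$STATUS" | mail -s "EFM cluster acctg needs attention" admin@example.com
fi
```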
## Monitoring Streaming Replication with Postgres Enterprise Manager

If you use Postgres Enterprise Manager (PEM) to monitor your servers, you can configure the Streaming Replication Analysis dashboard (part of the PEM graphical interface) to display the state of a Primary or Standby node that is part of a Streaming Replication scenario.

![The Streaming Replication dashboard (Primary node)](images/str_replication_dashboard_master.png)

The Streaming Replication Analysis Dashboard displays statistical information about activity for any monitored server on which streaming replication is enabled. The dashboard header identifies the status of the monitored server (either Replication Primary or Replication Slave), and displays the date and time that the server was last started, the date and time that the page was last updated, and a current count of triggered alerts for the server.

When reviewing the dashboard for a Replication Slave (a Standby node), a label at the bottom of the dashboard confirms the status of the server.

![The Streaming Replication dashboard (Standby node)](images/str_replication_dashboard_standby.png)

By default, the PEM replication probes that provide information for the Streaming Replication Analysis dashboard are disabled.

To view the Streaming Replication Analysis dashboard for the Primary node of a replication scenario, you must enable the following probes:

- Streaming Replication
- WAL Archive Status

To view the Streaming Replication Analysis dashboard for the Standby node of a replication scenario, you must enable the following probes:

- Streaming Replication Lag Time

For more information about PEM, please visit the EnterpriseDB website at:

diff --git a/product_docs/docs/efm/3/efm_user/07_using_efm_utility.mdx b/product_docs/docs/efm/3/efm_user/07_using_efm_utility.mdx
deleted file mode 100644
index c7ac10c8da2..00000000000
--- a/product_docs/docs/efm/3/efm_user/07_using_efm_utility.mdx
+++ /dev/null
@@ -1,219 +0,0 @@
---
title: "Using the efm Utility"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/using_efm_utility.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/using_efm_utility.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/using_efm_utility.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/using_efm_utility.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/using_efm_utility.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.26.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.26.html"
---

Failover Manager provides the efm utility to assist with cluster management. The RPM installer adds the utility to the `/usr/edb/efm-3.10/bin` directory when you install Failover Manager.

**efm allow-node**

```text
efm allow-node <cluster name> <address>
```

Invoke the `efm allow-node` command to allow the specified node to join the cluster. When invoking the command, provide the name of the cluster and the IP address of the joining node.

This command must be invoked by `efm`, a member of the `efm` group, or root.

**efm disallow-node**

```text
efm disallow-node <cluster name> <address>
```

Invoke the `efm disallow-node` command to remove the specified node from the allowed hosts list, and prevent the node from joining a cluster. Provide the name of the cluster and the address of the node when calling the `efm disallow-node` command. This command must be invoked by `efm`, a member of the `efm` group, or root.

**efm cluster-status**

```text
efm cluster-status <cluster name>
```

Invoke the `efm cluster-status` command to display the status of a Failover Manager cluster. For more information about the status report, see [Monitoring a Failover Manager Cluster](06_monitoring_efm_cluster/#monitoring_efm_cluster).

**efm cluster-status-json**

```text
efm cluster-status-json <cluster name>
```

Invoke the `efm cluster-status-json` command to display the status of a Failover Manager cluster in JSON format. While the format of the displayed information is different from the display generated by the `efm cluster-status` command, the information source is the same.

The following example is generated by querying the status of a healthy cluster with three nodes:

```text
{
  "nodes": {
    "172.16.144.176": {
      "type": "Witness",
      "agent": "UP",
      "db": "N\/A",
      "vip": "",
      "vip_active": false
    },
    "172.16.144.177": {
      "type": "Primary",
      "agent": "UP",
      "db": "UP",
      "vip": "",
      "vip_active": false,
      "xlogReceive": "0/14001478",
      "xlog": "0/14001478",
      "xloginfo": ""
    },
    "172.16.144.180": {
      "type": "Standby",
      "agent": "UP",
      "db": "UP",
      "vip": "",
      "vip_active": false,
      "xlogReceive": "0/14001478",
      "xlog": "0/14001478",
      "xloginfo": ""
    }
  },
  "allowednodes": [
    "172.16.144.177",
    "172.16.144.160",
    "172.16.144.180",
    "172.16.144.176"
  ],
  "membershipcoordinator": "172.16.144.177",
  "failoverpriority": [
    "172.16.144.180"
  ],
  "minimumstandbys": 0,
  "missingnodes": [],
  "messages": []
}
```
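Since the output is JSON, it can be post-processed with standard tools. For example, with `jq` installed (an assumption, as is the cluster name `efm`), you could list any node whose agent is not UP:

```text
# efm cluster-status-json efm | jq -r '.nodes | to_entries[] | select(.value.agent != "UP") | .key'
```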
**efm encrypt**

```text
efm encrypt <cluster name> [--from-env]
```

Invoke the `efm encrypt` command to encrypt the database password before including the password in the cluster properties file. Include the `--from-env` option to instruct Failover Manager to use the value specified in the `EFMPASS` environment variable, and execute without user input. For more information, see [Encrypting Your Database Password](04_configuring_efm/01_cluster_properties/01_encrypting_database_password/#encrypting_database_password).

**efm promote**

```text
efm promote cluster_name [-switchover] [-sourcenode <address>] [-quiet] [-noscripts]
```

The `efm promote` command instructs Failover Manager to perform a manual failover of a standby to primary.

Manual promotion should only be attempted if the status command reports that the cluster includes a Standby node that is up-to-date with the Primary. If there is no up-to-date Standby, Failover Manager will prompt you before continuing.

Include the `-switchover` clause to promote a standby node, and reconfigure a primary node as a standby node. Include the `-sourcenode` keyword, and specify a node address to indicate the node whose recovery settings will be copied to the old primary node (making it a standby). Include the `-quiet` keyword to suppress notifications during the switchover process. Include the `-noscripts` keyword to instruct Failover Manager to not invoke fencing or post-promotion scripts.

This command must be invoked by `efm`, a member of the `efm` group, or root.

!!! Note
    This command instructs the service to ignore the value specified in the `auto.failover` parameter in the cluster properties file.

**efm resume**

```text
efm resume <cluster name>
```

Invoke the `efm resume` command to resume monitoring a previously stopped database. This command must be invoked by `efm`, a member of the `efm` group, or root.

**efm set-priority**

```text
efm set-priority <cluster name> <address> <priority>
```

Invoke the `efm set-priority` command to assign a failover priority to a standby node. The value specifies the order in which the node will be used in the event of a failover. This command must be invoked by `efm`, a member of the `efm` group, or root.

Use the priority option to specify the place for the node in the priority list. For example, specify a value of `1` to indicate that the node is the primary standby, and will be the first node promoted in the event of a failover. A priority value of `0` instructs Failover Manager to not promote the standby.

**efm stop-cluster**

```text
efm stop-cluster <cluster name>
```

Invoke the `efm stop-cluster` command to stop Failover Manager on all nodes. This command instructs Failover Manager to connect to each node on the cluster and instruct the existing members to shut down. The command has no effect on running databases, but when the command completes, there is no failover protection in place.

!!! Note
    When you invoke the `efm stop-cluster` command, all authorized node information is removed from the Allowed node host list.

This command must be invoked by `efm`, a member of the `efm` group, or root.

**efm upgrade-conf**

```text
efm upgrade-conf <cluster name> [-source <directory>]
```

Invoke the `efm upgrade-conf` command to copy the configuration files from an existing Failover Manager installation, and add parameters required by a Failover Manager installation. Provide the name of the previous cluster when invoking the utility. This command must be invoked with root privileges.

If you are upgrading from a Failover Manager configuration that does not use sudo, include the `-source` flag and specify the name of the *directory* in which the configuration files reside when invoking upgrade-conf.

**efm node-status-json**

```text
efm node-status-json <cluster name>
```

Invoke the `efm node-status-json` command to display the status of a local node in JSON format. A successful execution of this command returns `0` as its exit code. In case of a database failure or an agent status becoming IDLE, the command returns `1` as its exit code.

The following is an example output of the `efm node-status-json` command:

```text
{
  "type":"Standby",
  "address":"172.16.144.130",
  "agent":"UP",
  "db":"UP",
  "vip":"",
  "vip_active":"false"
}
```
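This makes the command convenient for local health checks. For example (assuming `jq` is installed and the cluster is named `efm`, both assumptions):

```text
# Print just the local database state; anything other than UP needs attention.
efm node-status-json efm | jq -r '.db'
UP
```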
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/controlling_efm_service.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/controlling_efm_service.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/controlling_efm_service.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/controlling_efm_service.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/controlling_efm_service.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.23.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.23.html" ---- - - - -Each node in a Failover Manager cluster hosts a Failover Manager agent that is controlled by a service script. By default, the service script expects to find: - -- A configuration file named `efm.properties` that contains the properties used by the Failover Manager service. Each node of a replication scenario must contain a properties file that provides information about the node. -- A cluster members file named `efm.nodes` that contains a list of the cluster members. Each node of a replication scenario must contain a cluster members list. - -Note that if you are running multiple clusters on a single node you will need to manually create configuration files with cluster-specific names and modify the service script for the corresponding clusters. - -The commands that control the Failover Manager service are platform-specific. - - - -## Using the systemctl Utility on RHEL/CentOS 7.x and RHEL/Rocky Linux/AlmaLinux 8.x - -On RHEL/CentOS 7.x and RHEL/Rocky Linux/AlmaLinux 8.x, Failover Manager runs as a Linux service named (by default) `edb-efm-3.10.service` that is located in `/usr/lib/systemd/system`. Each database cluster monitored by Failover Manager will run a copy of the service on each node of the replication cluster. - -Use the following systemctl commands to control a Failover Manager agent that resides on a RHEL/CentOS 7.x and RHEL/Rocky Linux/AlmaLinux 8.x host: - -```text -systemctl start edb-efm-3.10 -``` - -The start command starts the Failover Manager agent on the current node. The local Failover Manager agent monitors the local database and communicates with Failover Manager on the other nodes. You can start the nodes in a Failover Manager cluster in any order. This command must be invoked by root. - -```text -systemctl stop edb-efm-3.10 -``` - -Stop the Failover Manager on the current node. This command must be invoked by root. - -```text -systemctl status edb-efm-3.10 -``` - -The status command returns the status of the Failover Manager agent on which it is invoked. You can invoke the status command on any node to instruct Failover Manager to return status and server startup information. - -```text -[root@ONE ~]}> systemctl status edb-efm-3.10 - edb-efm-3.10.service - EnterpriseDB Failover Manager 3.10 - Loaded: loaded (/usr/lib/systemd/system/edb-efm-3.10.service; disabled; vendor preset: disabled) - Active: active (running) since Wed 2013-02-14 14:02:16 EST; 4s ago - Process: 58125 ExecStart=/bin/bash -c /usr/edb/edb-efm-3.10/bin/runefm.sh start ${CLUSTER} (code=exited, status=0/SUCCESS) - Main PID: 58180 (java) - CGroup: /system.slice/edb-efm-3.10.service - └─58180 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java -cp /usr/edb/edb-efm-3.10/lib/EFM-3.10.0.jar -Xmx128m... 
-``` diff --git a/product_docs/docs/efm/3/efm_user/09_controlling_logging.mdx b/product_docs/docs/efm/3/efm_user/09_controlling_logging.mdx deleted file mode 100644 index 5375966ed35..00000000000 --- a/product_docs/docs/efm/3/efm_user/09_controlling_logging.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: "Controlling Logging" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/controlling_logging.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/controlling_logging.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/controlling_logging.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/controlling_logging.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/controlling_logging.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.28.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.27.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.27.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.28.html" ---- - - - -Failover Manager writes and stores one log file per agent and one startup log per agent in `/var/log/-3.10` (where `` specifies the name of the cluster). - -You can control the level of detail written to the agent log by modifying the `jgroups.loglevel` and `efm.loglevel` parameters in the [cluster properties file](04_configuring_efm/01_cluster_properties/#loglevel): - -```text -# Logging levels for JGroups and EFM. -# Valid values are: TRACE, DEBUG, INFO, WARN, ERROR -# Default value: INFO -# It is not necessary to increase these values unless debugging a -# specific issue. If nodes are not discovering each other at -# startup, increasing the jgroups level to DEBUG will show -# information about the TCP connection attempts that may help -# diagnose the connection failures. -jgroups.loglevel=INFO -efm.loglevel=INFO -``` - -The logging facilities use the Java logging library and logging levels. The log levels (in order from most logging output to least) are: - -> - `TRACE` -> - `DEBUG` -> - `INFO` -> - `WARN` -> - `ERROR` - -For example, if you set the `efm.loglevel` parameter to `WARN`, Failover Manager will only log messages at the `WARN` level and above (`WARN` and `ERROR`). - -By default, Failover Manager log files are rotated daily, compressed, and stored for a week. You can modify the file rotation schedule by changing settings in the log rotation file (`/etc/logrotate.d/efm-3.10`). For more information about modifying the log rotation schedule, consult the logrotate man page: - -> `$ man logrotate` - - - -## Enabling syslog Log File Entries - -Failover Manager supports syslog logging. To implement syslog logging, you must configure syslog to allow UDP or TCP connections. - -To allow a connection to syslog, edit the `/etc/rsyslog.conf` file and uncomment the protocol you wish to use. You must also ensure that the `UDPServerRun` or `TCPServerRun` entry associated with the protocol includes the port number to which log entries will be sent. 
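To verify that rsyslog is actually accepting messages on the configured port, one option is the `logger` utility; a sketch only, so adjust the host, port, and protocol to match your configuration:

```text
# Send a test message over UDP to port 514, then check the syslog output.
logger --server 127.0.0.1 --port 514 --udp "failover manager syslog test"
```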
After modifying the `rsyslog.conf` file on the Failover Manager host, you must modify the Failover Manager properties to enable logging. Use your choice of editor to [modify the properties file](04_configuring_efm/01_cluster_properties/#logtype_enabled) (`/etc/edb/efm-3.10/efm.properties.in`) specifying the type of logging that you wish to implement:

```text
# Which logging is enabled.
file.log.enabled=true
syslog.enabled=false
```

You must also [specify syslog details](04_configuring_efm/01_cluster_properties/#syslog_logging) for your system. Use the `syslog.protocol` parameter to specify the protocol type (UDP or TCP) and the `syslog.port` parameter to specify the listener port of the syslog host. The `syslog.facility` value may be used as an identifier for the process that created the entry; the value must be between `LOCAL0` and `LOCAL7`.

```text
# Syslog information. The syslog service must be listening
# on the port for the given protocol, which can be UDP or
# TCP. The facilities supported are LOCAL0 through LOCAL7.
syslog.host=localhost
syslog.port=514
syslog.protocol=UDP
syslog.facility=LOCAL1
```

For more information about syslog, please see the syslog man page:

`$ man syslog`

diff --git a/product_docs/docs/efm/3/efm_user/10_notifications.mdx b/product_docs/docs/efm/3/efm_user/10_notifications.mdx
deleted file mode 100644
index 077cc76f2bf..00000000000
--- a/product_docs/docs/efm/3/efm_user/10_notifications.mdx
+++ /dev/null
@@ -1,165 +0,0 @@
---
title: "Notifications"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/notifications.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/notifications.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/notifications.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/notifications.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/notifications.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.29.html"
  - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.29.html"
---

Failover Manager will send email notifications and/or invoke a notification script when a notable event occurs that affects the cluster. If you have configured Failover Manager to send an email notification, you must have an SMTP server running on port 25 on each node of the cluster. Use the following parameters to configure notification behavior for Failover Manager:

```text
user.email
script.notification
from.email
```

For more information about editing the configuration properties, see [Specifying Cluster Properties](04_configuring_efm/01_cluster_properties/#cluster_properties).
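A hypothetical configuration might look like the following; the addresses and script path are placeholders, not defaults:

```text
# efm.properties (excerpt) -- illustrative values only
user.email=dba-team@example.com
from.email=efm@db-cluster.example.com
script.notification=/opt/efm-scripts/notify.sh
```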
The body of the notification contains details about the event that triggered the notification, and about the current state of the cluster. For example:

```text
EFM node: 10.0.1.11
Cluster name: acctg
Database name: postgres
VIP: ip_address (Active|Inactive)
Database health is not being monitored.
```

The VIP field displays the IP address and state of the virtual IP if implemented for the node.

Failover Manager assigns a severity level to each notification. The following levels indicate increasing levels of attention required:

- `INFO` indicates an informational message about the agent and does not require any manual intervention (for example, Failover Manager has started or stopped). See [List of INFO level notifications](#notifications_info)
- `WARNING` indicates that an event has happened that requires the administrator to check on the system (for example, failover has occurred). See [List of WARNING level notifications](#notifications_warning)
- `SEVERE` indicates that a serious event has happened and requires the immediate attention of the administrator (for example, failover was attempted, but was unable to complete). See [List of SEVERE level notifications](#notifications_severe)

The severity level designates the urgency of the notification. A notification with a severity level of `SEVERE` requires user attention immediately, while a notification with a severity level of `INFO` will call your attention to operational information about your cluster that does not require user action. Notification severity levels are not related to logging levels; all notifications are sent regardless of the log level detail specified in the configuration file.

You can use the [notification.level](04_configuring_efm/01_cluster_properties/#notification_level) property to specify the minimum severity level that will trigger a notification.

!!! Note
    In addition to sending notices to the administrative email address, all notifications are recorded in the cluster log file (`/var/log/efm-3.10/<cluster name>.log`).

The conditions listed in the table below will trigger an `INFO` level notification:

| Subject | Description |
| ------- | ----------- |
| Executed fencing script | Executed fencing script script_name Results: script_results |
| Executed post-promotion script | Executed post-promotion script script_name Results: script_results |
| Executed remote pre-promotion script | Executed remote pre-promotion script script_name Results: script_results |
| Executed remote post-promotion script | Executed remote post-promotion script script_name Results: script_results |
| Executed post-database failure script | Executed post-database failure script script_name Results: script_results |
| Executed primary isolation script | Executed primary isolation script script_name Results: script_results |
| Witness agent running on node_address for cluster cluster_name | Witness agent is running. |
| Primary agent running on node_address for cluster cluster_name | Primary agent is running and database health is being monitored. |
| Standby agent running on node_address for cluster cluster_name | Standby agent is running and database health is being monitored. |
| Idle agent running on node node_address for cluster cluster_name | Idle agent is running. After starting the local database, the agent can be resumed. |
| Assigning VIP to node node_address | Assigning VIP VIP_address to node node_address Results: script_results |
| Releasing VIP from node node_address | Releasing VIP VIP_address from node node_address Results: script_results |
| Starting auto resume check for cluster cluster_name | The agent on this node will check every auto.resume.period seconds to see if it can resume monitoring the failed database. The cluster should be checked during this time and the agent stopped if the database will not be started again. See the agent log for more details. |
| Executed agent resumed script | Executed agent resumed script script_name Results: script_results |
| WAL logs backed up during promotion | When reconfiguring this standby to follow the new primary, the pg_xlog or pg_wal contents were backed up in the pgdata directory. This backup should be removed when convenient to free up disk space. |

The conditions listed in the table below will trigger a *WARNING* level notification:

| Subject | Description |
| ------- | ----------- |
| Witness agent exited on node_address for cluster cluster_name | Witness agent has exited. |
| Primary agent exited on node_address for cluster cluster_name | Database health is not being monitored. |
| Cluster cluster_name notified that primary node has left | Failover is disabled for the cluster until the primary agent is restarted. |
| Standby agent exited on node_address for cluster cluster_name | Database health is not being monitored. |
| Agent exited during promotion on node_address for cluster cluster_name | Database health is not being monitored. |
| Agent exited on node_address for cluster cluster_name | The agent has exited. This is generated by an agent in the Idle state. |
| Agent exited for cluster cluster_name | The agent has exited. This notification is usually generated during startup when an agent exits before startup has completed. |
| Virtual IP address assigned to non-primary node | The virtual IP address appears to be assigned to a non-primary node. To avoid any conflicts, Failover Manager will release the VIP. You should confirm that the VIP is assigned to your primary node and manually reassign the address if it is not. |
| Virtual IP address not assigned to primary node | The virtual IP address appears to not be assigned to a primary node. EDB Postgres Failover Manager will attempt to reacquire the VIP. |
| No standby agent in cluster for cluster cluster_name | The standbys on cluster_name have left the cluster. |
| Standby agent failed for cluster cluster_name | A standby agent on cluster_name has left the cluster, but the coordinator has detected that the standby database is still running. |
| Standby database failed for cluster cluster_name | A standby agent has signaled that its database has failed. The other nodes also cannot reach the standby database. |
| -| Standby agent cannot reach database for cluster cluster_name | A standby agent has signaled database failure, but the other nodes have detected that the standby database is still running. | -| Cluster cluster_name has dropped below three nodes | At least three nodes are required for full failover protection. Please add witness or agent node to the cluster. | -| Subset of cluster cluster_name disconnected from primary | This node is no longer connected to the majority of the cluster cluster_name. Because this node is part of a subset of the cluster, failover will not be attempted. Current nodes that are visible are: node_address | -| Promotion has started on cluster cluster_name | The promotion of a standby has started on cluster cluster_name. | -| Witness failure for cluster cluster_name | Witness running at node_address has left the cluster. | -| Idle agent failure for cluster cluster_name | Idle agent running at node_address has left the cluster. | -| One or more nodes isolated from network for cluster cluster_name | This node appears to be isolated from the network. Other members seen in the cluster are: node_name | -| Node no longer isolated from network for cluster cluster_name. | This node is no longer isolated from the network. | -| Standby agent tried to promote, but primary DB is still running | The standby EFM agent tried to promote itself, but detected that the primary DB is still running on node_address. This usually indicates that the primary EFM agent has exited. Failover has NOT occurred. | -| Standby agent started to promote, but primary has rejoined. | The standby EFM agent started to promote itself, but found that a primary agent has rejoined the cluster. Failover has NOT occurred. | -| Standby agent tried to promote, but could not verify primary DB | The standby EFM agent tried to promote itself, but could not detect whether or not the primary DB is still running on node_address. Failover has NOT occurred. | -| Standby agent tried to promote, but VIP appears to still be assigned | The standby EFM agent tried to promote itself, but could not because the virtual IP address (VIP_address) appears to still be assigned to another node. Promoting under these circumstances could cause data corruption. Failover has NOT occurred. | -| Standby agent tried to promote, but appears to be orphaned | The standby EFM agent tried to promote itself, but could not because the well-known server (server_address) could not be reached. This usually indicates a network issue that has separated the standby agent from the other agents. Failover has NOT occurred. | -| Failover has not occurred | An agent has detected that the master database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | -| Potential manual failover required on cluster cluster_name | A potential failover situation was detected for cluster cluster_name. Automatic failover has been disabled for this cluster, so manual intervention is required. | -| Failover has completed on cluster cluster_name | Failover has completed on cluster cluster_name. | -| Lock file for cluster cluster_name has been removed | The lock file for cluster cluster_name has been removed from: path_name on node node_address. This lock prevents multiple agents from monitoring the same cluster on the same node. Please restore this file to prevent accidentally starting another agent for cluster. 
| -| A recovery file for cluster cluster_name has been found on primary node | A recovery file for cluster cluster_name has been found at: path_name on primary node node_address. This may be problematic should you attempt to restart the DB on this node. | -| recovery_target_timeline is not set to latest in recovery settings | The recovery_target_timeline parameter is not set to latest in the recovery settings. The standby server will not be able to follow a timeline change that occurs when a new primary is promoted. | -| trigger_file path given in recovery.conf is not writable | The path provided for the trigger_file parameter in the recovery.conf file is not writable by the db_service_owner user. Failover Manager will not be able to promote the database if needed. | Not available in EFM 3.10. | -| Promotion has not occurred for cluster cluster_name | A promotion was attempted but there is already a node being promoted: ip_address. | -| Standby not reconfigured after failover in cluster cluster_name | The auto.reconfigure property has been set to false for this node. The node has not been reconfigured to follow the new primary node after a failover. | -| Could not resume replay for cluster cluster_name | Could not resume replay for standby being promoted. Manual intervention may be required. Error: error_description This error is returned if the server encounters an error when invoking replay during the promotion of a standby. | -| Could not resume replay for standby standby_id | Could not resume replay for standby. Manual intervention may be required. Error: error_message. | -| Possible problem with database timeout values | Your remote.timeout value (value) is higher than your local.timeout value (value). If the local database takes too long to respond, the local agent could assume that the database has failed though other agents can connect. While this will not cause a failover, it could force the local agent to stop monitoring, leaving you without failover protection. | -| No standbys available for promotion in cluster cluster_name | The current number of standby nodes in the cluster has dropped to the minimum number: number. There cannot be a failover unless another standby node(s) is added or made promotable. | -| No promotable standby for cluster cluster_name | The current failover priority list in the cluster is empty. You have removed the only promotable standby for the cluster cluster_name. There cannot be a failover unless another promotable standby node(s) is added or made promotable by adding to failover priority list. | Available in EFM 3.9 and later. | | -| Synchronous replication has been disabled for cluster cluster_name | The number of synchronous standby nodes in the cluster has dropped below count. The primary has been taken out of synchronous replication mode. | -| Could not reload database configuration. | Could not reload database configuration. Manual intervention is required. Error: error_message. | -| Custom monitor timeout for cluster cluster_name | The following custom monitoring script has timed out: script_name | -| Custom monitor 'safe mode' failure for cluster cluster_name | The following custom monitor script has failed, but is being run in "safe mode": script_name. 
Output: script_results | - - - - -The conditions listed in the table below will trigger a *SEVERE* notification: - -| Subject | Description | Notes | -| -------------------------------------------------------------------------- | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ----- | -| Standby database restarted but EFM cannot connect | The start or restart command for the database ran successfully but the database is not accepting connections. EFM will keep trying to connect for up to restart.connection.timeout seconds. | -| Unable to connect to DB on node_address | The maximum connections limit has been reached. | -| Unable to connect to DB on node_address | Invalid password for db.user=user_name. | -| Unable to connect to DB on node_address | Invalid authorization specification. | -| Master cannot ping local database for cluster cluster_name | The primary agent can no longer reach the local database running at node_address. Other nodes are able to access the database remotely, so the primary will not release the VIP and/or create a recovery.conf file. The primary agent will remain IDLE until the resume command is run to resume monitoring the database. | -| Fencing script error | Fencing script script_name failed to execute successfully. Exit Value: exit_code Results: script_results Failover has NOT occurred. | -| Post-promotion script failed | Post-promotion script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Remote post-promotion script failed | Remote post-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | -| Remote pre-promotion script failed | Remote pre-promotion script script_name failed to execute successfully Exit Value: exit_code Results: script_resultsNode: node_address | -| Post-database failure script error | Post-database failure script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Agent resumed script error | Agent resumed script script_name failed to execute successfully. Results: script_results | -| Primary isolation script failed | Primary isolation script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Could not promote standby | The promote command failed on node. Could not promote standby. Error details: error_details | Description applicable to EFM 3.10. | -| Could not promote standby | The trigger file file_name could not be created on node. Could not promote standby. Error details: message_details | Description applicable to EFM 3.9 and earlier. | | -| Error creating recovery.conf file on node_address for cluster cluster_name | There was an error creating the recovery.conf file on primary node node_address during promotion. Promotion has continued, but requires manual intervention to ensure that the old primary node can not be restarted. Error details: message_details | -| An unexpected error has occurred for cluster cluster_name | An unexpected error has occurred on this node. Please check the agent log for more information. 
Error: error_details | -| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. The cluster is telling the primary agent at ip_address to fence off the primary database to prevent two primaries when the rest of the Failover Manager cluster promotes a standby. | -| Isolated primary database shutdown. | The isolated primary database has been shut down by Failover Manager. | -| Primary database being fenced off for cluster cluster_name | The primary database has been isolated from the majority of the cluster. Before the primary could finish detecting isolation, a standby was promoted and has rejoined this node in the cluster. This node is isolating itself to avoid more than one primary database. | -| Could not assign VIP to node node_address | Failover Manager could not assign the VIP address for some reason. | -| primary_or_standby database failure for cluster cluster_name | The database has failed on the specified node. | -| Agent is timing out for cluster cluster_name | This agent has timed out trying to reach the local database. After the timeout, the agent could successfully ping the database and has resumed monitoring. However, the node should be checked to make sure it is performing normally to prevent a possible database or agent failure. | -| Resume timed out for cluster cluster_name | This agent could not resume monitoring after reconfiguring and restarting the local database. See agent log for details. | -| Internal state mismatch for cluster cluster_name | The Failover Manager cluster's internal state did not match the actual state of the cluster members. This is rare and can be caused by a timing issue of nodes joining the cluster and/or changing their state. The problem should be resolved, but you should check the cluster status as well to verify. Details of the mismatch can be found in the agent log file. | -| Failover has not occurred | An agent has detected that the primary database is no longer available in cluster cluster_name, but there are no standby nodes available for failover. | -| Database in wrong state on node_address | The standby agent has detected that the local database is no longer in recovery. The agent will now become idle. Manual intervention is required. | -| Database in wrong state on node_address | The primary agent has detected that the local database is in recovery. The agent will now become idle. Manual intervention is required. | -| Database connection failure for cluster cluster_name | This node is unable to connect to the database running on: node_address. Until this is fixed, failover may not work properly because this node will not be able to check if the database is running or not. | -| Standby custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a standby node. The agent will stop monitoring the local database. Script location: script_name Script output: script_results | -| master.shutdown.as.failure set to true for master node | The master.shutdown.as.failure property has been set to true for this cluster. Stopping the primary agent without stopping the entire cluster will be treated by the rest of the cluster as an immediate primary agent failure.
If maintenance is required on the primary database, shut down the primary agent and wait for a notification from the remaining nodes that failover will not happen.| -| Primary custom monitor failure for cluster cluster_name | The following custom monitor script has failed on a primary node. EFM will attempt to promote a standby. Script location: script_name Script output: script_results | -| Loopback address set for ping.server.ip | Loopback address is set for ping.server.ip property. This setting can interfere with the network isolation detection and hence it should be changed. | Available in EFM 3.10 | -| Load balancer attach script error | Load balancer attach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Load balancer detach script error | Load balancer detach script script_name failed to execute successfully. Exit Value: exit_code Results: script_results | -| Not enough synchronous standbys available in cluster cluster_name. | The number of synchronous standby nodes in the cluster has dropped to count. All write queries on the primary will be blocked until enough synchronous standby nodes are added. | Changed from *WARNING* to *SEVERE* in EFM 3.9. | diff --git a/product_docs/docs/efm/3/efm_user/11_supported_scenarios.mdx b/product_docs/docs/efm/3/efm_user/11_supported_scenarios.mdx deleted file mode 100644 index 78ba8b39740..00000000000 --- a/product_docs/docs/efm/3/efm_user/11_supported_scenarios.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: "Supported Failover and Failure Scenarios" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/supported_scenarios.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/supported_scenarios.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/supported_scenarios.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/supported_scenarios.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/supported_scenarios.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.34.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.32.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.36.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.35.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.30.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.30.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.32.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.34.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.35.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.36.html" ---- - - - -Failover Manager monitors a cluster for failures that may or may not result in failover. - -Failover Manager supports a very specific and limited set of failover scenarios. 
Failover can occur: - -- if the Primary database crashes or is shut down. -- if the node hosting the Primary database crashes or becomes unreachable. - -Failover Manager makes every attempt to verify the accuracy of these conditions. If agents cannot confirm that the Primary database or node has failed, Failover Manager will not perform any failover actions on the cluster. - -Failover Manager also supports a *no auto-failover* mode for situations where you want Failover Manager to monitor and detect failover conditions, but not perform an automatic failover to a Standby. In this mode, a notification is sent to the administrator when failover conditions are met. To disable automatic failover, modify the cluster properties file, setting the [auto.failover](04_configuring_efm/01_cluster_properties/#auto_failover) parameter to false. - -Failover Manager will alert an administrator to situations that require administrator intervention, but that do not merit promoting a Standby database to Primary. - - - -## Primary Database is Down - -If the agent running on the Primary database node detects a failure of the Primary database, Failover Manager begins the process of confirming the failure. - -![Confirming the Failure of the Primary Database.](images/supported_scenarios_master_db_down.png) - -If the agent on the Primary node detects that the Primary database has failed, all agents attempt to connect directly to the Primary database. If an agent can connect to the database, Failover Manager sends a notification about the state of the Primary node. If no agent can connect, the Primary agent declares database failure and releases the VIP (if applicable). - -If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node. Any additional Standby nodes are configured to replicate from the new primary unless auto.reconfigure is set to false. If applicable, the agent runs a post-promotion script. - -**Returning the Node to the Cluster** - -To recover from this scenario without restarting the entire cluster, you should: - -1. Restart the database on the original Primary node as a Standby database. -2. Invoke the `efm resume` command on the original Primary node. - -**Returning the Node to the Role of Primary** - -After returning the node to the cluster as a Standby, you can easily return the node to the role of Primary: - -1. If the cluster has more than one Standby node, use the `efm set-priority` command to set the node's failover priority to 1. -2. Invoke the [efm promote -switchover](07_using_efm_utility/#efm_promote) command to promote the node to its original role of Primary node. - -!!! Note - - Failover Manager does not rebuild a failed primary database to become a standby. Before rebuilding, it is important to determine why the primary failed, and ensure that all the data is available on the new primary. - Once the server is ready to be reinstated as a standby, the old data directory can be removed and the server can be reinstated. For more information, refer to the PostgreSQL documentation on [Setting up a standby server](https://www.postgresql.org/docs/current/warm-standby.html#STANDBY-SERVER-SETUP). In some cases, you can also reinstate the server using [pg_rewind](https://www.postgresql.org/docs/current/app-pgrewind.html).
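-As an illustration only, the recovery steps above might look like the following on the old Primary node; the cluster name (`efm`), database service name, and node address are hypothetical examples: - -```text -# After rebuilding the old Primary as a Standby (for example, with -# pg_rewind or a fresh base backup), start the database service: -systemctl start edb-as-10 - -# Resume monitoring so the node rejoins the cluster as a Standby: -/usr/edb/efm-3.10/bin/efm resume efm - -# Optionally, return the node to its original role of Primary: -/usr/edb/efm-3.10/bin/efm set-priority efm 10.0.1.11 1 -/usr/edb/efm-3.10/bin/efm promote efm -switchover -```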
- - - - -## Standby Database is Down - -If a Standby agent detects a failure of its database, the agent notifies the other agents; the other agents confirm the state of the database. - -![Confirming the failure of a Standby Database.](images/supported_scenarios_standby_db_down.png) - -After returning the Standby database to a healthy state, invoke the `efm resume` command to return the Standby to the cluster. - - - -## Primary Agent Exits or Node Fails - -If the Failover Manager Primary agent crashes or the node fails, a Standby agent will detect the failure and (if appropriate) initiate a failover. - -![Confirming the failure of the Primary Agent.](images/supported_scenarios_master_agent_exits.png) - -If an agent detects that the Primary agent has left, all agents attempt to connect directly to the Primary database. If any agent can connect to the database, an agent sends a notification about the failure of the Primary agent. If no agent can connect, the agents attempt to ping the virtual IP address to determine if it has been released. - -If no agent can reach the virtual IP address or the database server, Failover Manager starts the failover process. The Standby agent on the most up-to-date node runs a fencing script (if applicable), promotes the Standby database to Primary database, and assigns the virtual IP address to the Standby node; if applicable, the agent runs a post-promotion script. Any additional Standby nodes are configured to replicate from the new primary unless auto.reconfigure is set to false. - -If this scenario has occurred because the primary has been isolated from network, the Primary agent will detect the isolation and release the virtual IP address and create the recovery.conf file. Failover Manager will perform the previously listed steps on the remaining nodes of the cluster. - -To recover from this scenario without restarting the entire cluster, you should: - -1. Restart the original Primary node. -2. Bring the original Primary database up as a Standby node. -3. Start the service on the original Primary node. - -Please note that stopping an agent does not signal the cluster that the agent has failed. - - - -## Standby Agent Exits or Node Fails - -If a Standby agent exits or a Standby node fails, the other agents will detect that it is no longer connected to the cluster. - -![Failure of Standby Agent.](images/supported_scenarios_standby_agent_exits.png) - -When the failure is detected, the agents attempt to contact the database that resides on the node; if the agents confirm that there is a problem, Failover Manager sends the appropriate notification to the administrator. - -If there is only one Primary and one Standby remaining, there is no failover protection in the case of a Primary node failure. In the case of a Primary database failure, the Primary and Standby agents can agree that the database failed and proceed with failover. - - - -## Dedicated Witness Agent Exits / Node Fails - -The following scenario details the actions taken if a dedicated Witness (a node that is not hosting a database) fails. - -![Confirming the Failure of a dedicated Witness.](images/supported_scenarios_witness_agent_exits.png) - -When an agent detects that the Witness node cannot be reached, Failover Manager notifies the administrator of the state of the Witness. - -!!! Note - If the witness fails and the cluster only has two nodes, then there is no failover protection because the standby node has no way to know if the primary failed or was disconnected. 
In a two-node cluster, if the primary database fails but the nodes are still connected, failover will still occur since the standby can confirm the condition of the primary database. - - - -## Nodes Become Isolated from the Cluster - -The following scenario details the actions taken if one or more nodes (a minority of the cluster) become isolated from the majority of the cluster. - -![If members of the cluster become isolated.](images/supported_scenarios_node_becomes_isolated.png) - -If one or more nodes (but less than half of the cluster) become isolated from the rest of the cluster, the remaining cluster behaves as if the nodes have failed. The agents attempt to discern if the Primary node is among the isolated nodes; if it is, the Primary fences itself off from the cluster, while a Standby node (from within the cluster majority) is promoted to replace it. Other Standby nodes are configured to replicate from the new primary unless `auto.reconfigure` is set to `false`. - -Failover Manager then notifies an administrator, and the isolated nodes rejoin the cluster when they are able. When the nodes rejoin the cluster, the failover priority may change. diff --git a/product_docs/docs/efm/3/efm_user/12_upgrading_existing_cluster.mdx b/product_docs/docs/efm/3/efm_user/12_upgrading_existing_cluster.mdx deleted file mode 100644 index 6b1410999af..00000000000 --- a/product_docs/docs/efm/3/efm_user/12_upgrading_existing_cluster.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: "Upgrading an Existing Cluster" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/upgrading_existing_cluster.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/upgrading_existing_cluster.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/upgrading_existing_cluster.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/upgrading_existing_cluster.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/upgrading_existing_cluster.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.38.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.39.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.37.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.37.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.38.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.39.html" ---- - - - -Failover Manager provides a utility to assist you when upgrading a Failover Manager cluster. To upgrade an existing cluster, you must: - -1. Install Failover Manager 3.10 on each node of the cluster. For detailed information about installing Failover Manager, see [Installing Failover Manager](03_installing_efm/#installing_efm). -2. After installing Failover Manager, invoke the `efm upgrade-conf` utility to create the `.properties` and `.nodes` files for Failover Manager 3.10. The Failover Manager installer installs the upgrade utility ([efm upgrade-conf](07_using_efm_utility/#efm_upgrade_conf)) to the `/usr/edb/efm-3.10/bin` directory.
To invoke the utility, assume root privileges, and invoke the command: - -```text -efm upgrade-conf -``` - -The `efm upgrade-conf` utility locates the `.properties` and `.nodes` files of pre-existing clusters and copies the parameter values to a new configuration file for use by Failover Manager. The utility saves the updated copy of the configuration files in the `/etc/edb/efm-3.10` directory. - -3. Modify the `.properties` and `.nodes` files for EFM 3.10, specifying any new preferences. Use your choice of editor to modify any additional properties in the properties file (located in the `/etc/edb/efm-3.10` directory) before starting the service for that node. For detailed information about property settings, see [The Cluster Properties File](04_configuring_efm/01_cluster_properties/#cluster_properties). - -!!! Note - `db.bin` is a required property. When modifying the properties file, ensure that the `db.bin` property specifies the location of the Postgres `bin` directory. - -4. Use a version-specific command to stop the old Failover Manager cluster; for example, you can use the following command to stop a version 3.9 cluster: - -```text -/usr/edb/efm-3.9/bin/efm stop-cluster efm -``` - -5. Start the new [Failover Manager service](08_controlling_efm_service/#controlling_efm_service) (`edb-efm-3.10`) on each node of the cluster. - -The following example demonstrates invoking the upgrade utility to create the `.properties` and `.nodes` files for a Failover Manager installation: - -```text -# /usr/edb/efm-3.10/bin/efm upgrade-conf efm -Checking directory /etc/edb/efm-3.9 -Processing efm.properties file -The following properties were added in addition to those in previous installed version: - external.address - update.physical.slots.period -Checking directory /etc/edb/efm-3.9 -Processing efm.nodes file -Upgrade of files is finished. The owner and group for properties and nodes files have been set as 'efm'. -``` - -If you are [using a Failover Manager configuration without sudo](04_configuring_efm/04_extending_efm_permissions/#running_efm_without_sudo), include the `-source` flag and specify the name of the directory in which the configuration files reside when invoking `upgrade-conf`. If the directory is not the default configuration directory, the upgraded files will be created in the directory from which the `upgrade-conf` command was invoked. - -**Please note**: If you are using a unit file, you must manually update the file to reflect the new Failover Manager service name when you perform an upgrade. - -## Uninstalling Failover Manager - -After upgrading to Failover Manager 3.10, you can use your native package manager to remove previous installations of Failover Manager. For example, use the following command to remove Failover Manager 3.9 and any unneeded dependencies: - -- On RHEL or CentOS 7.x: - -```text -yum remove edb-efm39 -``` - -- On RHEL or Rocky Linux or AlmaLinux 8.x: - -```text -dnf remove edb-efm39 -``` - -- On Debian or Ubuntu: - -```text -apt-get remove edb-efm39 -``` - -- On SLES: - -```text -zypper remove edb-efm39 -``` - -## Performing a Database Update (Minor Version) - -This section describes how to perform a quick minor database version upgrade. You can use the steps that follow to upgrade from one minor version to another (for example, from 10.1.5 to version 10.2.7), or to apply a patch release for a version. - -You should first update the database server on each Standby node of the Failover Manager cluster.
Next, perform a switchover, promoting a Standby node to the role of Primary within the Failover Manager cluster. Finally, perform a database update on the old primary node. - -On each node of the cluster you must perform the following steps to update the database server: - -1. Stop the Failover Manager agent. -2. Stop the database server. -3. Update the database server. -4. Start the database service. -5. Start the Failover Manager agent. - -For detailed information about controlling the Advanced Server service, or upgrading your version of Advanced Server, please see the EDB Postgres Advanced Server Guide, available at: - -[https://www.enterprisedb.com/docs](/epas/latest/) - -When your updates are complete, you can use the [efm set-priority](07_using_efm_utility/#efm_set_priority) command to add the old primary to the front of the standby list (if needed), and then switch over to return the cluster to its original state. diff --git a/product_docs/docs/efm/3/efm_user/13_troubleshooting.mdx b/product_docs/docs/efm/3/efm_user/13_troubleshooting.mdx deleted file mode 100644 index f7393b6ace8..00000000000 --- a/product_docs/docs/efm/3/efm_user/13_troubleshooting.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "Troubleshooting" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/troubleshooting.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/troubleshooting.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/troubleshooting.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/troubleshooting.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/troubleshooting.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.40.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.40.html" ---- - - - -**Authorization file not found. Is the local agent running?** - -If you invoke an EFM cluster management command and EFM is not running on the node, the `efm` command will display an error: - -```text -Authorization file not found. Is the local agent running? -``` - -**Not authorized to run this command. User '<os user>' is not a member of the \`efm\` group.** - -You must have special privileges to invoke some of the `efm` commands documented in [Using the efm Utility](07_using_efm_utility/#using_efm_utility). If these commands are invoked by a user who isn't authorized to run them, the `efm` command will display an error: - -```text -Not authorized to run this command. User '' is not a member of the `efm` group. -``` - -**Notification; Unexpected error message** - -If you receive a notification about an unexpected error, check the [Failover Manager log file](09_controlling_logging/#controlling_logging) for an `OutOfMemory` message. Failover Manager runs with the default memory value set by this property: - -```text -# Extra information that will be passed to the JVM when starting the agent. -jvm.options=-Xmx128m -``` - -If you are running with less than 128 megabytes allocated, you should increase the value and restart the Failover Manager agent. - -**Confirming the OpenJDK version** - -Failover Manager is tested with OpenJDK; we strongly recommend using OpenJDK.
You can use the following command to check the type of your Java installation: - -```text -# java -version -openjdk version "1.8.0_191" -OpenJDK Runtime Environment (build 1.8.0_191-b12) -OpenJDK 64-Bit Server VM (build 25.191-b12, mixed mode) -``` diff --git a/product_docs/docs/efm/3/efm_user/14_configuring_streaming_replication.mdx b/product_docs/docs/efm/3/efm_user/14_configuring_streaming_replication.mdx deleted file mode 100644 index ef79bf0b612..00000000000 --- a/product_docs/docs/efm/3/efm_user/14_configuring_streaming_replication.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "Configuring Streaming Replication" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/configuring_streaming_replication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/configuring_streaming_replication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/configuring_streaming_replication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/configuring_streaming_replication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/configuring_streaming_replication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide_v3.5.1.42.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/EDB_Failover_Manager_Guide.1.42.html" ---- - - - -Configuring a replication scenario can be complex; for detailed information about configuration options, please see the PostgreSQL core documentation, available at: - - - -You may want to use a `.pgpass` file to enable md5 authentication for the replication user – this may or may not be the safest authentication method for your environment. For more information about the supported authentication options, please see the PostgreSQL core documentation at: - - - -!!! Note - From Version 3.10 onwards, EFM uses `pg_ctl` utility for standby promotion. You do not need to set the `trigger_file` or `promote_trigger_file` parameter for promotion of a standby server. - -## Limited Support for Cascading Replication - -While Failover Manager does not provide full support for cascading replication, it does provide limited support for simple failover in a cascading replication scenario. Cascading replication allows a Standby node to stream to another Standby node, reducing the number of connections (and processing overhead) to the primary node. - -![Cascading replication.](images/cascading_replication.png) - -For detailed information about configuring cascading replication, please see the PostgreSQL documentation at: - - - -To use Failover Manager in a cascading replication scenario, you should modify the cluster properties file, setting the following property values on Standby Node #2: - -```text -promotable=false -auto.reconfigure=false -``` - -In the event of a Failover, Standby Node #1 will be promoted to the role of Primary node. Should failover occur, Standby Node #2 will continue to act as a read-only replica for the new Primary node until you take actions to manually reconfigure the replication scenario to contain 3 nodes. - -In the event of a failure of Standby Node #1, you will not have failover protection, but you will receive an email notifying you of the failure of the node. - -!!! 
Note - Performing a switchover and switch back to the original primary may not preserve the cascading replication scenario. diff --git a/product_docs/docs/efm/3/efm_user/15_configuring_ssl_authentication.mdx b/product_docs/docs/efm/3/efm_user/15_configuring_ssl_authentication.mdx deleted file mode 100644 index ab59859ec33..00000000000 --- a/product_docs/docs/efm/3/efm_user/15_configuring_ssl_authentication.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Configuring SSL Authentication on a Failover Manager Cluster" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/configuring_ssl_authentication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/configuring_ssl_authentication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/configuring_ssl_authentication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/configuring_ssl_authentication.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/configuring_ssl_authentication.html" ---- - - - -The following steps enable SSL authentication for Failover Manager. Note that all connecting clients will be required to use SSL authentication when connecting to any database server within the cluster; you must modify the connection methods currently used by existing clients. - -To enable SSL on a Failover Manager cluster, you must: - -1. Place a `server.crt` and `server.key` file in the `data` directory (under your Advanced Server installation). You can purchase a certificate signed by an authority, or create your own self-signed certificate. For information about creating a self-signed certificate, see the PostgreSQL core documentation at: - - - -2. Modify the `postgresql.conf` file on each database within the Failover Manager cluster, enabling SSL: - - ```text - ssl=on - ``` - -> After modifying the postgresql.conf file, you must restart the server. - -1. Modify the `pg_hba.conf` file on each node of the Failover Manager cluster, adding the following line to the beginning of the file: - - ```text - hostnossl all all all reject - ``` - -> The line instructs the server to reject any connections that are not using SSL authentication; this enforces SSL authentication for any connecting clients. For information about modifying the pg_hba.conf file, see the PostgreSQL core documentation at: -> -> > - -1. After placing the server.crt and server.key file in the data directory, convert the certificate to a form that Java understands; you can use the command: - - ```text - openssl x509 -in server.crt -out server.crt.der -outform der - ``` - -> For more information, visit: -> -> > - -1. Then, add the certificate to the Java trusted certificates file: - - ```text - keytool -keystore $JAVA_HOME/lib/security/cacerts -alias <alias_name> -import -file server.crt.der - ``` - -> Where -> -> > `$JAVA_HOME` is the home directory of your Java installation. -> > -> > <alias_name> can be any string, but must be unique for each certificate. -> > -> > You can use the `keytool` command to review a list of the available certificates or retrieve information about a specific certificate. For more information about using the keytool command, enter: -> > -> > > ```text -> > > man keytool -> > > ``` -> -> The certificate from each database server must be imported into the trusted certificates file of each agent.
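-> To confirm that a certificate has been imported on an agent node, you can list it by alias; a minimal sketch, where `efmnode1` is a hypothetical alias: -> -> > ```text -> > keytool -keystore $JAVA_HOME/lib/security/cacerts -list -alias efmnode1 -> > ```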
Note that the location of the cacerts file may vary on each system. For more information, visit: -> -> > - -1. Modify the [efm.properties file](04_configuring_efm/01_cluster_properties/#jdbc_sslmode) on each node within the cluster, setting the `jdbc.sslmode` property. diff --git a/product_docs/docs/efm/3/efm_user/images/cascading_replication.png b/product_docs/docs/efm/3/efm_user/images/cascading_replication.png deleted file mode 100644 index 9f70a4f63fd..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/cascading_replication.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a9cc1551b6cb7ea81a2d3cae4593cb13bd34477c2882cc6ca5a63597fdf2af1b -size 53120 diff --git a/product_docs/docs/efm/3/efm_user/images/cascading_replication1.png b/product_docs/docs/efm/3/efm_user/images/cascading_replication1.png deleted file mode 100644 index 2477de6eba6..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/cascading_replication1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:64acefbc9cbf3a086eec5c019e268e463357d6bb0620e7f7be7a34ffd906b49c -size 36920 diff --git a/product_docs/docs/efm/3/efm_user/images/edb_logo.png b/product_docs/docs/efm/3/efm_user/images/edb_logo.png deleted file mode 100755 index 3c3bf2a4365..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/edb_logo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 -size 16849 diff --git a/product_docs/docs/efm/3/efm_user/images/failover_manager_overview.png b/product_docs/docs/efm/3/efm_user/images/failover_manager_overview.png deleted file mode 100644 index a15a28d3cf3..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/failover_manager_overview.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d17e3763dc0e81372a7377e6cf7578e693cfeef91e21637b85f7e4818a37a03d -size 116126 diff --git a/product_docs/docs/efm/3/efm_user/images/placeholder.png b/product_docs/docs/efm/3/efm_user/images/placeholder.png deleted file mode 100755 index 3c3bf2a4365..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/placeholder.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4e550b08552b088ef55bc9c72dcbc8ff962f6c1f69fde405abdaf98864ab3967 -size 16849 diff --git a/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_master.png b/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_master.png deleted file mode 100644 index 435cc08ba1d..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_master.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f4e40f70e02570dc7a8d3f1591f2311f431009719887073ed57585a593a76ac6 -size 327010 diff --git a/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_standby.png b/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_standby.png deleted file mode 100644 index c8a11e4fa42..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/str_replication_dashboard_standby.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:22525d4998a13c62071ea69c33eea474b6e773f3af6535bd8c62a2e36d906ca0 -size 337248 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_agent_exits.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_agent_exits.png deleted file mode 
100644 index f57c544993e..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_agent_exits.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:216f36072c4fe21a71d5277fb2c0868f685b77ea1cb14b3092a16a8a6f3055e8 -size 217408 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_db_down.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_db_down.png deleted file mode 100644 index df22dc9aa92..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_master_db_down.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6a84eda4b8e16846448db35f2921da3ad6bb2b24ec5f0ebb828b1b3f0cb87fcf -size 266651 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_node_becomes_isolated.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_node_becomes_isolated.png deleted file mode 100644 index 269eeb1ea0f..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_node_becomes_isolated.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e5c49c7e6672c7fc897dc156d3899f49efe3cf90087cf4aa9e6e2544f88e9508 -size 148435 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_agent_exits.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_agent_exits.png deleted file mode 100644 index 1b0a90cbe14..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_agent_exits.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:07ad86e12732575d5698c652d7caadccc847d3e567f9109270b918b144527cd7 -size 56094 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_db_down.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_db_down.png deleted file mode 100644 index e5ad35bae7a..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_standby_db_down.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d26fd9fe629f7acff573eddc0a7c81c697b1659345dc951dc1c24a0cc14787a1 -size 64713 diff --git a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_witness_agent_exits.png b/product_docs/docs/efm/3/efm_user/images/supported_scenarios_witness_agent_exits.png deleted file mode 100644 index 356b9a3912e..00000000000 --- a/product_docs/docs/efm/3/efm_user/images/supported_scenarios_witness_agent_exits.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9565b2be9589b2d12536820247f1b5719f63a19d72e8dbe902b3bad5ca093cbd -size 37332 diff --git a/product_docs/docs/efm/3/efm_user/index.mdx b/product_docs/docs/efm/3/efm_user/index.mdx deleted file mode 100644 index eecb3135eac..00000000000 --- a/product_docs/docs/efm/3/efm_user/index.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "EDB Failover Manager User Guide" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/whats_new.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.8/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/whats_new.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.9/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/genindex.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/whats_new.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.10/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.5/toc.html" ---- - -**EDB Failover Manager** - -EDB Postgres Failover Manager (EFM) is a high-availability module from EnterpriseDB that enables a Postgres primary node to automatically failover to a Standby node in the event of a software or hardware failure on the primary. - -This guide provides information about installing, configuring and using Failover Manager . For information about the platforms and versions supported by Failover Manager , see [Platform Compatibility](https://www.enterprisedb.com/platform-compatibility#efm). - -This document uses Postgres to mean either the PostgreSQL or EDB Postgres Advanced Server database. - -
-
-whats_new
-failover_manager_overview
-installing_efm
-configuring_efm
-using_efm
-monitoring_efm_cluster
-using_efm_utility
-controlling_efm_service
-controlling_logging
-notifications
-supported_scenarios
-upgrading_existing_cluster
-troubleshooting
-configuring_streaming_replication
-configuring_ssl_authentication
-conclusion
-
diff --git a/product_docs/docs/efm/3/index.mdx b/product_docs/docs/efm/3/index.mdx deleted file mode 100644 index e04eb42c3cd..00000000000 --- a/product_docs/docs/efm/3/index.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "EDB Postgres Failover Manager" -#productStub: true -directoryDefaults: - description: "EDB Postgres Failover Manager Version 3.10 Documentation and release notes. PostgreSQL replication and failover manager for achieving high availability." -navigation: - - efm_rel_notes - - "#Getting Started" - - 03_installing_efm - - efm_quick_start - - "#Guides" - - efm_pgpool_ha_guide - - efm_user -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/whats_new.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.6/genindex.html" - - "/edb-docs/p/edb-postgres-failover-manager/3.6" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/conclusion.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/whats_new.html" - - "/edb-docs/p/edb-postgres-failover-manager/3.7" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/index.html" - - "/edb-docs/d/edb-postgres-failover-manager/user-guides/user-guide/3.7/genindex.html" - - "/edb-docs/p/edb-postgres-failover-manager/3.8" - - "/edb-docs/p/edb-postgres-failover-manager/3.9" - - "/edb-docs/p/edb-postgres-failover-manager/3.10" - - "/edb-docs/p/edb-postgres-failover-manager/3.5" ---- - diff --git a/static/_redirects b/static/_redirects index 88780c5f7a2..8925e5bb15e 100644 --- a/static/_redirects +++ b/static/_redirects @@ -69,6 +69,7 @@ # EOL'd versions /docs/efm/3.6/* /docs/efm/latest/:splat 301 /docs/efm/3.7/* /docs/efm/latest/:splat 301 +/docs/efm/3/* /docs/efm/latest/:splat 301 # collapsed versions /docs/efm/3.8/* /docs/efm/3/:splat 301 /docs/efm/3.9/* /docs/efm/3/:splat 301 From 40a9325238bbb159acbae6de0d43b1e1d6fb2708 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Sat, 12 Mar 2022 14:28:14 -0500 Subject: [PATCH 02/12] Mongo FDW version consolidation --- .../mongo_data_adapter/5.2.8/01_whats_new.mdx | 13 - .../5.2.8/02_requirements_overview.mdx | 27 -- .../5.2.8/03_architecture_overview.mdx | 12 - .../04_installing_the_mongo_data_adapter.mdx | 341 -------------- .../05_updating_the_mongo_data_adapter.mdx | 37 -- .../5.2.8/06_features_of_mongo_fdw.mdx | 74 --- .../07_configuring_the_mongo_data_adapter.mdx | 443 ------------------ ...8_example_using_the_mongo_data_adapter.mdx | 116 ----- .../09_identifying_data_adapter_version.mdx | 22 - ...11_uninstalling_the_mongo_data_adapter.mdx | 27 -- .../docs/mongo_data_adapter/5.2.8/index.mdx | 22 - .../mongo_data_adapter/5.2.9/01_whats_new.mdx | 10 - .../02_requirements_overview.mdx | 0 .../03_architecture_overview.mdx | 0 .../04_installing_the_mongo_data_adapter.mdx | 0 .../05_updating_the_mongo_data_adapter.mdx | 0 .../06_features_of_mongo_fdw.mdx | 0 .../07_configuring_the_mongo_data_adapter.mdx | 0 ...8_example_using_the_mongo_data_adapter.mdx | 0 .../09_identifying_data_adapter_version.mdx | 0 .../{5.2.8 => 5.2}/10_limitations.mdx | 0 ...11_uninstalling_the_mongo_data_adapter.mdx | 0 .../{5.2.8 => 5.2}/images/EDB_logo.png | 0 .../ambari_administrative_interface.png | 0 .../{5.2.8 => 5.2}/images/edb_logo.svg | 0 .../images/installation_complete.png | 0 
.../installation_wizard_welcome_screen.png | 0 .../images/mongo_server_with_postgres.png | 0 .../progress_as_the_servers_restart.png | 0 .../images/restart_the_server.png | 0 .../images/setup_wizard_ready.png | 0 .../specify_an_installation_directory.png | 0 ...the_installation_wizard_welcome_screen.png | 0 .../{5.3.0 => 5.2}/index.mdx | 6 +- .../5.2/mongo_rel_notes/index.mdx | 17 + .../mongo_rel_notes/mongo5.2.8_rel_notes.mdx | 25 + .../mongo_rel_notes/mongo5.2.9_rel_notes.mdx | 20 + .../5.3.0/10_limitations.mdx | 11 - .../5.3.0/images/EDB_logo.png | 3 - .../ambari_administrative_interface.png | 3 - .../5.3.0/images/edb_logo.svg | 19 - .../5.3.0/images/installation_complete.png | 3 - .../installation_wizard_welcome_screen.png | 3 - .../images/mongo_server_with_postgres.png | 3 - .../progress_as_the_servers_restart.png | 3 - .../5.3.0/images/restart_the_server.png | 3 - .../5.3.0/images/setup_wizard_ready.png | 3 - .../specify_an_installation_directory.png | 3 - ...the_installation_wizard_welcome_screen.png | 3 - .../{5.3.0 => 5.3}/01_5.3.0_rel_notes.mdx | 0 .../02_requirements_overview.mdx | 0 .../03_architecture_overview.mdx | 0 .../01_mongo_rhel8_x86.mdx | 0 .../02_mongo_other_linux8_x86.mdx | 0 .../03_mongo_rhel7_x86.mdx | 0 .../04_mongo_centos7_x86.mdx | 0 .../05_mongo_sles15_x86.mdx | 0 .../07_mongo_sles12_x86.mdx | 0 .../09_mongo_ubuntu20_deb10_x86.mdx | 0 .../11_mongo_ubuntu18_deb9_x86.mdx | 0 .../13_mongo_rhel8_ppcle.mdx | 0 .../15_mongo_rhel7_ppcle.mdx | 0 .../17_mongo_sles15_ppcle.mdx | 0 .../19_mongo_sles12_ppcle.mdx | 0 .../index.mdx | 0 .../05_updating_the_mongo_data_adapter.mdx | 0 .../06_features_of_mongo_fdw.mdx | 0 .../07_configuring_the_mongo_data_adapter.mdx | 0 ...8_example_using_the_mongo_data_adapter.mdx | 0 .../08a_example_join_pushdown.mdx | 0 .../09_identifying_data_adapter_version.mdx | 0 .../{5.2.9 => 5.3}/10_limitations.mdx | 0 ...11_uninstalling_the_mongo_data_adapter.mdx | 0 .../{5.2.9 => 5.3}/images/EDB_logo.png | 0 .../ambari_administrative_interface.png | 0 .../{5.2.9 => 5.3}/images/edb_logo.svg | 0 .../images/installation_complete.png | 0 .../installation_wizard_welcome_screen.png | 0 .../images/mongo_server_with_postgres.png | 0 .../progress_as_the_servers_restart.png | 0 .../images/restart_the_server.png | 0 .../images/setup_wizard_ready.png | 0 .../specify_an_installation_directory.png | 0 ...the_installation_wizard_welcome_screen.png | 0 .../{5.2.9 => 5.3}/index.mdx | 0 static/_redirects | 4 + 86 files changed, 68 insertions(+), 1208 deletions(-) delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx delete mode 100644 
product_docs/docs/mongo_data_adapter/5.2.8/index.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/02_requirements_overview.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/03_architecture_overview.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/04_installing_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/05_updating_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/06_features_of_mongo_fdw.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/07_configuring_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/08_example_using_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/09_identifying_data_adapter_version.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/10_limitations.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.2}/11_uninstalling_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/EDB_logo.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/ambari_administrative_interface.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/edb_logo.svg (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/installation_complete.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/installation_wizard_welcome_screen.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/mongo_server_with_postgres.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/progress_as_the_servers_restart.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/restart_the_server.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/setup_wizard_ready.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/specify_an_installation_directory.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.8 => 5.2}/images/the_installation_wizard_welcome_screen.png (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.2}/index.mdx (65%) create mode 100644 product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3.0/10_limitations.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3.0/images/EDB_logo.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/ambari_administrative_interface.png delete mode 100644 product_docs/docs/mongo_data_adapter/5.3.0/images/edb_logo.svg delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/installation_complete.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/installation_wizard_welcome_screen.png delete mode 100644 product_docs/docs/mongo_data_adapter/5.3.0/images/mongo_server_with_postgres.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/progress_as_the_servers_restart.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/restart_the_server.png delete mode 100755 
product_docs/docs/mongo_data_adapter/5.3.0/images/setup_wizard_ready.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/specify_an_installation_directory.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3.0/images/the_installation_wizard_welcome_screen.png rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/01_5.3.0_rel_notes.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/02_requirements_overview.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/03_architecture_overview.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/04_installing_the_mongo_data_adapter/index.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/05_updating_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/06_features_of_mongo_fdw.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/07_configuring_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/08_example_using_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/08a_example_join_pushdown.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/09_identifying_data_adapter_version.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/10_limitations.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3.0 => 5.3}/11_uninstalling_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/EDB_logo.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/ambari_administrative_interface.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/edb_logo.svg (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/installation_complete.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/installation_wizard_welcome_screen.png (100%) rename 
product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/mongo_server_with_postgres.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/progress_as_the_servers_restart.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/restart_the_server.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/setup_wizard_ready.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/specify_an_installation_directory.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/images/the_installation_wizard_welcome_screen.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2.9 => 5.3}/index.mdx (100%) diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx deleted file mode 100644 index f7218af622f..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "What’s New" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/whats_new.html" ---- - - - -The following features are added in MongoDB Foreign Data Wrapper `5.2.8`: - -- Support for EDB Postgres Advanced Server 13. -- Support for the Ubuntu 20.04 LTS platform. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx deleted file mode 100644 index 0b47264336e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Requirements Overview" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/requirements_overview.html" ---- - -## Supported Versions - -The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 10 and above. - -## Supported Platforms - -The MongoDB Foreign Data Wrapper is supported on the following platforms: - -**Linux x86-64** - - - RHEL 8.x/7.x - - Rocky Linux/AlmaLinux 8.x - - CentOS 7.x - - OL 8.x/7.x - - Ubuntu 20.04/18.04 LTS - - Debian 10.x/9.x - -**Linux on IBM Power8/9 (LE)** - - - RHEL 7.x diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx deleted file mode 100644 index e516abd07e5..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "Architecture Overview" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/architecture_overview.html" ---- - - - -The MongoDB data wrapper provides an interface between a MongoDB server and a Postgres database. It transforms a Postgres statement (`SELECT`/`INSERT`/`DELETE`/`UPDATE`) into a query that is understood by the MongoDB database, as sketched in the example below.
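To make the translation concrete, here is a minimal sketch, assuming a foreign table `warehouse` mapped to a MongoDB collection of the same name (the table definition appears later in this guide; the MongoDB request shown is a rough equivalent, not verbatim wire traffic):

```text
-- a query issued against the foreign table on the Postgres side...
SELECT warehouse_name FROM warehouse WHERE warehouse_id = 1;

-- ...is rewritten by mongo_fdw into a MongoDB request roughly equivalent to:
db.warehouse.find({ "warehouse_id" : 1 })
```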
- -![Using MongoDB FDW with Postgres](images/mongo_server_with_postgres.png) diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx deleted file mode 100644 index 8f4a3b68c4d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: "Installing the MongoDB Foreign Data Wrapper" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/installing_the_mongo_data_adapter.html" ---- - - - -The MongoDB Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. - - - -## Installing the MongoDB Foreign Data Wrapper using an RPM Package - -You can install the MongoDB Foreign Data Wrapper using an RPM package on the following platforms: - -- [RHEL 7](#rhel7) -- [RHEL 8](#rhel8) -- [CentOS 7](#centos7) -- [Rocky Linux/AlmaLinux 8](#centos8) - - - -### On RHEL 7 - -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -Enable the optional, extras, and HA repositories: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - <https://www.enterprisedb.com/repository-access-request/> - -After receiving your repository credentials: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as<xx>-mongo_fdw`. - -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://<username>:<password>@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - yum install edb-as<xx>-mongo_fdw - ``` - -where `xx` is the server version number. - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve.
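As a quick sanity check once the installation completes, you can query the RPM database for the new package (a hypothetical session; `edb-as13-mongo_fdw` stands in for whichever `edb-as<xx>-mongo_fdw` package matches your server version):

```text
rpm -q edb-as13-mongo_fdw
rpm -ql edb-as13-mongo_fdw | grep -i mongo_fdw
```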
### On RHEL 8 - -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -Enable the `codeready-builder-for-rhel-8-*-rpms` repository: - - ```text - ARCH=$( /bin/arch ) - subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - <https://www.enterprisedb.com/repository-access-request/> - -After receiving your repository credentials: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as<xx>-mongo_fdw`. - -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://<username>:<password>@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - -where `xx` is the server version number. - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. - - - -### On CentOS 7 - -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -!!! Note - You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - <https://www.enterprisedb.com/repository-access-request/> - -After receiving your repository credentials, you can: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as<xx>-mongo_fdw`.
- -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://<username>:<password>@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - yum install edb-as<xx>-mongo_fdw - ``` - -where `xx` is the server version number. - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. - - - -### On Rocky Linux/AlmaLinux 8 - -Before installing the MongoDB Foreign Data Wrapper, you must install the following prerequisite packages, and request credentials from EDB: - -Install the `epel-release` package: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -Enable the `PowerTools` repository: - - ```text - dnf config-manager --set-enabled PowerTools - ``` - -You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - - <https://www.enterprisedb.com/repository-access-request/> - -After receiving your repository credentials: - -1. Create the repository configuration file. -2. Modify the file, providing your user name and password. -3. Install `edb-as<xx>-mongo_fdw`. - -**Creating a Repository Configuration File** - -To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -**Modifying the file to provide your user name and password** - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user.
- - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://<username>:<password>@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -**Installing the MongoDB Foreign Data Wrapper** - -After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - -where `xx` is the server version number. - -When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. - -During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. - -## Installing the MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host - -To install the MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/repository-access-request/). - -The following steps will walk you through using the EDB apt repository to install a Debian package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. - -1. Assume superuser privileges: - - ```text - sudo su - - ``` - -2. Configure the EDB repository: - - On Debian 9 and Ubuntu: - - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` - - On Debian 10: - - 1. Set up the EDB repository: - - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` - - 2. Substitute your EDB credentials for the `username` and `password` in the following command: - - ```text - sh -c 'echo "machine apt.enterprisedb.com login <username> password <password>" > /etc/apt/auth.conf.d/edb.conf' - ``` - -3. Add support to your system for secure APT repositories: - - ```text - apt-get install apt-transport-https - ``` - -4. Add the EDB signing key: - - ```text - wget -q -O - https://<username>:<password>@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` - -5. Update the repository metadata: - - ```text - apt-get update - ``` - -6. Install the Debian package: - - ```text - apt-get install edb-as<xx>-mongo-fdw - ``` - -where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx deleted file mode 100644 index 9245f7f3e7f..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Updating the MongoDB Foreign Data Wrapper" ---- - - - -**Updating an RPM Installation** - -If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version.
To update the `edb.repo` file, assume superuser privileges and enter: - -- On RHEL or CentOS 7: - - > `yum upgrade edb-repo` - -- On RHEL or Rocky Linux or AlmaLinux 8: - - > `dnf upgrade edb-repo` - -yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: - -- On RHEL or CentOS 7: - - > `yum upgrade edb-as<xx>-mongo_fdw` - -- On RHEL or Rocky Linux or AlmaLinux 8: - - > `dnf upgrade edb-as<xx>-mongo_fdw` - - where `xx` is the server version number. - -**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** - -To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, use the following command: - -> `apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc` -> -> where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx deleted file mode 100644 index 9e8e4cf4fb8..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Features of the MongoDB Foreign Data Wrapper" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/features_of_mongo_fdw.html" ---- - - - -The key features of the MongoDB Foreign Data Wrapper are listed below: - -## Writable FDW - -The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE`, and `DELETE` data in remote MongoDB collections by inserting, updating, and deleting data locally in foreign tables. See also: - -[Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) - -[Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) - -## Where Clause Push-down - -The MongoDB Foreign Data Wrapper pushes down a `WHERE` clause only when the clause contains comparison expressions that have a column and a constant as arguments. `WHERE` clause push-down is not supported when the constant is an array, as the sketch below illustrates.
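A short illustration of this distinction, using the `warehouse` foreign table defined later in this guide (the queries are representative; `EXPLAIN VERBOSE` output showing the remote query varies by version and is omitted here):

```text
-- a comparison between a column and a simple constant can be pushed
-- down, so MongoDB evaluates the filter:
SELECT * FROM warehouse WHERE warehouse_id = 1;

-- a comparison against an array constant is not pushed down; mongo_fdw
-- fetches the rows and Postgres applies the filter locally:
SELECT * FROM warehouse WHERE warehouse_id = ANY (ARRAY[1, 2]);
```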
## Connection Pooling - -Mongo_FDW establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. This connection is kept and reused for subsequent queries in the same session. - -## Automated Cleanup - -The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose. The syntax of a `DROP EXTENSION` command is: - -> `DROP EXTENSION mongo_fdw CASCADE;` - -For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). - -## Full Document Retrieval - -This feature allows you to retrieve documents, along with all their fields, from a collection without any prior knowledge of the fields in the BSON documents available in the MongoDB collection. The retrieved documents are in JSON format. - -You can retrieve all available fields in a collection residing in MongoDB as explained in the following example: - -**Example**: - -The collection in MongoDB: - -```text -> db.warehouse.find(); -{ "_id" : ObjectId("58a1ebbaf543ec0b90545859"), "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : ISODate("2014-12-12T07:12:10Z") } -{ "_id" : ObjectId("58a1ebbaf543ec0b9054585a"), "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : ISODate("2015-11-11T08:13:10Z") } -``` - -Steps for retrieving the document: - -1. Create a foreign table with a single column named `__doc`. The type of the column can be `json`, `jsonb`, `text`, or `varchar`. - -```text -CREATE FOREIGN TABLE test_json(__doc json) SERVER mongo_server OPTIONS (database 'testdb', collection 'warehouse'); -``` - -2. Retrieve the document. - -```text -SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; -``` - -The output: - -```text -edb=# SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; - __doc --------------------------------------------------------------------------------------------------------------------------------------------------------- -{ "_id" : { "$oid" : "58a1ebbaf543ec0b90545859" }, "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : { "$date" : 1418368330000 } } -{ "_id" : { "$oid" : "58a1ebbaf543ec0b9054585a" }, "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : { "$date" : 1447229590000 } } -(2 rows) -``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx deleted file mode 100644 index 000781a74b1..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,443 +0,0 @@ ---- -title: "Configuring the MongoDB Foreign Data Wrapper" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/configuring_the_mongo_data_adapter.html" ---- - - - -Before using the MongoDB Foreign Data Wrapper, you must: - -> 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. -> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. -> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a collection that resides on the MongoDB cluster. - - - -## CREATE EXTENSION - -Use the `CREATE EXTENSION` command to create the `mongo_fdw` extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be querying the MongoDB server, and invoke the command: - -```text -CREATE EXTENSION [IF NOT EXISTS] mongo_fdw [WITH] [SCHEMA schema_name]; -``` - -**Parameters** - -`IF NOT EXISTS` - -> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. - -`schema_name` - -> Optionally specify the name of the schema in which to install the extension's objects.
- -**Example** - -The following command installs the MongoDB foreign data wrapper: - -> `CREATE EXTENSION mongo_fdw;` - -For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: - -> <https://www.postgresql.org/docs/current/sql-createextension.html>. - - - -## CREATE SERVER - -Use the `CREATE SERVER` command to define a connection to a foreign server. The syntax is: - -```text -CREATE SERVER server_name FOREIGN DATA WRAPPER mongo_fdw - [OPTIONS (option 'value' [, ...])] -``` - -The role that defines the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To create a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `CREATE SERVER` command. - -**Parameters** - -`server_name` - -> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. - -`FOREIGN DATA WRAPPER` - -> Include the `FOREIGN DATA WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. - -`OPTIONS` - -> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: - -| **Option** | **Description** | - | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - | address | The address or hostname of the Mongo server. The default value is `127.0.0.1`. | - | port | The port number of the Mongo server. The valid range is 0 to 65535. The default value is `27017`. | - | authentication_database | The database against which the user is authenticated. This option is only valid with password-based authentication. | - | ssl | Requests an authenticated, encrypted SSL connection. By default, the value is set to `false`. Set the value to `true` to enable ssl. The options that follow apply only when `ssl` is set to `true`. | - | pem_file | SSL option. | - | pem_pwd | SSL option. | - | ca_file | SSL option. | - | ca_dir | SSL option. | - | crl_file | SSL option. | - | weak_cert_validation | SSL option. | - -**Example** - -The following command creates a foreign server named `mongo_server` that uses the `mongo_fdw` foreign data wrapper to connect to a host with an IP address of `127.0.0.1`: - -```text -CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw OPTIONS (address '127.0.0.1', port '27017'); -``` - -The foreign server uses the default port (`27017`) for the connection to the client on the MongoDB cluster.
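Where the MongoDB deployment requires authenticated SSL connections, the options from the table above might be combined along the following lines (a sketch only; the certificate paths and the `admin` authentication database are assumptions to adapt to your environment):

```text
CREATE SERVER mongo_ssl_server FOREIGN DATA WRAPPER mongo_fdw
    OPTIONS (address '127.0.0.1',
             port '27017',
             authentication_database 'admin',
             ssl 'true',
             pem_file '/path/to/client.pem',
             ca_file '/path/to/cacert.pem');
```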
For more information about using the `CREATE SERVER` command, see: - -> <https://www.postgresql.org/docs/current/sql-createserver.html> - - - -## CREATE USER MAPPING - -Use the `CREATE USER MAPPING` command to define a mapping that associates a Postgres role with a foreign server: - -```text -CREATE USER MAPPING FOR role_name SERVER server_name - [OPTIONS (option 'value' [, ...])]; -``` - -You must be the owner of the foreign server to create a user mapping for that server. - -**Parameters** - -`role_name` - -> Use `role_name` to specify the role that will be associated with the foreign server. - -`server_name` - -> Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. - -`OPTIONS` - -> Use the `OPTIONS` clause to specify connection information for the foreign server. -> -> `username`: the name of the user on the MongoDB server. -> -> `password`: the password associated with the username. - -**Example** - -The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: - -> `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` - -If the database host uses secure authentication, provide connection credentials when creating the user mapping: - -```text -CREATE USER MAPPING FOR enterprisedb SERVER mongo_server OPTIONS (username 'mongo_user', password 'mongo_pass'); -``` - -The command creates a user mapping for a role named `enterprisedb` that is associated with a server named `mongo_server`. When connecting to the MongoDB server, the server will authenticate as `mongo_user`, and provide a password of `mongo_pass`. - -For detailed information about the `CREATE USER MAPPING` command, see: - -> <https://www.postgresql.org/docs/current/sql-createusermapping.html> - - - -## CREATE FOREIGN TABLE - -A foreign table is a pointer to a collection that resides on the MongoDB host. Before creating a foreign table definition on the Postgres server, connect to the MongoDB server and create a collection; the fields in the collection will map to columns in the table on the Postgres server. Then, use the `CREATE FOREIGN TABLE` command to define a table on the Postgres server with columns that correspond to the collection that resides on the MongoDB host. The syntax is: - -```text -CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [ - { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ] - | table_constraint } - [, ... ] -] ) -[ INHERITS ( parent_table [, ... ] ) ] - SERVER server_name [ OPTIONS ( option 'value' [, ... ] ) ] -``` - -where `column_constraint` is: - -```text -[ CONSTRAINT constraint_name ] -{ NOT NULL | NULL | CHECK (expr) [ NO INHERIT ] | DEFAULT default_expr } -``` - -and `table_constraint` is: - -```text -[ CONSTRAINT constraint_name ] CHECK (expr) [ NO INHERIT ] -``` - -**Parameters** - -`table_name` - -> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. - -`IF NOT EXISTS` - -> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. - -`column_name` - -> Specifies the name of a column in the new table; each column should correspond to a column described on the MongoDB server. - -`data_type` - -> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. - -`COLLATE collation` - -> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. - -`INHERITS (parent_table [, ... ])` - -> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. - -`CONSTRAINT constraint_name` - -> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. - -`NOT NULL` - -> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values.
- -`NULL` - -> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. - -`CHECK (expr) [NO INHERIT]` - -> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. -> -> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. -> -> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. - -`DEFAULT default_expr` - -> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. - -`SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` - -> To create a foreign table that will allow you to query a collection that resides on a MongoDB server, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. -> -> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: - -| option | value | - | ---------- | --------------------------------------------------------------------------------- | - | database | The name of the database to query. The default value is `test`. | - | collection | The name of the collection to query. The default value is the foreign table name. | - -**Example** - -To use data that is stored on a MongoDB server, you must create a table on the Postgres host that maps the columns of a MongoDB collection to the columns of a Postgres table. For example, for a MongoDB collection with the following definition: - -```text -db.warehouse.find -( - { - "warehouse_id" : 1 - } -).pretty() -{ - "_id" : ObjectId("53720b1904864dc1f5a571a0"), - "warehouse_id" : 1, - "warehouse_name" : "UPS", - "warehouse_created" : ISODate("2014-12-12T07:12:10Z") -} -``` - -You should execute a command on the Postgres server that creates a comparable table on the Postgres server: - -```text -CREATE FOREIGN TABLE warehouse -( - _id NAME, - warehouse_id INT, - warehouse_name TEXT, - warehouse_created TIMESTAMPTZ -) -SERVER mongo_server -OPTIONS (database 'db', collection 'warehouse'); -``` - -The first column of the table must be `_id` of the type `name`. - -Include the `SERVER` clause and its `OPTIONS` to specify the name of the database stored on the MongoDB server and the name of the collection (`warehouse`) that corresponds to the table on the Postgres server. - -For more information about using the `CREATE FOREIGN TABLE` command, see: - -> <https://www.postgresql.org/docs/current/sql-createforeigntable.html> - -!!! Note - MongoDB foreign data wrapper supports the write capability feature. - - - -### Data Type Mappings - -When using the foreign data wrapper, you must create a table on the Postgres server that mirrors the table that resides on the MongoDB server. The MongoDB data wrapper will automatically convert the following MongoDB data types to the target Postgres type: - -| **MongoDB (BSON Type)** | **Postgres** | - | ---------------------------- | ---------------------------------------- | - | ARRAY | JSON | - | BOOL | BOOL | - | BINARY | BYTEA | - | DATE_TIME | DATE/TIMESTAMP/TIMESTAMPTZ | - | DOCUMENT | JSON | - | DOUBLE | FLOAT/FLOAT4/FLOAT8/DOUBLE PRECISION/NUMERIC | - | INT32 | SMALLINT/INT2/INT/INTEGER/INT4 | - | INT64 | BIGINT/INT8 | - | OID | NAME | - | UTF8 | BPCHAR/VARCHAR/CHARACTER VARYING/TEXT |
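As a concrete illustration of how several of these mappings combine in one table definition, consider a hypothetical `inventory` collection (the collection and column names are invented for this sketch; `mongo_server` is the foreign server created earlier):

```text
CREATE FOREIGN TABLE inventory
(
    _id        NAME,      -- OID      -> NAME (required first column)
    in_stock   BOOLEAN,   -- BOOL     -> BOOL
    quantity   INTEGER,   -- INT32    -> INTEGER
    price      NUMERIC,   -- DOUBLE   -> NUMERIC
    sku        TEXT,      -- UTF8     -> TEXT
    attributes JSON       -- DOCUMENT -> JSON
)
SERVER mongo_server
OPTIONS (database 'db', collection 'inventory');
```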
## DROP EXTENSION - -Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you're dropping the `mongo_fdw` extension, and run the command: - -```text -DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]; -``` - -**Parameters** - -`IF EXISTS` - -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exist. - -`name` - -> Specify the name of the installed extension. -> -> `CASCADE` -> -> Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects. -> -> `RESTRICT` -> -> Refuse to drop the extension if any objects depend on it, other than its member objects and extensions listed in the same `DROP` command. - -**Example** - -The following command removes the extension from the existing database: - -> `DROP EXTENSION mongo_fdw;` - -For more information about using the foreign data wrapper `DROP EXTENSION` command, see: - -> <https://www.postgresql.org/docs/current/sql-dropextension.html>. - -## DROP SERVER - -Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is: - -```text -DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] -``` - -Only the owner of a foreign server (or a superuser) can drop it; use the `ALTER SERVER` command to reassign ownership of a foreign server if necessary. - -**Parameters** - -`IF EXISTS` - -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exist. - -`name` - -> Specify the name of the installed server. -> -> `CASCADE` -> -> Automatically drop objects that depend on the server, and in turn all objects that depend on those objects. -> -> `RESTRICT` -> -> Refuse to drop the server if any objects depend on it. - -**Example** - -The following command removes a foreign server named `mongo_server`: - -> `DROP SERVER mongo_server;` - -For more information about using the `DROP SERVER` command, see: - -> <https://www.postgresql.org/docs/current/sql-dropserver.html> - -## DROP USER MAPPING - -Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server. - -```text -DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name; -``` - -**Parameters** - -`IF EXISTS` - -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. - -`user_name` - -> Specify the user name of the mapping. - -`server_name` - -> Specify the name of the server that defines a connection to the MongoDB cluster.
- -**Example** - -The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: - -> `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;` - -For detailed information about the `DROP USER MAPPING` command, see: - -> <https://www.postgresql.org/docs/current/sql-dropusermapping.html> - -## DROP FOREIGN TABLE - -A foreign table is a pointer to a collection that resides on the MongoDB host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it. - -```text -DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] -``` - -**Parameters** - -`IF EXISTS` - -> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exist. - -`name` - -> Specify the name of the foreign table. - -`CASCADE` - -> Automatically drop objects that depend on the foreign table, and in turn all objects that depend on those objects. - -`RESTRICT` - -> Refuse to drop the foreign table if any objects depend on it. - -**Example** - -```text -DROP FOREIGN TABLE warehouse; -``` - -For more information about using the `DROP FOREIGN TABLE` command, see: - -> <https://www.postgresql.org/docs/current/sql-dropforeigntable.html> diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx deleted file mode 100644 index 35a16445406..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Example: Using the MongoDB Foreign Data Wrapper" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/example_using_the_mongo_data_adapter.html" ---- - - - -Before using the MongoDB foreign data wrapper, you must connect to your database with a client application. The following examples demonstrate using the wrapper with the psql client. After connecting to psql, you can follow the steps in the example below: - -```text --- load extension first time after install -CREATE EXTENSION mongo_fdw; - --- create server object -CREATE SERVER mongo_server - FOREIGN DATA WRAPPER mongo_fdw - OPTIONS (address '127.0.0.1', port '27017'); - --- create user mapping -CREATE USER MAPPING FOR enterprisedb - SERVER mongo_server - OPTIONS (username 'mongo_user', password 'mongo_pass'); - --- create foreign table -CREATE FOREIGN TABLE warehouse - ( - _id name, - warehouse_id int, - warehouse_name text, - warehouse_created timestamptz - ) - SERVER mongo_server - OPTIONS (database 'db', collection 'warehouse'); - --- Note: first column of the table must be "_id" of type "name".
- --- select from table -SELECT * FROM warehouse WHERE warehouse_id = 1; - _id | warehouse_id | warehouse_name | warehouse_created ---------------------------+--------------+----------------+--------------------------- - 53720b1904864dc1f5a571a0 | 1 | UPS | 2014-12-12 12:42:10+05:30 -(1 row) - -db.warehouse.find -( - { - "warehouse_id" : 1 - } -).pretty() -{ - "_id" : ObjectId("53720b1904864dc1f5a571a0"), - "warehouse_id" : 1, - "warehouse_name" : "UPS", - "warehouse_created" : ISODate("2014-12-12T07:12:10Z") -} - --- insert row in table -INSERT INTO warehouse VALUES (0, 2, 'Laptop', '2015-11-11T08:13:10Z'); - -db.warehouse.insert -( - { - "warehouse_id" : NumberInt(2), - "warehouse_name" : "Laptop", - "warehouse_created" : ISODate("2015-11-11T08:13:10Z") - } -) - --- delete row from table -DELETE FROM warehouse WHERE warehouse_id = 2; - -db.warehouse.remove -( - { - "warehouse_id" : 2 - } -) - --- update a row of table -UPDATE warehouse SET warehouse_name = 'UPS_NEW' WHERE warehouse_id = 1; - -db.warehouse.update -( - { - "warehouse_id" : 1 - }, - { - "warehouse_id" : 1, - "warehouse_name" : "UPS_NEW", - "warehouse_created" : ISODate("2014-12-12T07:12:10Z") - } -) - --- explain a table -EXPLAIN SELECT * FROM warehouse WHERE warehouse_id = 1; - QUERY PLAN ------------------------------------------------------------------ - Foreign Scan on warehouse (cost=0.00..0.00 rows=1000 width=84) - Filter: (warehouse_id = 1) - Foreign Namespace: db.warehouse -(3 rows) - --- collect data distribution statistics -ANALYZE warehouse; - --- drop foreign table -DROP FOREIGN TABLE warehouse; - --- drop user mapping -DROP USER MAPPING FOR enterprisedb SERVER mongo_server; - --- drop server -DROP SERVER mongo_server; -``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx deleted file mode 100644 index efa8210a445..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Identifying the MongoDB Foreign Data Wrapper Version" -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/identifying_data_adapter_version.html" ---- - - - -The MongoDB Foreign Data Wrapper includes a function that you can use to identify the currently installed version of the `.so` file for the data wrapper. To use the function, connect to the Postgres server, and enter: - -```text -SELECT mongo_fdw_version(); -``` - -The function returns the version number: - -```text -mongo_fdw_version ------------------ - -``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx deleted file mode 100644 index 3d6dc8ce772..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Uninstalling the MongoDB Foreign Data Wrapper" ---- - - - -**Uninstalling an RPM Package** - -You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. 
To remove a package, open a terminal window, assume superuser privileges, and enter the command: - -- On RHEL or CentOS 7: - - `yum remove edb-as<xx>-mongo_fdw` - -- On RHEL or Rocky Linux or AlmaLinux 8: - - `dnf remove edb-as<xx>-mongo_fdw` - -Where `xx` is the server version number. - -**Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** - -To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command: - - `apt-get remove edb-as<xx>-mongo-fdw` - -Where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx deleted file mode 100644 index 8686be5294c..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "MongoDB Foreign Data Wrapper Guide" -legacyRedirectsGenerated: - # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/index.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/conclusion.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/whats_new.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/genindex.html" - - "/edb-docs/p/edb-postgres-mongodb-data-adapter/5.2.8" ---- - -The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. - -The MongoDB Foreign Data Wrapper can be installed with an RPM package. You can download an installer from the [EDB website](https://www.enterprisedb.com/software-downloads-postgres/). - -This guide uses the term `Postgres` to refer to an instance of EDB Postgres Advanced Server. - -
- -whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion - -
diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx deleted file mode 100644 index 3ba10216617..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.9/01_whats_new.mdx +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "What’s New" ---- - - - -The following features are added in MongoDB Foreign Data Wrapper `5.2.9`: - -- Updated mongo-c-driver to 1.17.3 -- Updated json-c to 0.15 diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/02_requirements_overview.mdx rename to product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/03_architecture_overview.mdx rename to product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/04_installing_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/05_updating_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/06_features_of_mongo_fdw.mdx rename to product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/07_configuring_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/08_example_using_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/08_example_using_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2/08_example_using_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5.2/09_identifying_data_adapter_version.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/09_identifying_data_adapter_version.mdx rename to product_docs/docs/mongo_data_adapter/5.2/09_identifying_data_adapter_version.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx
b/product_docs/docs/mongo_data_adapter/5.2/10_limitations.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx rename to product_docs/docs/mongo_data_adapter/5.2/10_limitations.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/11_uninstalling_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/11_uninstalling_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2/11_uninstalling_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5.2/images/EDB_logo.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/EDB_logo.png rename to product_docs/docs/mongo_data_adapter/5.2/images/EDB_logo.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5.2/images/ambari_administrative_interface.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/ambari_administrative_interface.png rename to product_docs/docs/mongo_data_adapter/5.2/images/ambari_administrative_interface.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5.2/images/edb_logo.svg similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/edb_logo.svg rename to product_docs/docs/mongo_data_adapter/5.2/images/edb_logo.svg diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/installation_complete.png b/product_docs/docs/mongo_data_adapter/5.2/images/installation_complete.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/installation_complete.png rename to product_docs/docs/mongo_data_adapter/5.2/images/installation_complete.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.2/images/installation_wizard_welcome_screen.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5.2/images/installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5.2/images/mongo_server_with_postgres.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/mongo_server_with_postgres.png rename to product_docs/docs/mongo_data_adapter/5.2/images/mongo_server_with_postgres.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5.2/images/progress_as_the_servers_restart.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/progress_as_the_servers_restart.png rename to product_docs/docs/mongo_data_adapter/5.2/images/progress_as_the_servers_restart.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5.2/images/restart_the_server.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/restart_the_server.png rename to product_docs/docs/mongo_data_adapter/5.2/images/restart_the_server.png diff --git 
a/product_docs/docs/mongo_data_adapter/5.2.8/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5.2/images/setup_wizard_ready.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/setup_wizard_ready.png rename to product_docs/docs/mongo_data_adapter/5.2/images/setup_wizard_ready.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5.2/images/specify_an_installation_directory.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/specify_an_installation_directory.png rename to product_docs/docs/mongo_data_adapter/5.2/images/specify_an_installation_directory.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.2/images/the_installation_wizard_welcome_screen.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.8/images/the_installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5.2/images/the_installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/index.mdx b/product_docs/docs/mongo_data_adapter/5.2/index.mdx similarity index 65% rename from product_docs/docs/mongo_data_adapter/5.3.0/index.mdx rename to product_docs/docs/mongo_data_adapter/5.2/index.mdx index 640c3591f3f..1d4674e69a0 100644 --- a/product_docs/docs/mongo_data_adapter/5.3.0/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2/index.mdx @@ -1,5 +1,7 @@ --- title: "MongoDB Foreign Data Wrapper Guide" +navigation: +- mongo_rel_notes --- The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. @@ -8,8 +10,4 @@ The MongoDB Foreign Data Wrapper can be installed with an RPM package. You can d This guide uses the term `Postgres` to refer to an instance of EDB Postgres Advanced Server. -
-whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion - -
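The writable access that the index above describes comes down to a handful of SQL commands. As a minimal sketch, assuming a MongoDB server on `127.0.0.1:27017` and the sample `testdb` database and `warehouse` collection used elsewhere in these docs (the role and credentials here are illustrative, not required values):

```text
-- Create the extension, a foreign server, and a user mapping,
-- then expose a MongoDB collection as a foreign table.
CREATE EXTENSION mongo_fdw;

CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw
    OPTIONS (host '127.0.0.1', port '27017');

CREATE USER MAPPING FOR enterprisedb SERVER mongo_server
    OPTIONS (username 'mongo_user', password 'mongo_pass');

CREATE FOREIGN TABLE test_json (__doc json) SERVER mongo_server
    OPTIONS (database 'testdb', collection 'warehouse');

-- Query the MongoDB collection with ordinary SQL.
SELECT * FROM test_json;
```

Each of these commands is covered in detail in the configuring and example sections of the guide.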
diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx new file mode 100644 index 00000000000..72f85025012 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx @@ -0,0 +1,17 @@ +--- +title: "Release notes" +redirects: + - ../01_whats_new/ +navigation: +- mongo5.2.9_rel_notes +- mongo5.2.8_rel_notes +--- + +The Mongo Foreign Data Wrapper documentation describes the latest version of Mongo Foreign Data Wrapper 5.2, including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, indicators within the content identify the release that introduced the feature. + +| Version | Release Date | +| ----------------------------- | ------------ | +| [5.2.9](mongo5.2.9_rel_notes) | 2021 Jun 24 | +| [5.2.8](mongo5.2.8_rel_notes) | 2020 Nov 23 | + + diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx new file mode 100644 index 00000000000..cc69d8943c4 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx @@ -0,0 +1,25 @@ +--- +title: "Version 5.2.8" +--- + +New features, enhancements, bug fixes, and other changes in Mongo Foreign Data Wrapper 5.2.8 include: + +| Type | Description | +| ----------- |--------------------------------------------- | +| Enhancement | Support for EDB Postgres Advanced Server 13. | +| Enhancement | Support for Ubuntu 20.04 LTS platform. | +| Enhancement | Updated LICENSE file. | +| Bug Fix | Fixed crash with COPY FROM and/or foreign partition routing operations. The crash was caused by Mongo Foreign Data Wrapper not supporting routable foreign-table partitions and/or executing COPY FROM on foreign tables. Instead of crashing, Mongo Foreign Data Wrapper now throws an error. | +| Bug Fix | Fixed issue where casting the target list produced 'NULL'. Correct results are now returned not only for explicit casts, but also for function calls or operators in the target list. | +| Bug Fix | Fixed the ReScanForeignScan API to make parameterized queries work correctly. Sub-select or correlated queries now use a parameterized plan. | +| Bug Fix | Changed the server port option's type from int32 to int16 to resolve compilation warnings. Meta driver APIs expect the port value as an unsigned short type, which resulted in a compilation warning on some gcc versions. | + + + + + + + + + + diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx new file mode 100644 index 00000000000..48bde0205e0 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx @@ -0,0 +1,20 @@ +--- +title: "Version 5.2.9" +--- + +New features, enhancements, bug fixes, and other changes in Mongo Foreign Data Wrapper 5.2.9 include: + +| Type | Description | +| ----------- |--------------------------------------------- | +| Enhancement | Updated mongo-c-driver to 1.17.3. | +| Enhancement | Updated json-c to 0.15. | +| Enhancement | Updated LICENSE file. | +| Bug Fix | Fixed crash with queries involving LEFT JOIN LATERAL. | +| Bug Fix | Restricted fetching PostgreSQL-specific system attributes from the remote relation to avoid a server crash.
| +| Bug Fix | Improved WHERE pushdown so that more conditions can be sent to the remote server. | + + + + + + diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5.3.0/10_limitations.mdx deleted file mode 100644 index acdd2f2383c..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/10_limitations.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "Limitations" ---- - - - -The following limitations apply to MongoDB Foreign Data Wrapper: - -- If the BSON document key contains uppercase letters or occurs within a nested document, MongoDB Foreign Data Wrapper requires the corresponding column names to be declared in double quotes. - -- PostgreSQL limits column names to 63 characters by default. If column names extend beyond 63 characters, you can increase the `NAMEDATALEN` constant in `src/include/pg_config_manual.h`, recompile, and reinstall. -- MongoDB Foreign Data Wrapper errors out on a BSON field that is not listed in the known types (for example, byte or arrays). It throws the error: `Cannot convert BSON type to column type`. diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/EDB_logo.png deleted file mode 100644 index f4a93cf57f5..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/EDB_logo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 -size 12136 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/ambari_administrative_interface.png deleted file mode 100755 index d44e42a740e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/ambari_administrative_interface.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b4acb08665b6a1df9494f91f9ab64a8f4d0979f61947e19162f419d134e351ea -size 150222 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5.3.0/images/edb_logo.svg deleted file mode 100644 index f24d1dfefee..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/edb_logo.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - edb-logo-disc-dark - - - - \ No newline at end of file diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_complete.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_complete.png deleted file mode 100755 index 311d632a71e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_complete.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e52a4437577b7a64d7f36c4f837b9a0fab90b163b201055bd817f0e3cbaf112a -size 39463 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_wizard_welcome_screen.png deleted file mode 100755 index aaf582bc781..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/installation_wizard_welcome_screen.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:85ea24919ac97d6f8ebb882da665c22e4d5c0942b8491faa5e07be8b93007b60 -size 38341 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/mongo_server_with_postgres.png deleted
file mode 100644 index 76915580c4c..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/mongo_server_with_postgres.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:480cdbe86e1f31a6fd03d26a86425a25d681e515e747217c0c3961cb0a36027c -size 49128 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/progress_as_the_servers_restart.png deleted file mode 100755 index 43523c7d1ad..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/progress_as_the_servers_restart.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:46a0feaf37642c3aa87fe8267259687dfa9c9571f1c2663297159ef98356e2fd -size 85080 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/restart_the_server.png deleted file mode 100755 index 2518b46d46d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/restart_the_server.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e612201379d56b4dffcfb4222ceb765532ca5d097504c1dbabdc6a812afaba9 -size 33996 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/setup_wizard_ready.png deleted file mode 100755 index 922e318868d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/setup_wizard_ready.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3ba6a1a88fe8a91b94571b57a36077fce7b3346e850a38f9bf015166ace93e36 -size 16833 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/specify_an_installation_directory.png deleted file mode 100755 index 208c85c46af..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/specify_an_installation_directory.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dae28ab7f567617da49816514a3fa5eb6161e611c416295cfe2f829cd941f98e -size 20596 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3.0/images/the_installation_wizard_welcome_screen.png deleted file mode 100755 index 2da19033b0e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3.0/images/the_installation_wizard_welcome_screen.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7fd52b490dd37c86dca15975a7dbc9bdd47c7ae4ab0912d1bf570d785c521f79 -size 33097 diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/01_5.3.0_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5.3/01_5.3.0_rel_notes.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/01_5.3.0_rel_notes.mdx rename to product_docs/docs/mongo_data_adapter/5.3/01_5.3.0_rel_notes.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/02_requirements_overview.mdx rename to product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/03_architecture_overview.mdx 
b/product_docs/docs/mongo_data_adapter/5.3/03_architecture_overview.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/03_architecture_overview.mdx rename to product_docs/docs/mongo_data_adapter/5.3/03_architecture_overview.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx rename to 
product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/index.mdx b/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/index.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/04_installing_the_mongo_data_adapter/index.mdx rename to product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/index.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/05_updating_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/05_updating_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.3/05_updating_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.3/06_features_of_mongo_fdw.mdx similarity index 100% rename from 
product_docs/docs/mongo_data_adapter/5.3.0/06_features_of_mongo_fdw.mdx rename to product_docs/docs/mongo_data_adapter/5.3/06_features_of_mongo_fdw.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/07_configuring_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/07_configuring_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.3/07_configuring_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/08_example_using_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/08a_example_join_pushdown.mdx b/product_docs/docs/mongo_data_adapter/5.3/08a_example_join_pushdown.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/08a_example_join_pushdown.mdx rename to product_docs/docs/mongo_data_adapter/5.3/08a_example_join_pushdown.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/09_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/09_identifying_data_adapter_version.mdx rename to product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/10_limitations.mdx rename to product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3.0/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3.0/11_uninstalling_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/EDB_logo.png rename to product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/ambari_administrative_interface.png rename to product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/edb_logo.svg rename to product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png 
b/product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/installation_complete.png rename to product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/mongo_server_with_postgres.png rename to product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/progress_as_the_servers_restart.png rename to product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/restart_the_server.png rename to product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/setup_wizard_ready.png rename to product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/specify_an_installation_directory.png rename to product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/images/the_installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.2.9/index.mdx b/product_docs/docs/mongo_data_adapter/5.3/index.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2.9/index.mdx rename to product_docs/docs/mongo_data_adapter/5.3/index.mdx diff --git a/static/_redirects b/static/_redirects index 88780c5f7a2..4c61005828f 100644 --- a/static/_redirects +++ b/static/_redirects @@ -127,6 +127,10 @@ /docs/ocl_connector/13.1.4.1/* /docs/ocl_connector/latest/ 301 /docs/odbc_connector/12.0.0.1/* /docs/odbc_connector/latest/ 301 
/docs/odbc_connector/12.2.0.1/* /docs/odbc_connector/latest/ 301 +# Collapsed versions +/docs/mongo_data_adapter/5.2.7/* /docs/mongo_data_adapter/5.2/:splat 301 +/docs/mongo_data_adapter/5.2.8/* /docs/mongo_data_adapter/5.2/:splat 301 +/docs/mongo_data_adapter/5.3.0/* /docs/mongo_data_adapter/5.3/:splat 301 # BigAnimal /docs/edbcloud/* /docs/biganimal/:splat 301 From 3c8c6bd5379f3407703a6c77e02f9dd6818bcc0e Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Mon, 14 Mar 2022 17:00:41 -0400 Subject: [PATCH 03/12] consolidated to major (5) --- .../5.2/02_requirements_overview.mdx | 24 - .../5.2/03_architecture_overview.mdx | 9 - .../04_installing_the_mongo_data_adapter.mdx | 291 ------------ .../05_updating_the_mongo_data_adapter.mdx | 45 -- .../5.2/06_features_of_mongo_fdw.mdx | 71 --- .../07_configuring_the_mongo_data_adapter.mdx | 440 ------------------ .../5.3/02_requirements_overview.mdx | 29 -- ...8_example_using_the_mongo_data_adapter.mdx | 113 ----- .../09_identifying_data_adapter_version.mdx | 19 - .../mongo_data_adapter/5.3/10_limitations.mdx | 11 - ...11_uninstalling_the_mongo_data_adapter.mdx | 27 -- .../5.3/images/EDB_logo.png | 3 - .../ambari_administrative_interface.png | 3 - .../5.3/images/edb_logo.svg | 19 - .../5.3/images/installation_complete.png | 3 - .../installation_wizard_welcome_screen.png | 3 - .../5.3/images/mongo_server_with_postgres.png | 3 - .../progress_as_the_servers_restart.png | 3 - .../5.3/images/restart_the_server.png | 3 - .../5.3/images/setup_wizard_ready.png | 3 - .../specify_an_installation_directory.png | 3 - ...the_installation_wizard_welcome_screen.png | 3 - .../docs/mongo_data_adapter/5.3/index.mdx | 15 - .../5/02_requirements_overview.mdx | 14 + .../{5.3 => 5}/03_architecture_overview.mdx | 0 .../01_mongo_rhel8_x86.mdx | 0 .../02_mongo_other_linux8_x86.mdx | 0 .../03_mongo_rhel7_x86.mdx | 0 .../04_mongo_centos7_x86.mdx | 0 .../05_mongo_sles15_x86.mdx | 0 .../07_mongo_sles12_x86.mdx | 0 .../09_mongo_ubuntu20_deb10_x86.mdx | 0 .../11_mongo_ubuntu18_deb9_x86.mdx | 0 .../13_mongo_rhel8_ppcle.mdx | 0 .../15_mongo_rhel7_ppcle.mdx | 0 .../17_mongo_sles15_ppcle.mdx | 0 .../19_mongo_sles12_ppcle.mdx | 0 .../index.mdx | 0 .../05_updating_the_mongo_data_adapter.mdx | 0 .../{5.3 => 5}/06_features_of_mongo_fdw.mdx | 0 .../07_configuring_the_mongo_data_adapter.mdx | 0 ...8_example_using_the_mongo_data_adapter.mdx | 0 .../{5.3 => 5}/08a_example_join_pushdown.mdx | 0 .../09_identifying_data_adapter_version.mdx | 0 .../{5.2 => 5}/10_limitations.mdx | 0 ...11_uninstalling_the_mongo_data_adapter.mdx | 0 .../{5.2 => 5}/images/EDB_logo.png | 0 .../ambari_administrative_interface.png | 0 .../{5.2 => 5}/images/edb_logo.svg | 0 .../images/installation_complete.png | 0 .../installation_wizard_welcome_screen.png | 0 .../images/mongo_server_with_postgres.png | 0 .../progress_as_the_servers_restart.png | 0 .../{5.2 => 5}/images/restart_the_server.png | 0 .../{5.2 => 5}/images/setup_wizard_ready.png | 0 .../specify_an_installation_directory.png | 0 ...the_installation_wizard_welcome_screen.png | 0 .../mongo_data_adapter/{5.2 => 5}/index.mdx | 2 +- .../{5.2 => 5}/mongo_rel_notes/index.mdx | 5 +- .../mongo_rel_notes/mongo5.2.8_rel_notes.mdx | 0 .../mongo_rel_notes/mongo5.2.9_rel_notes.mdx | 0 .../mongo_rel_notes/mongo5.3.0_rel_notes.mdx} | 4 +- static/_redirects | 6 +- 63 files changed, 24 insertions(+), 1150 deletions(-) delete mode 100644 product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx delete mode 100644 
product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png delete mode 100755 product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png delete mode 100644 product_docs/docs/mongo_data_adapter/5.3/index.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/03_architecture_overview.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 
5}/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/04_installing_the_mongo_data_adapter/index.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/05_updating_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/06_features_of_mongo_fdw.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/07_configuring_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/08_example_using_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3 => 5}/08a_example_join_pushdown.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/09_identifying_data_adapter_version.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/10_limitations.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/11_uninstalling_the_mongo_data_adapter.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/EDB_logo.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/ambari_administrative_interface.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/edb_logo.svg (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/installation_complete.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/installation_wizard_welcome_screen.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/mongo_server_with_postgres.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/progress_as_the_servers_restart.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/restart_the_server.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/setup_wizard_ready.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/specify_an_installation_directory.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/images/the_installation_wizard_welcome_screen.png (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/index.mdx (98%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/mongo_rel_notes/index.mdx (80%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/mongo_rel_notes/mongo5.2.8_rel_notes.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.2 => 5}/mongo_rel_notes/mongo5.2.9_rel_notes.mdx (100%) rename product_docs/docs/mongo_data_adapter/{5.3/01_5.3.0_rel_notes.mdx => 5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx} (89%) diff --git a/product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx deleted file mode 100644 index 3152648b763..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/02_requirements_overview.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Requirements Overview" ---- - -## Supported Versions - -The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 10 and above. 
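Before installing, it can be worth confirming that the target cluster meets this version requirement; a quick check from any SQL client (the output shown depends on your installation):

```text
SHOW server_version;
```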
- -## Supported Platforms - -The MongoDB Foreign Data Wrapper is supported on the following platforms: - -**Linux x86-64** - - - RHEL 8.x/7.x - - Rocky Linux/AlmaLinux 8.x - - CentOS 7.x - - OL 8.x/7.x - - Ubuntu 20.04/18.04 LTS - - Debian 10.x/9.x - -**Linux on IBM Power8/9 (LE)** - - - RHEL 7.x diff --git a/product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx deleted file mode 100644 index 3e48035f7a0..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/03_architecture_overview.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Architecture Overview" ---- - - - -The MongoDB data wrapper provides an interface between a MongoDB server and a Postgres database. It transforms a Postgres statement (`SELECT`/`INSERT`/`DELETE`/`UPDATE`) into a query that is understood by the MongoDB database. - -![Using MongoDB FDW with Postgres](images/mongo_server_with_postgres.png) diff --git a/product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx deleted file mode 100644 index 8f2ce92d8c5..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/04_installing_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: "Installing the MongoDB Foreign Data Wrapper" ---- - - - - -The MongoDB Foreign Data Wrapper can be installed with an RPM package. During the installation process, the installer will satisfy software prerequisites. If yum encounters a dependency that it cannot resolve, it will provide a list of the required dependencies that you must manually resolve. - - - -## Installing the MongoDB Foreign Data Wrapper using an RPM Package - -You can install the MongoDB Foreign Data Wrapper using an RPM package on the following platforms: - -- [RHEL or CentOS 7 PPCLE](#rhel_centos7_PPCLE) -- [RHEL 7](#rhel7) -- [RHEL 8](#rhel8) -- [CentOS 7](#centos7) -- [Rocky Linux/AlmaLinux 8](#centos8) - - - -### On RHEL or CentOS 7 PPCLE - -1. Use the following command to create a configuration file and install Advance Toolchain: - - ```text - rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - - cat > /etc/yum.repos.d/advance-toolchain.repo <<EOF - # Begin of configuration file - [advance-toolchain] - name=Advance Toolchain IBM FTP - baseurl=https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7 - failovermethod=priority - enabled=1 - gpgcheck=1 - gpgkey=https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b - # End of configuration file - EOF - ``` - -2. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -3. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -4. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -5. On RHEL 7 PPCLE, enable the additional repositories to resolve EPEL dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -6. Install the selected package: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - - where `xx` is the server version number. - - - - -### On RHEL 7 - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -4.
Enable the additional repositories to resolve dependencies: - - ```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" - ``` - -5. Install the selected package: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - - where `xx` is the server version number. - - - - - - -### On RHEL 8 - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - ``` - -4. Enable the additional repositories to resolve dependencies: - - ```text - ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" - ``` - -5. Disable the built-in PostgreSQL module: - - ```text - dnf -qy module disable postgresql - ``` -6. Install the selected package: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - - where `xx` is the server version number. - - - - - -### On CentOS 7 - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - ``` - -4. Install the selected package: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - - where `xx` is the server version number. - - - - -### On Rocky Linux/AlmaLinux 8 - - -1. To create the repository configuration file, assume superuser privileges, and invoke the following command: - - ```text - dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm - ``` - -2. Replace ‘USERNAME:PASSWORD’ below with your username and password for the EDB repositories: - - ```text - sed -i "s@<username>:<password>@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo - ``` - -3. Install the EPEL repository: - - ```text - dnf -y install epel-release - ``` - -4. Enable the additional repositories to resolve dependencies: - - ```text - dnf config-manager --set-enabled PowerTools - ``` - -5. Disable the built-in PostgreSQL module: - - ```text - dnf -qy module disable postgresql - ``` -6. Install the selected package: - - ```text - dnf install edb-as<xx>-mongo_fdw - ``` - - where `xx` is the server version number. - - -## Installing the MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host - -To install the MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/repository-access-request/). - -The following steps will walk you through using the EDB apt repository to install a Debian package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. - -1. Assume superuser privileges: - - ```text - sudo su - - ``` - -2.
Configure the EDB repository: - - On Debian 9 and Ubuntu: - - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` - - On Debian 10: - - 1. Set up the EDB repository: - - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` - - 1. Substitute your EDB credentials for the `username` and `password` in the following command: - - ```text - sh -c 'echo "machine apt.enterprisedb.com login <username> password <password>" > /etc/apt/auth.conf.d/edb.conf' - ``` - -3. Add support to your system for secure APT repositories: - - ```text - apt-get install apt-transport-https - ``` - -4. Add the EDB signing key: - - ```text - wget -q -O - https://<username>:<password>@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` - -5. Update the repository metadata: - - ```text - apt-get update - ``` - -6. Install the Debian package: - - ```text - apt-get install edb-as<xx>-mongo-fdw - ``` - - where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx deleted file mode 100644 index d5924581f4f..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/05_updating_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "Updating the MongoDB Foreign Data Wrapper" ---- - - - -## Updating an RPM Installation - -If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: - -- On RHEL or CentOS 7: - - `yum upgrade edb-repo` - -- On RHEL or CentOS 7 on PPCLE: - - `yum upgrade edb-repo` - -- On RHEL or Rocky Linux or AlmaLinux 8: - - `dnf upgrade edb-repo` - -yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: - -- On RHEL or CentOS 7: - - `yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-libs` - -- On RHEL or CentOS 7 on PPCLE: - - `yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-libs` - -- On RHEL or Rocky Linux or AlmaLinux 8: - - `dnf upgrade edb-as<xx>-mongo_fdw` - - where `xx` is the server version number. - -## Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host - -To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, use the following command: - - `apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc` - - where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx deleted file mode 100644 index 972cdcb480d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/06_features_of_mongo_fdw.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "Features of the MongoDB Foreign Data Wrapper" ---- - - - -The key features of the MongoDB Foreign Data Wrapper are listed below: - -## Writable FDW - -The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server.
Users can `INSERT`, `UPDATE`, and `DELETE` data in the remote MongoDB collections by inserting, updating, and deleting data locally in foreign tables. - -See also: - -- [Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) - -- [Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) - -## WHERE Clause Push-down - -MongoDB Foreign Data Wrapper allows push-down of a `WHERE` clause only when the clause includes comparison expressions that have a column and a constant as arguments. `WHERE` clause push-down is not supported where the constant is an array. - -## Connection Pooling - -The MongoDB Foreign Data Wrapper establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. This connection is kept and reused for subsequent queries in the same session. - -## Automated Cleanup - -The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose. The syntax of a `DROP EXTENSION` command is: - - `DROP EXTENSION mongo_fdw CASCADE;` - -For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). - -## Full Document Retrieval - -This feature allows you to retrieve documents, along with all their fields, from a collection without prior knowledge of the fields in the BSON documents stored in MongoDB. The retrieved documents are in JSON format. - -You can retrieve all available fields in a MongoDB collection through the MongoDB Foreign Data Wrapper, as the following example shows: - -**Example**: - -```text -> db.warehouse.find(); -{ "_id" : ObjectId("58a1ebbaf543ec0b90545859"), "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : ISODate("2014-12-12T07:12:10Z") } -{ "_id" : ObjectId("58a1ebbaf543ec0b9054585a"), "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : ISODate("2015-11-11T08:13:10Z") } -``` - -Steps for retrieving the document: - -1. Create a foreign table with a column named `__doc`. The type of the column can be json, jsonb, text, or varchar. - -```text -CREATE FOREIGN TABLE test_json(__doc json) SERVER mongo_server OPTIONS (database 'testdb', collection 'warehouse'); -``` - -2. Retrieve the document.
- -```text -SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; -``` - -The output: - -```text -edb=# SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; - __doc --------------------------------------------------------------------------------------------------------------------------------------------------------- -{ "_id" : { "$oid" : "58a1ebbaf543ec0b90545859" }, "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : { "$date" : 1418368330000 } } -{ "_id" : { "$oid" : "58a1ebbaf543ec0b9054585a" }, "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : { "$date" : 1447229590000 } } -(2 rows) -``` diff --git a/product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx deleted file mode 100644 index 8d00a8308c2..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2/07_configuring_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,440 +0,0 @@ ---- -title: "Configuring the MongoDB Foreign Data Wrapper" ---- - - - -Before using the MongoDB Foreign Data Wrapper, you must: - - 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. - 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. - 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. - 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a collection that resides on the MongoDB cluster. - - - -## CREATE EXTENSION - -Use the `CREATE EXTENSION` command to create the `mongo_fdw` extension. Use your client of choice (for example, psql) to connect to the Postgres database from which you will query the MongoDB server, and invoke the command: - -```text -CREATE EXTENSION [IF NOT EXISTS] mongo_fdw [WITH] [SCHEMA schema_name]; -``` - -**Parameters** - -`IF NOT EXISTS` - - Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. - -`schema_name` - - Optionally specify the name of the schema in which to install the extension's objects. - -**Example** - -The following command installs the MongoDB foreign data wrapper: - - `CREATE EXTENSION mongo_fdw;` - -For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: - - <https://www.postgresql.org/docs/current/sql-createextension.html>. - - - -## CREATE SERVER - -Use the `CREATE SERVER` command to define a connection to a foreign server. The syntax is: - -```text -CREATE SERVER server_name FOREIGN DATA WRAPPER mongo_fdw - [OPTIONS (option 'value' [, ...])] -``` - -The role that defines the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To create a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `CREATE SERVER` command. - -**Parameters** - -`server_name` - - Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. - -`FOREIGN DATA WRAPPER` - - Include the `FOREIGN DATA WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. - -`OPTIONS` - - Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object.
-For more information about using the `CREATE SERVER` command, see:
-
- <https://www.postgresql.org/docs/current/sql-createserver.html>
-
-## CREATE USER MAPPING
-
-Use the `CREATE USER MAPPING` command to define a mapping that associates a Postgres role with a foreign server:
-
-```text
-CREATE USER MAPPING FOR role_name SERVER server_name
-    [OPTIONS (option 'value' [, ...])];
-```
-
-You must be the owner of the foreign server to create a user mapping for that server.
-
-**Parameters**
-
-`role_name`
-
- Use `role_name` to specify the role to associate with the foreign server.
-
-`server_name`
-
- Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster.
-
-`OPTIONS`
-
- Use the `OPTIONS` clause to specify connection information for the foreign server.
-
- `username`: the name of the user on the MongoDB server.
-
- `password`: the password associated with the username.
-
-**Example**
-
-The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`:
-
- `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;`
-
-If the database host uses secure authentication, provide connection credentials when creating the user mapping:
-
-```text
-CREATE USER MAPPING FOR enterprisedb SERVER mongo_server OPTIONS (username 'mongo_user', password 'mongo_pass');
-```
-
-The command creates a user mapping for a role named `enterprisedb` that is associated with a server named `mongo_server`. When connecting to the MongoDB server, the server authenticates as `mongo_user` and provides a password of `mongo_pass`.
-
-For detailed information about the `CREATE USER MAPPING` command, see:
-
- <https://www.postgresql.org/docs/current/sql-createusermapping.html>
-
-## CREATE FOREIGN TABLE
-
-A foreign table is a pointer to a collection that resides on the MongoDB host. Before creating a foreign table definition on the Postgres server, connect to the MongoDB server and create a collection; the fields of the collection map to the columns of the foreign table on the Postgres server.
-Then, use the `CREATE FOREIGN TABLE` command to define a table on the Postgres server with columns that correspond to the fields of the collection that resides on the MongoDB host. The syntax is:
-
-```text
-CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [
-  { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ]
-    | table_constraint }
-    [, ... ]
-] )
-[ INHERITS ( parent_table [, ... ] ) ]
-  SERVER server_name [ OPTIONS ( option 'value' [, ... ] ) ]
-```
-
-where `column_constraint` is:
-
-```text
-[ CONSTRAINT constraint_name ]
-{ NOT NULL | NULL | CHECK (expr) [ NO INHERIT ] | DEFAULT default_expr }
-```
-
-and `table_constraint` is:
-
-```text
-[ CONSTRAINT constraint_name ] CHECK (expr) [ NO INHERIT ]
-```
-
-**Parameters**
-
-`table_name`
-
- Specify the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside.
-
-`IF NOT EXISTS`
-
- Include the `IF NOT EXISTS` clause to instruct the server not to throw an error if a table with the same name already exists; if a table with the same name exists, the server issues a notice instead.
-
-`column_name`
-
- Specify the name of a column in the new table; each column should correspond to a field in the collection on the MongoDB server.
-
-`data_type`
-
- Specify the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server attempts to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it returns an error.
-
-`COLLATE collation`
-
- Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used.
-
-`INHERITS (parent_table [, ... ])`
-
- Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables.
-
-`CONSTRAINT constraint_name`
-
- Specify an optional name for a column or table constraint; if not specified, the server generates a constraint name.
-
-`NOT NULL`
-
- Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values.
-
-`NULL`
-
- Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default.
-
-`CHECK (expr) [NO INHERIT]`
-
- Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference only that column's value, while an expression appearing in a table constraint can reference multiple columns.
-
- A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row.
-
- Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables.
-
-`DEFAULT default_expr`
-
- Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column.
-
-`SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]`
-
- To create a foreign table that allows you to query a collection that resides on the MongoDB server, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter.
-
- Use the `OPTIONS` clause to specify the following `options` and their corresponding values:
-
-| option | value |
-| ---------- | --------------------------------------------------------------------------------- |
-| database | The name of the database to query. The default value is `test`. |
-| collection | The name of the collection to query. The default value is the foreign table name. |
-
-**Example**
-
-To use data that is stored on a MongoDB server, you must create a table on the Postgres host that maps the columns of a MongoDB collection to the columns of a Postgres table. For example, for a MongoDB collection with the following definition:
-
-```text
-db.warehouse.find
-(
-  {
-    "warehouse_id" : 1
-  }
-).pretty()
-{
-  "_id" : ObjectId("53720b1904864dc1f5a571a0"),
-  "warehouse_id" : 1,
-  "warehouse_name" : "UPS",
-  "warehouse_created" : ISODate("2014-12-12T07:12:10Z")
-}
-```
-
-You should execute a command on the Postgres server that creates a comparable table on the Postgres server:
-
-```text
-CREATE FOREIGN TABLE warehouse
-(
-  _id NAME,
-  warehouse_id INT,
-  warehouse_name TEXT,
-  warehouse_created TIMESTAMPTZ
-)
-SERVER mongo_server
-OPTIONS (database 'db', collection 'warehouse');
-```
-
-The first column of the table must be `_id` of the type `name`.
-
-Include the `OPTIONS` clause to specify the name of the database stored on the MongoDB server (`db`) and the name of the collection (`warehouse`) that corresponds to the table on the Postgres server.
-
-For more information about using the `CREATE FOREIGN TABLE` command, see:
-
- <https://www.postgresql.org/docs/current/sql-createforeigntable.html>
-
-!!! Note
-    MongoDB foreign data wrapper supports the write capability feature.
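For instance, the following sketch (patterned on the full example later in this guide, which supplies a placeholder `0` for the generated `_id` value on insert) exercises that write capability against the `warehouse` foreign table defined above:

```text
-- Hypothetical write operations against the warehouse foreign table;
-- 0 stands in for the _id value, which MongoDB generates on insert
INSERT INTO warehouse VALUES (0, 3, 'Cargo', '2016-01-01T00:00:00Z');

UPDATE warehouse SET warehouse_name = 'Cargo_NEW' WHERE warehouse_id = 3;

DELETE FROM warehouse WHERE warehouse_id = 3;
```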
-### Data Type Mappings
-
-When using the foreign data wrapper, you must create a table on the Postgres server that mirrors the collection that resides on the MongoDB server. The MongoDB data wrapper automatically converts the following MongoDB data types to the target Postgres type:
-
-| **MongoDB (BSON Type)** | **Postgres** |
-| ---------------------------- | ---------------------------------------- |
-| ARRAY | JSON |
-| BOOL | BOOL |
-| BINARY | BYTEA |
-| DATE_TIME | DATE/TIMESTAMP/TIMESTAMPTZ |
-| DOCUMENT | JSON |
-| DOUBLE | FLOAT/FLOAT4/FLOAT8/DOUBLE PRECISION/NUMERIC |
-| INT32 | SMALLINT/INT2/INT/INTEGER/INT4 |
-| INT64 | BIGINT/INT8 |
-| OID | NAME |
-| UTF8 | BPCHAR/VARCHAR/CHARACTER VARYING/TEXT |
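As an illustrative sketch of these mappings (the `orders` collection and its fields are hypothetical, not part of the examples above), embedded documents and arrays surface as JSON columns:

```text
CREATE FOREIGN TABLE orders
(
    _id      NAME,     -- BSON OID
    order_id INTEGER,  -- BSON INT32
    amount   NUMERIC,  -- BSON DOUBLE
    details  JSON,     -- embedded BSON DOCUMENT
    tags     JSON      -- BSON ARRAY
)
SERVER mongo_server
OPTIONS (database 'db', collection 'orders');
```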
-## DROP EXTENSION
-
-Use the `DROP EXTENSION` command to remove an extension. Use your client of choice (for example, psql) to connect to the Postgres database from which you're dropping the extension, and run the command:
-
-```text
-DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ];
-```
-
-**Parameters**
-
-`IF EXISTS`
-
- Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exist.
-
-`name`
-
- Specify the name of the installed extension.
-
-`CASCADE`
-
- Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects.
-
-`RESTRICT`
-
- Refuse to drop the extension if any objects depend on it, other than its member objects and other extensions listed in the same `DROP` command. This is the default.
-
-**Example**
-
-The following command removes the extension from the existing database:
-
- `DROP EXTENSION mongo_fdw;`
-
-For more information about using the foreign data wrapper `DROP EXTENSION` command, see:
-
- <https://www.postgresql.org/docs/current/sql-dropextension.html>.
-
-## DROP SERVER
-
-Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is:
-
-```text
-DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
-```
-
-To drop a foreign server, you must be its owner; use the `ALTER SERVER` command to reassign ownership of a foreign server first if necessary.
-
-**Parameters**
-
-`IF EXISTS`
-
- Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exist.
-
-`name`
-
- Specify the name of the installed server.
-
-`CASCADE`
-
- Automatically drop objects that depend on the server, and in turn all objects that depend on those objects.
-
-`RESTRICT`
-
- Refuse to drop the server if any objects depend on it. This is the default.
-
-**Example**
-
-The following command removes a foreign server named `mongo_server`:
-
- `DROP SERVER mongo_server;`
-
-For more information about using the `DROP SERVER` command, see:
-
- <https://www.postgresql.org/docs/current/sql-dropserver.html>
-
-## DROP USER MAPPING
-
-Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server.
-
-```text
-DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name;
-```
-
-**Parameters**
-
-`IF EXISTS`
-
- Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist.
-
-`user_name`
-
- Specify the user name of the mapping.
-
-`server_name`
-
- Specify the name of the server that defines a connection to the MongoDB cluster.
-
-**Example**
-
-The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`:
-
- `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;`
-
-For detailed information about the `DROP USER MAPPING` command, see:
-
- <https://www.postgresql.org/docs/current/sql-dropusermapping.html>
-
-## DROP FOREIGN TABLE
-
-A foreign table is a pointer to a collection that resides on the MongoDB host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it.
-
-```text
-DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
-```
-
-**Parameters**
-
-`IF EXISTS`
-
- Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exist.
-
-`name`
-
- Specify the name of the foreign table.
-
-`CASCADE`
-
- Automatically drop objects that depend on the foreign table, and in turn all objects that depend on those objects.
-
-`RESTRICT`
-
- Refuse to drop the foreign table if any objects depend on it. This is the default.
-
-**Example**
-
-```text
-DROP FOREIGN TABLE warehouse;
-```
-
-For more information about using the `DROP FOREIGN TABLE` command, see:
-
- <https://www.postgresql.org/docs/current/sql-dropforeigntable.html>
diff --git a/product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx
deleted file mode 100644
index 556aa99216f..00000000000
--- a/product_docs/docs/mongo_data_adapter/5.3/02_requirements_overview.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: "Requirements Overview"
----
-
-## Supported Versions
-
-The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 10 and above.
- -## Supported Platforms - -The MongoDB Foreign Data Wrapper is supported on the following platforms: - -**Linux x86-64** - - - RHEL 8/OL 8 - - Rocky Linux 8/AlmaLinux 8 - - RHEL 7/OL 7 - - CentOS 7 - - SLES 15 - - SLES 12 - - Ubuntu 20.04/18.04 LTS - - Debian 10.x/9.x - -**Linux on IBM Power (ppc64le)** - - - RHEL 8 - - RHEL 7 - - SLES 15 - - SLES 12 diff --git a/product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx deleted file mode 100644 index 38f2f35b122..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/08_example_using_the_mongo_data_adapter.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: "Example: Using the MongoDB Foreign Data Wrapper" ---- - - - -Before using the MongoDB foreign data wrapper, you must connect to your database with a client application. The following examples demonstrate using the wrapper with the psql client. After connecting to psql, you can follow the steps in the example below: - -```text --- load extension first time after install -CREATE EXTENSION mongo_fdw; - --- create server object -CREATE SERVER mongo_server - FOREIGN DATA WRAPPER mongo_fdw - OPTIONS (address '127.0.0.1', port '27017'); - --- create user mapping -CREATE USER MAPPING FOR enterprisedb - SERVER mongo_server - OPTIONS (username 'mongo_user', password 'mongo_pass'); - --- create foreign table -CREATE FOREIGN TABLE warehouse - ( - _id name, - warehouse_id int, - warehouse_name text, - warehouse_created timestamptz - ) - SERVER mongo_server - OPTIONS (database 'db', collection 'warehouse'); - --- Note: first column of the table must be "_id" of type "name". - --- select from table -SELECT * FROM warehouse WHERE warehouse_id = 1; - _id | warehouse_id | warehouse_name | warehouse_created ---------------------------+--------------+----------------+--------------------------- - 53720b1904864dc1f5a571a0 | 1 | UPS | 2014-12-12 12:42:10+05:30 -(1 row) - -db.warehouse.find -( - { - "warehouse_id" : 1 - } -).pretty() -{ - "_id" : ObjectId("53720b1904864dc1f5a571a0"), - "warehouse_id" : 1, - "warehouse_name" : "UPS", - "warehouse_created" : ISODate("2014-12-12T07:12:10Z") -} - --- insert row in table -INSERT INTO warehouse VALUES (0, 2, 'Laptop', '2015-11-11T08:13:10Z'); - -db.warehouse.insert -( - { - "warehouse_id" : NumberInt(2), - "warehouse_name" : "Laptop", - "warehouse_created" : ISODate("2015-11-11T08:13:10Z") - } -) - --- delete row from table -DELETE FROM warehouse WHERE warehouse_id = 2; - -db.warehouse.remove -( - { - "warehouse_id" : 2 - } -) - --- update a row of table -UPDATE warehouse SET warehouse_name = 'UPS_NEW' WHERE warehouse_id = 1; - -db.warehouse.update -( - { - "warehouse_id" : 1 - }, - { - "warehouse_id" : 1, - "warehouse_name" : "UPS_NEW", - "warehouse_created" : ISODate("2014-12-12T07:12:10Z") - } -) - --- explain a table -EXPLAIN SELECT * FROM warehouse WHERE warehouse_id = 1; - QUERY PLAN ------------------------------------------------------------------ - Foreign Scan on warehouse (cost=0.00..0.00 rows=1000 width=84) - Filter: (warehouse_id = 1) - Foreign Namespace: db.warehouse -(3 rows) - --- collect data distribution statistics -ANALYZE warehouse; - --- drop foreign table -DROP FOREIGN TABLE warehouse; - --- drop user mapping -DROP USER MAPPING FOR enterprisedb SERVER mongo_server; - --- drop server -DROP SERVER mongo_server; -``` diff --git a/product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx 
b/product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx
deleted file mode 100644
index b1d0564acc4..00000000000
--- a/product_docs/docs/mongo_data_adapter/5.3/09_identifying_data_adapter_version.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: "Identifying the MongoDB Foreign Data Wrapper Version"
----
-
-The MongoDB Foreign Data Wrapper includes a function that you can use to identify the currently installed version of the `.so` file for the data wrapper. To use the function, connect to the Postgres server and enter:
-
-```text
-SELECT mongo_fdw_version();
-```
-
-The function returns the version number:
-
-```text
-mongo_fdw_version
------------------
-
-```
diff --git a/product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx
deleted file mode 100644
index acdd2f2383c..00000000000
--- a/product_docs/docs/mongo_data_adapter/5.3/10_limitations.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "Limitations"
----
-
-The following limitations apply to MongoDB Foreign Data Wrapper:
-
-- If the BSON document key contains uppercase letters or occurs within a nested document, MongoDB Foreign Data Wrapper requires the corresponding column names to be declared in double quotes.
-- PostgreSQL limits column names to 63 characters by default. If column names extend beyond 63 characters, you can increase the `NAMEDATALEN` constant in `src/include/pg_config_manual.h`, recompile, and reinstall.
-- MongoDB Foreign Data Wrapper errors out on BSON fields that aren't listed among the known types (for example, byte or array). It throws the error: `Cannot convert BSON type to column type`.
diff --git a/product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx
deleted file mode 100644
index 5312c8fb933..00000000000
--- a/product_docs/docs/mongo_data_adapter/5.3/11_uninstalling_the_mongo_data_adapter.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "Uninstalling the MongoDB Foreign Data Wrapper"
----
-
-## Uninstalling an RPM Package
-
-You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command:
-
-- On RHEL or CentOS 7:
-
-  `yum remove edb-as-mongo_fdw`
-
-- On RHEL or Rocky Linux or AlmaLinux 8:
-
-  `dnf remove edb-as-mongo_fdw`
-
-Where `xx` is the server version number.
-
-## Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host
-
-- To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command.
-
-  `apt-get remove edb-as-mongo-fdw`
-
-Where `xx` is the server version number.
diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png deleted file mode 100644 index f4a93cf57f5..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/EDB_logo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 -size 12136 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png deleted file mode 100755 index d44e42a740e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/ambari_administrative_interface.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b4acb08665b6a1df9494f91f9ab64a8f4d0979f61947e19162f419d134e351ea -size 150222 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg deleted file mode 100644 index f24d1dfefee..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/edb_logo.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - edb-logo-disc-dark - - - - \ No newline at end of file diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png b/product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png deleted file mode 100755 index 311d632a71e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/installation_complete.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e52a4437577b7a64d7f36c4f837b9a0fab90b163b201055bd817f0e3cbaf112a -size 39463 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png deleted file mode 100755 index aaf582bc781..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/installation_wizard_welcome_screen.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:85ea24919ac97d6f8ebb882da665c22e4d5c0942b8491faa5e07be8b93007b60 -size 38341 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png deleted file mode 100644 index 76915580c4c..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/mongo_server_with_postgres.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:480cdbe86e1f31a6fd03d26a86425a25d681e515e747217c0c3961cb0a36027c -size 49128 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png deleted file mode 100755 index 43523c7d1ad..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/progress_as_the_servers_restart.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:46a0feaf37642c3aa87fe8267259687dfa9c9571f1c2663297159ef98356e2fd -size 85080 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png deleted file mode 100755 index 2518b46d46d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/restart_the_server.png +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:9e612201379d56b4dffcfb4222ceb765532ca5d097504c1dbabdc6a812afaba9 -size 33996 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png deleted file mode 100755 index 922e318868d..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/setup_wizard_ready.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3ba6a1a88fe8a91b94571b57a36077fce7b3346e850a38f9bf015166ace93e36 -size 16833 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png deleted file mode 100755 index 208c85c46af..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/specify_an_installation_directory.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dae28ab7f567617da49816514a3fa5eb6161e611c416295cfe2f829cd941f98e -size 20596 diff --git a/product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png deleted file mode 100755 index 2da19033b0e..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/images/the_installation_wizard_welcome_screen.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7fd52b490dd37c86dca15975a7dbc9bdd47c7ae4ab0912d1bf570d785c521f79 -size 33097 diff --git a/product_docs/docs/mongo_data_adapter/5.3/index.mdx b/product_docs/docs/mongo_data_adapter/5.3/index.mdx deleted file mode 100644 index 640c3591f3f..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.3/index.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: "MongoDB Foreign Data Wrapper Guide" ---- - -The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. - -The MongoDB Foreign Data Wrapper can be installed with an RPM package. You can download an installer from the [EDB website](https://www.enterprisedb.com/software-downloads-postgres/). - -This guide uses the term `Postgres` to refer to an instance of EDB Postgres Advanced Server. - -
- -whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion - -
diff --git a/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx new file mode 100644 index 00000000000..acbbc27b6fa --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx @@ -0,0 +1,14 @@ +--- +title: "Supported Platforms" +--- + + This table lists the latest MongoDB Foreign Data Wrapper versions and their supported corresponding EDB Postgres Advanced Server (EPAS) versions. MongoDB Foreign Data Wrapper is supported on the same platforms as EDB Postgres Advanced Server. See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#epas) for details. + +| MongoDB Foreign Data Wrapper | EPAS 14 | EPAS 13 | EPAS 12 | EPAS 11 | EPAS 10 | +| --------- | ------- | ------- | ------- | ------- | ------- | +| 5.3.0 | Y | Y | Y | Y | Y | +| 5.2.9 | N | Y | Y | Y | Y | +| 5.2.8 | N | N | N | N | N | +| 5.2.6 | N | N | Y | N | N | +| 5.2.3 | N | Y | N | Y | N | + diff --git a/product_docs/docs/mongo_data_adapter/5.3/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5/03_architecture_overview.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/03_architecture_overview.mdx rename to product_docs/docs/mongo_data_adapter/5/03_architecture_overview.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/01_mongo_rhel8_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/02_mongo_other_linux8_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/03_mongo_rhel7_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/04_mongo_centos7_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx similarity index 100% rename from 
product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/05_mongo_sles15_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/07_mongo_sles12_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/09_mongo_ubuntu20_deb10_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/11_mongo_ubuntu18_deb9_x86.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/13_mongo_rhel8_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/15_mongo_rhel7_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/17_mongo_sles15_ppcle.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/19_mongo_sles12_ppcle.mdx diff --git 
a/product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/index.mdx b/product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/index.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/04_installing_the_mongo_data_adapter/index.mdx rename to product_docs/docs/mongo_data_adapter/5/04_installing_the_mongo_data_adapter/index.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/05_updating_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5/06_features_of_mongo_fdw.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/06_features_of_mongo_fdw.mdx rename to product_docs/docs/mongo_data_adapter/5/06_features_of_mongo_fdw.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/07_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/07_configuring_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/07_configuring_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5/07_configuring_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/08_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/08_example_using_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/08_example_using_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5/08_example_using_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/08a_example_join_pushdown.mdx b/product_docs/docs/mongo_data_adapter/5/08a_example_join_pushdown.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.3/08a_example_join_pushdown.mdx rename to product_docs/docs/mongo_data_adapter/5/08a_example_join_pushdown.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/09_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5/09_identifying_data_adapter_version.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/09_identifying_data_adapter_version.mdx rename to product_docs/docs/mongo_data_adapter/5/09_identifying_data_adapter_version.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5/10_limitations.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/10_limitations.mdx rename to product_docs/docs/mongo_data_adapter/5/10_limitations.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/11_uninstalling_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/EDB_logo.png b/product_docs/docs/mongo_data_adapter/5/images/EDB_logo.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/EDB_logo.png rename to 
product_docs/docs/mongo_data_adapter/5/images/EDB_logo.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/ambari_administrative_interface.png b/product_docs/docs/mongo_data_adapter/5/images/ambari_administrative_interface.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/ambari_administrative_interface.png rename to product_docs/docs/mongo_data_adapter/5/images/ambari_administrative_interface.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/edb_logo.svg b/product_docs/docs/mongo_data_adapter/5/images/edb_logo.svg similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/edb_logo.svg rename to product_docs/docs/mongo_data_adapter/5/images/edb_logo.svg diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/installation_complete.png b/product_docs/docs/mongo_data_adapter/5/images/installation_complete.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/installation_complete.png rename to product_docs/docs/mongo_data_adapter/5/images/installation_complete.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5/images/installation_wizard_welcome_screen.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5/images/installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/mongo_server_with_postgres.png b/product_docs/docs/mongo_data_adapter/5/images/mongo_server_with_postgres.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/mongo_server_with_postgres.png rename to product_docs/docs/mongo_data_adapter/5/images/mongo_server_with_postgres.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/progress_as_the_servers_restart.png b/product_docs/docs/mongo_data_adapter/5/images/progress_as_the_servers_restart.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/progress_as_the_servers_restart.png rename to product_docs/docs/mongo_data_adapter/5/images/progress_as_the_servers_restart.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/restart_the_server.png b/product_docs/docs/mongo_data_adapter/5/images/restart_the_server.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/restart_the_server.png rename to product_docs/docs/mongo_data_adapter/5/images/restart_the_server.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/setup_wizard_ready.png b/product_docs/docs/mongo_data_adapter/5/images/setup_wizard_ready.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/setup_wizard_ready.png rename to product_docs/docs/mongo_data_adapter/5/images/setup_wizard_ready.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/specify_an_installation_directory.png b/product_docs/docs/mongo_data_adapter/5/images/specify_an_installation_directory.png similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/images/specify_an_installation_directory.png rename to product_docs/docs/mongo_data_adapter/5/images/specify_an_installation_directory.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/images/the_installation_wizard_welcome_screen.png b/product_docs/docs/mongo_data_adapter/5/images/the_installation_wizard_welcome_screen.png similarity index 
100% rename from product_docs/docs/mongo_data_adapter/5.2/images/the_installation_wizard_welcome_screen.png rename to product_docs/docs/mongo_data_adapter/5/images/the_installation_wizard_welcome_screen.png diff --git a/product_docs/docs/mongo_data_adapter/5.2/index.mdx b/product_docs/docs/mongo_data_adapter/5/index.mdx similarity index 98% rename from product_docs/docs/mongo_data_adapter/5.2/index.mdx rename to product_docs/docs/mongo_data_adapter/5/index.mdx index 1d4674e69a0..2aa72273a69 100644 --- a/product_docs/docs/mongo_data_adapter/5.2/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5/index.mdx @@ -1,6 +1,6 @@ --- title: "MongoDB Foreign Data Wrapper Guide" -navigation: +navigation: - mongo_rel_notes --- diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx similarity index 80% rename from product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx rename to product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx index 72f85025012..6f8a635e6a9 100644 --- a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx @@ -11,7 +11,8 @@ The Mongo Foreign Data Wrapper documentation describes the latest version of For | Version | Release Date | | ----------------------------- | ------------ | -| [5.2.9](mongo5.2.9_rel_notes) | 2021 Jun 24 | -| [5.2.8](mongo5.2.8_rel_notes) | 2020 Nov 23 | +| [5.3.0](mongo5.3.0_rel_notes) | 2021 Dec 02 | +| [5.2.9](mongo5.2.9_rel_notes) | 2021 Jun 24 | +| [5.2.8](mongo5.2.8_rel_notes) | 2020 Nov 23 | diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.8_rel_notes.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.8_rel_notes.mdx rename to product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.8_rel_notes.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.9_rel_notes.mdx similarity index 100% rename from product_docs/docs/mongo_data_adapter/5.2/mongo_rel_notes/mongo5.2.9_rel_notes.mdx rename to product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.9_rel_notes.mdx diff --git a/product_docs/docs/mongo_data_adapter/5.3/01_5.3.0_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx similarity index 89% rename from product_docs/docs/mongo_data_adapter/5.3/01_5.3.0_rel_notes.mdx rename to product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx index fb27609bb1b..d8b5b16023b 100644 --- a/product_docs/docs/mongo_data_adapter/5.3/01_5.3.0_rel_notes.mdx +++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx @@ -1,5 +1,7 @@ --- -title: "Release Notes" +title: "Version 5.3.0" +redirects: +- 01_5.3.0_rel_notes --- Enhancements, bug fixes, and other changes in MongoDB Foreign Data Wrapper 5.3.0 diff --git a/static/_redirects b/static/_redirects index 4c61005828f..53c6066a9d7 100644 --- a/static/_redirects +++ b/static/_redirects @@ -128,9 +128,9 @@ /docs/odbc_connector/12.0.0.1/* /docs/odbc_connector/latest/ 301 /docs/odbc_connector/12.2.0.1/* /docs/odbc_connector/latest/ 301 # Collapsed versions -/docs/mongo_data_adapter/5.2.7/* /docs/mongo_data_adapter/5.2/:splat 301 
-/docs/mongo_data_adapter/5.2.8/* /docs/mongo_data_adapter/5.2/:splat 301 -/docs/mongo_data_adapter/5.3.0/* /docs/mongo_data_adapter/5.3/:splat 301 +/docs/mongo_data_adapter/5.2.7/* /docs/mongo_data_adapter/5/:splat 301 +/docs/mongo_data_adapter/5.2.8/* /docs/mongo_data_adapter/5/:splat 301 +/docs/mongo_data_adapter/5.3.0/* /docs/mongo_data_adapter/5/:splat 301 # BigAnimal /docs/edbcloud/* /docs/biganimal/:splat 301 From 3713e83a7378120be190750cfaadac048c49d492 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Mon, 14 Mar 2022 17:28:03 -0400 Subject: [PATCH 04/12] fixed ordering of rel notse --- product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx index 6f8a635e6a9..4b36ac6f712 100644 --- a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx @@ -3,6 +3,7 @@ title: "Release notes" redirects: - ../01_whats_new/ navigation: +- mongo5.3.0_rel_notes - mongo5.2.9_rel_notes - mongo5.2.8_rel_notes --- From f949ab5430c7f0d08bf8c5cb45510e8bdff38cef Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Tue, 15 Mar 2022 10:53:41 -0400 Subject: [PATCH 05/12] incorporated Kelly's feedback --- .../5/02_requirements_overview.mdx | 4 ++-- .../5/11_uninstalling_the_mongo_data_adapter.mdx | 6 +++++- .../mongo_data_adapter/5/mongo_rel_notes/index.mdx | 6 +++++- .../5/mongo_rel_notes/mongo5.2.3_rel_notes.mdx | 13 +++++++++++++ .../5/mongo_rel_notes/mongo5.2.6_rel_notes.mdx | 13 +++++++++++++ .../5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx | 9 +++++---- 6 files changed, 43 insertions(+), 8 deletions(-) create mode 100644 product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.3_rel_notes.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.6_rel_notes.mdx diff --git a/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx index acbbc27b6fa..9ab11614aba 100644 --- a/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx +++ b/product_docs/docs/mongo_data_adapter/5/02_requirements_overview.mdx @@ -1,5 +1,5 @@ --- -title: "Supported Platforms" +title: "Supported Database Versions" --- This table lists the latest MongoDB Foreign Data Wrapper versions and their supported corresponding EDB Postgres Advanced Server (EPAS) versions. MongoDB Foreign Data Wrapper is supported on the same platforms as EDB Postgres Advanced Server. See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#epas) for details. 
@@ -8,7 +8,7 @@ title: "Supported Platforms" | --------- | ------- | ------- | ------- | ------- | ------- | | 5.3.0 | Y | Y | Y | Y | Y | | 5.2.9 | N | Y | Y | Y | Y | -| 5.2.8 | N | N | N | N | N | +| 5.2.8 | N | Y | N | N | N | | 5.2.6 | N | N | Y | N | N | | 5.2.3 | N | Y | N | Y | N | diff --git a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx index 5312c8fb933..ee322899c88 100644 --- a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx @@ -6,7 +6,7 @@ title: "Uninstalling the MongoDB Foreign Data Wrapper" ## Uninstalling an RPM Package -You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: +You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packages. To uninstall, open a terminal window, assume superuser privileges, and enter the command applicable to your the operating system and the package manager used for the installation: - On RHEL or CentOS 7: @@ -16,6 +16,10 @@ You can use the `yum remove` or `dnf remove` command to remove a package install `dnf remove edb-as-mongo_fdw` +- On SLES: + + `zipper remove edb-as-mongo_fdw` + Where `xx` is the server version number. ## Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx index 4b36ac6f712..919eb50fa6b 100644 --- a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx @@ -6,14 +6,18 @@ navigation: - mongo5.3.0_rel_notes - mongo5.2.9_rel_notes - mongo5.2.8_rel_notes +- mongo5.2.6_rel_notes +- mongo5.2.3_rel_notes --- -The Mongo Foreign Data Wrapper documentation describes the latest version of Foreign Data Wrapper 5.2 including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about what release introduced the feature. +The Mongo Foreign Data Wrapper documentation describes the latest version of Foreign Data Wrapper 5 including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about what release introduced the feature. 
| Version | Release Date |
| ----------------------------- | ------------ |
| [5.3.0](mongo5.3.0_rel_notes) | 2021 Dec 02 |
| [5.2.9](mongo5.2.9_rel_notes) | 2021 Jun 24 |
| [5.2.8](mongo5.2.8_rel_notes) | 2020 Nov 23 |
+| [5.2.6](mongo5.2.6_rel_notes) | 2019 Sep 27 |
+| [5.2.3](mongo5.2.3_rel_notes) | 2018 Nov 01 |

diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.3_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.3_rel_notes.mdx
new file mode 100644
index 00000000000..6cbdc8e7dc2
--- /dev/null
+++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.3_rel_notes.mdx
@@ -0,0 +1,13 @@
+---
+title: "Version 5.2.3"
+---
+
+Enhancements, bug fixes, and other changes in MongoDB Foreign Data Wrapper 5.2.3
+include:
+
+| Type | Description |
+| ----------- |------------ |
+| Enhancement | Support for EDB Postgres Advanced Server 11. |
+
+
diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.6_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.6_rel_notes.mdx
new file mode 100644
index 00000000000..356059c664b
--- /dev/null
+++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.2.6_rel_notes.mdx
@@ -0,0 +1,13 @@
+---
+title: "Version 5.2.6"
+---
+
+Enhancements, bug fixes, and other changes in MongoDB Foreign Data Wrapper 5.2.6
+include:
+
+| Type | Description |
+| ----------- |------------ |
+| Enhancement | Support for EDB Postgres Advanced Server 12. |
+
+
diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx
index d8b5b16023b..53ed4091569 100644
--- a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/mongo5.3.0_rel_notes.mdx
@@ -7,10 +7,11 @@
Enhancements, bug fixes, and other changes in MongoDB Foreign Data Wrapper 5.3.0
include:

-| Type | Description |
-| ---- |------------ |
+| Type | Description |
+| ----------- |------------ |
+| Enhancement | Support for EDB Postgres Advanced Server 14. |
| Enhancement | Join pushdown: If a query has a join between two foreign tables from the same remote server, you can now push that join down to the remote server instead of fetching all the rows for both the tables and performing a join locally. |
-| Bug Fix | Improve API performance. |
-| Bug Fix | Need support for the whole-row reference. |
+| Bug Fix | Improve API performance. |
+| Bug Fix | Need support for the whole-row reference. |

From a13cd18a9ef89ed0bbc47dd6a1786cdba68c15f6 Mon Sep 17 00:00:00 2001
From: drothery-edb
Date: Tue, 15 Mar 2022 15:19:48 -0400
Subject: [PATCH 06/12] clean up

---
 .../5/11_uninstalling_the_mongo_data_adapter.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
index ee322899c88..e405364c871 100644
--- a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
@@ -6,7 +6,7 @@ title: "Uninstalling the MongoDB Foreign Data Wrapper"

-You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packa
To uninstall, open a terminal window, assume superuser privileges, and enter the command applicable to your the operating system and the package manager used for the installation: +You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packages. To uninstall, open a terminal window, assume superuser privileges, and enter the command applicable to the operating system and package manager used for the installation: - On RHEL or CentOS 7: From 08f5ab9b2087d4a743da32e382c36f40b62fdb64 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 16 Mar 2022 10:55:16 -0400 Subject: [PATCH 07/12] more updates to uninstall topic --- .../5/11_uninstalling_the_mongo_data_adapter.mdx | 9 +++------ product_docs/docs/mongo_data_adapter/5/index.mdx | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx index e405364c871..1fc43e419da 100644 --- a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx @@ -4,7 +4,6 @@ title: "Uninstalling the MongoDB Foreign Data Wrapper" -## Uninstalling an RPM Package You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packages. To uninstall, open a terminal window, assume superuser privileges, and enter the command applicable to the operating system and package manager used for the installation: @@ -20,12 +19,10 @@ You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packa `zipper remove edb-as-mongo_fdw` -Where `xx` is the server version number. +- On Debian or Ubuntu -## Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host + `apt-get remove edb-as-mongo-fdw` -- To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. +Where `xx` is the EDB Postgres Advanced Server version number. - `apt-get remove edb-as-mongo-fdw` -Where `xx` is the server version number. 
diff --git a/product_docs/docs/mongo_data_adapter/5/index.mdx b/product_docs/docs/mongo_data_adapter/5/index.mdx index 2aa72273a69..eb762ab7228 100644 --- a/product_docs/docs/mongo_data_adapter/5/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5/index.mdx @@ -1,5 +1,5 @@ --- -title: "MongoDB Foreign Data Wrapper Guide" +title: "MongoDB Foreign Data Wrapper" navigation: - mongo_rel_notes --- From 7d807267b395503db25d65261bef910b2db30645 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 16 Mar 2022 13:11:10 -0400 Subject: [PATCH 08/12] added SLES to updating topic --- .../5/05_updating_the_mongo_data_adapter.mdx | 75 ++++++++++++------- ...11_uninstalling_the_mongo_data_adapter.mdx | 2 +- 2 files changed, 48 insertions(+), 29 deletions(-) diff --git a/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx index 184f4ae5234..f33dbee05ea 100644 --- a/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx @@ -1,49 +1,68 @@ --- -title: "Updating the MongoDB Foreign Data Wrapper" +title: "Upgrading the MongoDB Foreign Data Wrapper" --- -## Updating an RPM Installation +If you have an existing installation of MongoDB Foreign Data Wrapper that you installed using the EDP repository, you can use the `upgrade` command to update your repository configuration file and then upgrade to a more recent product version. To update your edb.repo file and upgrade , open a terminal window, assume superuser privileges, and enter the commands applicable to the operating system and package manager used for the installation: -If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: +## On RHEL or Rocky Linux or AlmaLinux or OL 8 -- On RHEL or CentOS 7: +```shell +# Update your edb.repo file to access the current EDB repository +dnf upgrade edb-repo - `yum upgrade edb-repo` +# Upgrade to the latest version product +dnf upgrade edb-as-mongo_fdw +# where is the EDB Postgres Advanced Server version number +``` +## On RHEL or CentOS or OL 7: -- On RHEL or CentOS 7 on PPCLE: +```shell +# Update your edb.repo file to access the current EDB repository +yum upgrade edb-repo - `yum upgrade edb-repo` +# Upgrade to the latest version product version +yum upgrade edb-as-mongo_fdw edb-libmongoc-libs` +# where is the EDB Postgres Advanced Server version number +``` -- On RHEL or Rocky Linux or AlmaLinux 8: +## On SLES - `dnf upgrade edb-repo` +```shell +# Update your edb.repo file to access the current EDB repository +zypper upgrade edb-repo -yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: +# Upgrade to the latest version product +zypper upgrade edb-as-mongo_fdw +# where is the EDB Postgres Advanced Server version number +``` -- On RHEL or CentOS 7: +## On Debian or Ubuntu - `yum upgrade edb-as-mongo_fdw edb-libmongoc-libs` +```shell +# Update your edb.repo file to access the current EDB repository +zypper upgrade edb-repo - where `xx` is the server version number. 
+# Upgrade to the latest product version
+apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc
+# where <xx> is the EDB Postgres Advanced Server version number
+```
 
-- On RHEL or CentOS 7 on PPCLE:
+## On RHEL or CentOS 7 on PPCLE
 
-  `yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-at<yy>-libs`
-
-  where `xx` is the server version number and `yy` is Advance Toolchain major version number. For EDB Postgres Advanced Server versions 10 to 11, `yy` must be 10 and for EDB Postgres Advanced Server version 12 and later, `yy` must be 11.
-
-- On RHEL or Rocky Linux or AlmaLinux 8:
-
-  `dnf upgrade edb-as<xx>-mongo_fdw`
+```shell
+# Update your edb.repo file to access the current EDB repository
+yum upgrade edb-repo
 
-  where `xx` is the server version number.
-
-## Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host
-
-To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command:
+# Upgrade to the latest version product version
+yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-at<yy>-libs`
+
+# where:
+# <xx> is the EDB Postgres Advanced Server version number
+# <yy> is Advance Toolchain major version number. For EDB Postgres
+# Advanced Server versions 10 to 11, <yy> must be 10 and for
+# EDB Postgres Advanced Server version 12 and later, <yy> must be 11.
+```
 
-  `apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc`
-  where `xx` is the server version number.

diff --git a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
index 1fc43e419da..8fecb569438 100644
--- a/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/11_uninstalling_the_mongo_data_adapter.mdx
@@ -17,7 +17,7 @@ You can use the `remove` command to uninstall MongoDB Foreign Data Wrapper packa
 
 - On SLES:
 
-  `zipper remove edb-as<xx>-mongo_fdw`
+  `zypper remove edb-as<xx>-mongo_fdw`
 
 - On Debian or Ubuntu

From 4e7dc72dfcec0e0eb81ebc7c6abdefb36de5259b Mon Sep 17 00:00:00 2001
From: Dee Dee Rothery <83650384+drothery-edb@users.noreply.github.com>
Date: Wed, 16 Mar 2022 16:03:27 -0400
Subject: [PATCH 09/12] Update index.mdx

---
 .../docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
index 919eb50fa6b..4ccdc33f023 100644
--- a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
@@ -10,7 +10,7 @@ navigation:
 - mongo5.2.3_rel_notes
 ---
 
-The Mongo Foreign Data Wrapper documentation describes the latest version of Foreign Data Wrapper 5 including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about what release introduced the feature.
+The Mongo Foreign Data Wrapper documentation describes the latest version of Mongo Foreign Data Wrapper 5 including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about what release introduced the feature.
 
 | Version | Release Date |
 | ----------------------------- | ------------ |
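One slip in [PATCH 08/12] survives the cleanup in [PATCH 10/12] below: the "On Debian or Ubuntu" upgrade block opens with the SLES commands (`zypper upgrade edb-repo`), although apt-based hosts have no `edb.repo` file. A hedged sketch of what that block presumably intends, reusing the `apt-get` commands and package names already established in the topic:

```shell
# Presumed intent of the Debian/Ubuntu upgrade steps (a sketch, not the
# committed text): refresh the configured EDB apt repository metadata,
# then upgrade the installed packages.
apt-get update

# Upgrade to the latest product version,
# where <xx> is the EDB Postgres Advanced Server version number
apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc
```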
From 4d053f0195e630439c50e6f3d3ea9e46ae28cd4d Mon Sep 17 00:00:00 2001
From: drothery-edb
Date: Thu, 17 Mar 2022 06:56:48 -0400
Subject: [PATCH 10/12] cleaning up upgrade topic

---
 .../5/05_updating_the_mongo_data_adapter.mdx | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx
index f33dbee05ea..069a3a602f3 100644
--- a/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/05_updating_the_mongo_data_adapter.mdx
@@ -4,7 +4,7 @@ title: "Upgrading the MongoDB Foreign Data Wrapper"
 
 
-If you have an existing installation of MongoDB Foreign Data Wrapper that you installed using the EDP repository, you can use the `upgrade` command to update your repository configuration file and then upgrade to a more recent product version. To update your edb.repo file and upgrade , open a terminal window, assume superuser privileges, and enter the commands applicable to the operating system and package manager used for the installation:
+If you have an existing installation of MongoDB Foreign Data Wrapper that you installed using the EDB repository, you can use the `upgrade` command to update your repository configuration file and then upgrade to a more recent product version. To start the process, open a terminal window, assume superuser privileges, and enter the commands applicable to the operating system and package manager used for the installation:
 
 ## On RHEL or Rocky Linux or AlmaLinux or OL 8
 
@@ -23,7 +23,7 @@ dnf upgrade edb-as<xx>-mongo_fdw
 yum upgrade edb-repo
 
 # Upgrade to the latest version product version
-yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-libs`
+yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-libs
 
 # where <xx> is the EDB Postgres Advanced Server version number
 ```
@@ -56,7 +56,7 @@ apt-get --only-upgrade install edb-as<xx>-mongo-fdw edb-libmongoc
 yum upgrade edb-repo
 
 # Upgrade to the latest version product version
-yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-at<yy>-libs`
+yum upgrade edb-as<xx>-mongo_fdw edb-libmongoc-at<yy>-libs
 
 # where:
 # <xx> is the EDB Postgres Advanced Server version number

From c761b409b40df554d75c8a94c53b9a17dae6c516 Mon Sep 17 00:00:00 2001
From: drothery-edb
Date: Tue, 22 Mar 2022 12:55:04 -0400
Subject: [PATCH 11/12] minor changes to nav

---
 product_docs/docs/mongo_data_adapter/5/index.mdx | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/product_docs/docs/mongo_data_adapter/5/index.mdx b/product_docs/docs/mongo_data_adapter/5/index.mdx
index eb762ab7228..ec5d6eaa4c8 100644
--- a/product_docs/docs/mongo_data_adapter/5/index.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/index.mdx
@@ -2,6 +2,17 @@
 title: "MongoDB Foreign Data Wrapper"
 navigation:
 - mongo_rel_notes
+- 02_requirements_overview
+- 03_architecture_overview
+- 04_installing_the_mongo_data_adapter
+- 07_configuring_the_mongo_data_adapter
+- 05_updating_the_mongo_data_adapter
+- 06_features_of_mongo_fdw
+- 08_example_using_the_mongo_data_adapter
+- 08a_example_join_pushdown
+- 09_identifying_data_adapter_version
+- 10_limitations
+- 11_uninstalling_the_mongo_data_adapter
 ---
 
 The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server.
 It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host.

From 64028f8bdf20c107ad2194e812786b6a9c1e8ea6 Mon Sep 17 00:00:00 2001
From: drothery-edb
Date: Tue, 22 Mar 2022 14:12:27 -0400
Subject: [PATCH 12/12] moved limitations up

---
 product_docs/docs/mongo_data_adapter/5/index.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/product_docs/docs/mongo_data_adapter/5/index.mdx b/product_docs/docs/mongo_data_adapter/5/index.mdx
index ec5d6eaa4c8..1698a8cd92d 100644
--- a/product_docs/docs/mongo_data_adapter/5/index.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/index.mdx
@@ -3,6 +3,7 @@ title: "MongoDB Foreign Data Wrapper"
 navigation:
 - mongo_rel_notes
 - 02_requirements_overview
+- 10_limitations
 - 03_architecture_overview
 - 04_installing_the_mongo_data_adapter
 - 07_configuring_the_mongo_data_adapter
 - 05_updating_the_mongo_data_adapter
 - 06_features_of_mongo_fdw
 - 08_example_using_the_mongo_data_adapter
 - 08a_example_join_pushdown
 - 09_identifying_data_adapter_version
-- 10_limitations
 - 11_uninstalling_the_mongo_data_adapter
 ---
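The patch series reorganizes navigation but never shows the wrapper in action. For orientation, a minimal end-to-end sketch, assuming a local EDB Postgres Advanced Server database named `edb` and a MongoDB server at `mongo.example.com:27017` — the host, credentials, `testdb` database, and `warehouse` collection are placeholder assumptions, not values taken from the documentation:

```shell
# Minimal mongo_fdw smoke test, run as a database superuser.
# All connection details below are hypothetical placeholders.
psql -d edb <<'SQL'
-- Load the extension installed by the edb-as<xx>-mongo_fdw package
CREATE EXTENSION IF NOT EXISTS mongo_fdw;

-- Point a foreign server at the MongoDB instance
CREATE SERVER mongo_srv
    FOREIGN DATA WRAPPER mongo_fdw
    OPTIONS (address 'mongo.example.com', port '27017');

-- Map the local role to MongoDB credentials
CREATE USER MAPPING FOR CURRENT_USER
    SERVER mongo_srv
    OPTIONS (username 'edb', password 'secret');

-- Expose one collection as a foreign table
CREATE FOREIGN TABLE warehouse (
    _id            name,
    warehouse_id   int,
    warehouse_name text
)
SERVER mongo_srv
OPTIONS (database 'testdb', collection 'warehouse');

-- The wrapper is writable, so INSERT works as well as SELECT
INSERT INTO warehouse (warehouse_id, warehouse_name) VALUES (1, 'UPS');
SELECT warehouse_id, warehouse_name FROM warehouse;
SQL
```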