diff --git a/gatsby-config.js b/gatsby-config.js index 6103769d579..41202f2dab1 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -76,6 +76,7 @@ const sourceToPluginConfig = { name: "pg_extensions", path: "advocacy_docs/pg_extensions", }, + "CloudNativePG": { name: "CloudNativePG", path:"advocacy_docs/supported-open-source/cloud_native_pg"}, pgpool: { name: "pgpool", path: "product_docs/docs/pgpool" }, postgis: { name: "postgis", path: "product_docs/docs/postgis" }, repmgr: { name: "repmgr", path: "product_docs/docs/repmgr" }, diff --git a/product_docs/docs/bart/2.6/bart_inst/02_installing_bart.mdx b/product_docs/docs/bart/2.6/bart_inst/02_installing_bart.mdx index dd29e5383aa..855614f0cf1 100644 --- a/product_docs/docs/bart/2.6/bart_inst/02_installing_bart.mdx +++ b/product_docs/docs/bart/2.6/bart_inst/02_installing_bart.mdx @@ -8,7 +8,7 @@ legacyRedirectsGenerated: This section will walk you through performing a fresh installation of BART on a host. Installation instructions are organized into the following platform/installer specific sections: -- [Installing BART on a CentOS/Rocky Linux/AlmaLinux Host](#installing-bart-on-a-centos-host) +- [Installing BART on a CentOS/Rocky Linux/AlmaLinux Host](#installing-bart-on-a-centosrocky-linuxalmalinux-host) - [Installing BART on a RHEL Host](#installing-bart-on-a-rhel-host) - [Installing BART on a CentOS or RHEL Host](#installing-bart-on-a-rhelcentos-7-ppcle-host) - [Installing BART on a Debian or Ubuntu Host](#installing-bart-on-a-debian-or-ubuntu-host) diff --git a/product_docs/docs/bart/2.6/bart_inst/index.mdx b/product_docs/docs/bart/2.6/bart_inst/index.mdx index 372b83ffb2d..ebb872f4af3 100644 --- a/product_docs/docs/bart/2.6/bart_inst/index.mdx +++ b/product_docs/docs/bart/2.6/bart_inst/index.mdx @@ -34,7 +34,7 @@ This guide provides information about how to install and configure the EDB Backu You require the following components to install BART. 
-- BART Host Components - Use EDB packages to add BART host components; see [Installing BART](02_installing_bart/#installing-bart) for information about how to install these components. +- BART Host Components - Use EDB packages to add BART host components; see [Installing BART](02_installing_bart/) for information about how to install these components. - Additional Components - In addition to the BART host components, the following components are required: diff --git a/product_docs/docs/bart/2.6/bart_migration/04_executing.mdx b/product_docs/docs/bart/2.6/bart_migration/04_executing.mdx index 4986c41991b..c5e6eb77e7f 100644 --- a/product_docs/docs/bart/2.6/bart_migration/04_executing.mdx +++ b/product_docs/docs/bart/2.6/bart_migration/04_executing.mdx @@ -13,9 +13,9 @@ Execution of the migration can be very straight forward. The steps include: 2. Installing the new tool on the backup server. -3. Configuring the configuration file for the new tool. See [How features are configured](#how-features-are-configured) for more information on changing a configuration file for one tool to another tool. +3. Configuring the configuration file for the new tool. See [How features are configured](02_configuring#how-features-are-configured) for more information on changing a configuration file for one tool to another tool. -4. Reconfiguring the scheduling. See [Scheduling](#scheduling) for more information on changing the scheduling from one tool to another tool. +4. Reconfiguring the scheduling. See [Scheduling](06_scheduling) for more information on changing the scheduling from one tool to another tool. 5. 
Checking the configuration: diff --git a/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx b/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx index 0f6f5a9f373..58ae5bfaa8e 100644 --- a/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx +++ b/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx @@ -53,7 +53,7 @@ You can modify the data groups in your extreme-high-availability cluster by edit 1. Select **Edit** next to the data group. -1. Edit the cluster settings in the **Data Groups** tab. See the table in [Modify your cluster](#modify-your-cluster). +1. Edit the cluster settings in the **Data Groups** tab. See the table in [Modify your cluster configuration settings](#modify-your-clusters-configuration-settings). 1. Select **Save**. diff --git a/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx b/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx index 42f422b6e5b..ff9d6358cea 100644 --- a/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx +++ b/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx @@ -60,5 +60,5 @@ You can restore one data group from a PGD cluster into a new PGD cluster. To res 1. Select the data group you want to restore. You can restore only one data group into a new cluster. 1. Select **Fully Restore** or **Point in Time Restore**. A point-in-time restore restores the data group as it was at the specified date and time. 1. In the **Nodes** section, select **Two Data Nodes** or **Three Data Nodes**. For more information on node architecture, see [Extreme high availability](/biganimal/latest/overview/02_high_availability/#extreme-high-availability-preview). -1. Follow Steps 3-5 in [Create an extreme-high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster). +1. 
Follow Steps 3-5 in [Creating an extreme-high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster/). 1. Select **Restore**. diff --git a/product_docs/docs/eprs/7/02_overview/03_replication_server_components_and_architecture/01_physical_components.mdx b/product_docs/docs/eprs/7/02_overview/03_replication_server_components_and_architecture/01_physical_components.mdx index 878ccb00958..7a6a52f0eb8 100644 --- a/product_docs/docs/eprs/7/02_overview/03_replication_server_components_and_architecture/01_physical_components.mdx +++ b/product_docs/docs/eprs/7/02_overview/03_replication_server_components_and_architecture/01_physical_components.mdx @@ -100,7 +100,7 @@ The following table contains a brief description of the parameters in the replic Replication Server creates the content of this file as follows: - The replication configuration file and some of its initial content are created when you install a publication server or subscription server on a host during the Replication Server installation process. -- Parameters `admin_user` and `admin_password` are determined during the Replication Server installation process. See [Installation and uninstallation](../../installing/#installation) for how the content of these parameters are determined. +- Parameters `admin_user` and `admin_password` are determined during the Replication Server installation process. See [Installation and uninstallation](../../installing/) for how the content of these parameters are determined. - Parameters `database`, `user`, `password`, `port`, `host`, and `type` are set with the connection and authentication information of the first publication database definition you create with the Replication Server console or CLI. This database is designated as the controller database (see [Controller database](#controller-database)). 
See [Adding a publication database](../../05_smr_operation/02_creating_publication/02_adding_pub_database/#adding_pub_database) for creating a publication database definition for a single-master replication system. See [Adding the primary definition node](../../06_mmr_operation/02_creating_publication_mmr/#adding_pdn) for creating the publication database definition for a multi-master replication system. The following is an example of the content of an EPRS Replication Configuration file: diff --git a/product_docs/docs/eprs/7/08_xdb_cli/01_prereq_steps.mdx b/product_docs/docs/eprs/7/08_xdb_cli/01_prereq_steps.mdx index 86f2eab9192..f106d15df5e 100644 --- a/product_docs/docs/eprs/7/08_xdb_cli/01_prereq_steps.mdx +++ b/product_docs/docs/eprs/7/08_xdb_cli/01_prereq_steps.mdx @@ -8,7 +8,7 @@ Perform these installation and setup steps before using the Replication Server C The Replication Server CLI is included if you choose the Replication Server console component when installing Replication Server. The Replication Server CLI is a Java application found in the directory `XDB_HOME/bin`. -1. Follow the installation steps in [Installation and uninstallation](../installing/#installation) to install Replication Server. +1. Follow the installation steps in [Installation and uninstallation](../installing/) to install Replication Server. 1. Follow the prerequisite steps in [Prerequisite steps](../05_smr_operation/01_prerequisites/) for single-master replication systems or [Prerequisite steps](../05_smr_operation/01_prerequisites/#prerequisites) for multi-master replication systems. 
diff --git a/product_docs/docs/eprs/7/installing/installation_details.mdx b/product_docs/docs/eprs/7/installing/installation_details.mdx index cd3ed968f55..2f1448b2a82 100644 --- a/product_docs/docs/eprs/7/installing/installation_details.mdx +++ b/product_docs/docs/eprs/7/installing/installation_details.mdx @@ -11,7 +11,7 @@ On Windows systems, the publication server and subscription server run as servic ## Linux details -On Linux hosts where you installed Replication Server with the graphical user interface or from the command line, you should now have a publication server daemon and a subscription server daemon running on your computer, assuming you chose to install the publication server and subscription server components. If you installed the Replication Server RPM package, you must start the publication server and the subscription server based on the instructions in [Registering a Publication Server](../../05_smr_operation/02_creating_publication/01_registering_publication_server/#registering_publication_server) for the publication server and [Registering a Subscription Server](../../05_smr_operation/03_creating_subscription/01_registering_subscription_server/#registering_subscription_server) for the subscription server. +On Linux hosts where you installed Replication Server with the graphical user interface or from the command line, you should now have a publication server daemon and a subscription server daemon running on your computer, assuming you chose to install the publication server and subscription server components. 
If you installed the Replication Server RPM package, you must start the publication server and the subscription server based on the instructions in [Registering a Publication Server](../05_smr_operation/02_creating_publication/01_registering_publication_server/#registering_publication_server) for the publication server and [Registering a Subscription Server](../05_smr_operation/03_creating_subscription/01_registering_subscription_server/#registering_subscription_server) for the subscription server. !!! Note On some Linux systems, you may have to restart the server before you can see the EPRS Replication Console choice in the application menu. If the Replication Console choice is still unavailable in the application menu, it can be started by invoking the script `XDB_HOME/bin/runRepConsole.sh`. diff --git a/product_docs/docs/pem/9/in_app_help.mdx b/product_docs/docs/pem/9/in_app_help.mdx index 91afb4bbf3f..722553f0391 100644 --- a/product_docs/docs/pem/9/in_app_help.mdx +++ b/product_docs/docs/pem/9/in_app_help.mdx @@ -54,6 +54,33 @@ redirects: - /pem/latest/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/10_table_dialog/ - /pem/latest/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/11_trigger_dialog/ - /pem/latest/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/12_unique_constraint_dialog/ +- /pem/latest/pem_online_help/01_toc_pem_getting_started/05_group_dialog/ +- /pem/latest/pem_online_help/01_toc_pem_getting_started/11_connect_error/ +- /pem/latest/pem_online_help/02_toc_pem_agent/04_pem_agent_ha/ +- /pem/latest/pem_online_help/03_toc_pem_client/02_pem_toolbar/ +- /pem/latest/pem_online_help/03_toc_pem_client/04_preferences/ +- /pem/latest/pem_online_help/03_toc_pem_client/05_keyboard_shortcuts/ +- /pem/latest/pem_online_help/03_toc_pem_client/06_search_objects/ +- /pem/latest/pem_online_help/04_toc_pem_features/ +- /pem/latest/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/ +- 
/pem/latest/pem_online_help/06_toc_pem_bart_management/07_bart_backup_dialog/ +- /pem/latest/pem_online_help/08_toc_pem_developer_tools/ +- /pem/latest/pem_online_help/08_toc_pem_developer_tools/01_debugger/ +- /pem/latest/pem_online_help/08_toc_pem_developer_tools/03_pem_interpreting_graphical_query/ +- /pem/latest/pem_online_help/08_toc_pem_developer_tools/04_editgrid/ +- /pem/latest/pem_online_help/08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter/ +- /pem/latest/pem_online_help/10_pgagent/ +- /pem/latest/pem_online_help/10_pgagent/01_using_pgagent/ +- /pem/latest/pem_online_help/10_pgagent/02_pgagent_install/ +- /pem/latest/pem_online_help/10_pgagent/03_pgagent_jobs/ +- /pem/latest/pem_online_help/10_pgagent/04_pgagent-steps/ +- /pem/latest/pem_online_help/10_pgagent/05_pgagent-schedules/ +- /pem/latest/pem_online_help/11_appendices/ +- /pem/latest/pem_online_help/11_appendices/01_licence/ +- /pem/latest/pem_online_help/11_appendices/02_kerberos/ +- /pem/latest/pem_online_help/11_appendices/03_openssl/ +- /pem/latest/pem_online_help/11_appendices/04_snmp++/ +- /pem/latest/pem_online_help/11_appendices/05_jquery_table_sort/ --- The in-app help provides comprehensive details about the PEM web interface, including the menu items, charts and graphs on the dashboards, and the options on the dialog boxes. The in-app help also includes instructions for creating database objects. diff --git a/product_docs/docs/pem/9/managing_pem_server.mdx b/product_docs/docs/pem/9/managing_pem_server.mdx index b9a275c2f26..2fdef7d9ac2 100644 --- a/product_docs/docs/pem/9/managing_pem_server.mdx +++ b/product_docs/docs/pem/9/managing_pem_server.mdx @@ -27,7 +27,7 @@ Some of the tasks related to managing the PEM server include: ## Starting and stopping the PEM server and agents -The PEM server starts, stops, and restarts when the Postgres server instance on where it resides starts, stops, or restarts. 
Use the same commands to control the PEM server that you use to control the Postgres server. On Linux platforms, the command that stops and starts the service script varies by platform and OS version. +The PEM server starts, stops, and restarts when the Postgres server instance where it resides starts, stops, or restarts. Use the same commands to control the PEM server that you use to control the Postgres server. On Linux platforms, the command that stops and starts the service script varies by platform and OS version. The PEM agent is controlled by a service named pemagent. @@ -49,7 +49,7 @@ Where `x` indicates the server version number. You can use the service script to control the service. -- To control a service on RHEL or CentOS version 7.x or 8.x, at the command prompt, assume superuser privileges and issue the command: +To control a service on RHEL or CentOS version 7.x or 8.x, at the command prompt, assume superuser privileges and issue the command: ```shell systemctl @@ -148,7 +148,7 @@ PEM provides an interface for managing your Postgres roles and servers. ### Login roles -When you connect to the PEM server, you must provide role credentials that allow access to the database on which the PEM server stores data. By default, the postgres superuser account is used to initially connect to the server, but it is strongly recommended (for both security and auditing purposes) that individual roles are created for each connecting user. You can use the PEM Query tool, the PEM web interface `Create – Login/Group Role` dialog, or a command line client (such as psql) to create a role. +When you connect to the PEM server, you must provide role credentials that allow access to the database on which the PEM server stores data. By default, the postgres superuser account is used to initially connect to the server, but we strongly recommend (for both security and auditing purposes) creating individual roles for each connecting user. 
You can use the PEM Query tool, the PEM web interface Create – Login/Group Role dialog box, or a command line client (such as psql) to create a role. To use the Create – Login/Group Role dialog box to create a role: @@ -156,7 +156,7 @@ To use the Create – Login/Group Role dialog box to create a role: 1. Right-click the **Login/Group Roles** node. 1. From the context menu, select **Create > Login/Group Role**. 1. Use the tabs of the Create – Login/Group Role dialog box to define the role. -1. After you finish defining the new role, select **Save** to create the role. +1. After you finish defining the new role, to create the role, select **Save**. To modify the properties of an existing login role, right-click the name of a login role in the tree and select **Properties** from the context menu. To delete a login role, right-click the name of the role and select **Delete/Drop** from the context menu. @@ -172,16 +172,16 @@ Use the **Group Roles** node (located beneath the name of each registered server You can use the Login/Group Role dialog box to allow a role with limited privileges to access PEM features such as the Audit Manager, Capacity Manager, or SQL Profiler. PEM predefined roles allow access to PEM functionality. Roles that are assigned membership in these roles can access the associated feature. -When defining a user, use the **Membership** tab to specify the roles in which the new user is a member. The new user shares the privileges associated with each role in which it is a member. For a user to have access to PEM extended functionality, the role must be a member of the pem_user role and the predefined role that grants access to the feature. Use the **Roles** field to select predefined role names from a list. +When defining a user, use the **Membership** tab to specify the roles in which the new user is a member. The new user shares the privileges associated with each role in which it's a member. 
For a user to have access to PEM extended functionality, the role must be a member of the pem_user role and the predefined role that grants access to the feature. Use the **Roles** field to select predefined role names from a list. The **SQL** tab displays the SQL command that the server executes when you select **Save**. -This example creates a login role named acctg_clerk that has access to the Audit Manager. The role can make unlimited connections to the server at any given time. +This example creates a login role named acctg_clerk that has access to Audit Manager. The role can make unlimited connections to the server at any given time. ![Create Login Role - SQL tab](images/role_sql.png) -You can use PEM predefined roles to allow access to the functionality listed in the table. +You can use PEM predefined roles to allow access to the capabilities listed in the table. | Value | Parent role | Description | | -------------------------------- | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -234,6 +234,14 @@ Object permissions are managed with the graphical object editor for each particu The PEM client also contains a Grant wizard (accessed through the **Tools** menu) that allows you to manage many object permissions at once. +## Server configuration + +You can use the Server Configuration dialog box to modify values of user-configurable parameters that control PEM behavior. To access the Server Configuration dialog box, connect to the PEM server, and select **File > Server Configuration**. + +Enter a parameter name in the search box in the upper-right corner of the dialog to locate a specific parameter in the list. 
+ +To modify a parameter value, edit the content displayed in the **Value** field to the right of a parameter name. To save your changes, select the **Save** icon in the upper-right corner of the dialog box. + ## Server configuration parameters - reference You can use global configuration options to modify aspects of the PEM Server's behavior. The list of configuration parameters is subject to change. diff --git a/product_docs/docs/pem/9/monitoring_performance/dashboards.mdx b/product_docs/docs/pem/9/monitoring_performance/dashboards.mdx index c43da028347..959aef86d95 100644 --- a/product_docs/docs/pem/9/monitoring_performance/dashboards.mdx +++ b/product_docs/docs/pem/9/monitoring_performance/dashboards.mdx @@ -29,7 +29,7 @@ PEM displays performance statistics through a number of dashboards. Each dashboa ## Dashboards overview -The PEM client displays the Global Overview dashboard when it connects to the PEM server. Additional dashboards provide statistical information about monitored objects. +The PEM client displays the Global Overview dashboard when it connects to the PEM server. Additional dashboards provide statistical information about monitored objects. ## Accessing in-app help for UI details @@ -39,15 +39,49 @@ The in-app help provides comprehensive details about the PEM web interface, incl - To access context-sensitive help for a dialog box, select **?**. ### Opening dashboards +Dashboards are presented in a hierarchy comparable to the PEM client tree control. The dashboard for each object in the tree control displays the information for that object as well as for any monitored object that resides below that level in the tree control, if appropriate. + +Each dashboard header displays the date and time that the server was started, if relevant, the date and time that the dashboard was last updated, and the current number of triggered alerts. Navigation menus displayed in the dashboard header provide easy access to other dashboards. 
Menus are organized hierarchically. Only those menus appropriate for the object currently highlighted in the tree control are available. + +- Select **Global Overview** from any dashboard to return to the Global Overview dashboard. +- Select the name of an agent from the **Agents** menu to navigate to the Operating System Analysis dashboard for that agent. +- Select a server name from the **Servers** menu to navigate to the Server Analysis dashboard for that server. +- Select a database name from the **Databases** menu to navigate to the Database Analysis dashboard for that database. +- Use the **Dashboards** menu to navigate to informational dashboards at the global level or for the selected agent, server, or database. + +Dashboards display statistical information in the form of: + +- Tables that provide statistical information collected by a PEM probe. +- Pie charts that display information collected by the most recent execution of a probe. +- Bar graphs that display comparative statistics collected by the most recent execution of a probe. +- Line graphs that display statistical data collected by PEM probes. You can open a dashboard using either of these techniques: -- Select an active dashboard name from the **Management > Dashboards** menu. -- Right-click the name of a monitored object in the tree and select the name of the dashboard to review from the **Dashboards** menu. +- From the **Management > Dashboards** menu, select an active dashboard name. +- Right-click the name of a monitored object in the tree. From the **Dashboards** menu, select the name of the dashboard to review. Each dashboard is displayed on the **Monitoring** tab in the main panel of the client window. After opening a dashboard, you can navigate to other dashboards in the same tab. -Each dashboard header includes navigation menus that allow you to navigate to other dashboards. Use your browser's forward and back icons to scroll through previously viewed dashboards. 
Use **Refresh** to update the current dashboard. +Each dashboard header includes navigation menus that allow you to navigate to other dashboards. Use your browser's forward and back buttons to scroll through previously viewed dashboards. Use **Refresh** to update the current dashboard. + +To sort statistics that are provided in table form, select a column heading. Select it again to reverse the sort order. Each table offers a stable sort feature. For example, to sort a table by ascending Session ID in each user name group, sort first by the Session ID column, then sort by the User Name column. + +Hover over the upper-right corner of each graph, chart, or table to reveal the PEM client toolbar icons. Hover over an icon to display a tooltip that briefly explains the icon's functionality: + +- Use the **Refresh** icon to update the information displayed on a dashboard. +- Use the **Save Chart as Image** icon to save the selected chart as a `.jpeg` image. +- Use the **Full Screen** icon to enlarge the chart to reveal granular details about the charted data. +- Use the **Personalize the chart configuration** icon to access a control panel that allows you to select chart-specific display details. +- Hover over the **Explain** icon to review a description of the information shared in the graph or chart. + +In the lower-right corner of each graph or chart is a legend that identifies each item plotted in the graph or chart. + +If it's displayed, select the information icon in the upper-left corner of a chart to display a note about the chart content and, if applicable, a link that allows you to enable one or more probes that retrieve content for the chart. + +## Available dashboards + +PEM offers the following dashboards. For more information on each of the available dashboards, see the PEM client's online help. 
### Alerts dashboard @@ -134,9 +168,11 @@ Use the Dashboard Configuration dialog box to control attributes of the charts d Settings specified on the Dashboard Configuration dialog box are applied only to the current user's session. +After you specify your preferences, select **Save**. + ## Managing custom dashboards -PEM displays performance statistics through a number of system-defined dashboards. Each dashboard contains a series of summary views that contain charts, graphs, and tables that display statistics related to the selected object. You can use the **Manage Dashboards** tab to create and manage custom dashboards that display the information that's most relevant to your system. +PEM displays performance statistics on a number of system-defined dashboards. Each dashboard contains a series of summary views that contain charts, graphs, and tables that display statistics related to the selected object. You can use the **Manage Dashboards** tab to create and manage custom dashboards that display the information that's most relevant to your system. To create a custom dashboard, select **Create New Dashboard** located in the Quick Links section of the **Manage Dashboards** tab. diff --git a/product_docs/docs/pem/9/monitoring_performance/log_manager.mdx b/product_docs/docs/pem/9/monitoring_performance/log_manager.mdx index db59c49f8c7..4e171e0f790 100644 --- a/product_docs/docs/pem/9/monitoring_performance/log_manager.mdx +++ b/product_docs/docs/pem/9/monitoring_performance/log_manager.mdx @@ -18,11 +18,19 @@ You can use the PEM Log Manager to simplify server log configuration for Postgre - The format of log file entries - Log rotation properties +Before using Log Manager to define logging properties for a server, you must specify the name of the associated Advanced Server or PostgreSQL database server in the **Service ID** field on the **Advanced** tab of the New Server Registration or Properties dialog box. 
The server is available for configuration on the Server Selection dialog box only if you specify the name of the service in the **Service ID** field. + +For example, suppose you're setting logging preferences for an Advanced Server 9.4 instance that resides on a Linux host. Set the **Service ID** field on the **Advanced** tab of the **Properties** dialog box for the monitored server to `ppas-9.4`. + +!!! Note + - Log Manager depends on Settings and Server Log Configuration probes to populate all the fields in the wizard. Therefore, ensure that those probes for selected servers are enabled. In addition, set the execution frequency for those probes to a minimum to ensure that Log Manager reflects the latest log configurations. + - Rerun Log Manager if you make any manual changes related to logging in the configuration files, such as `postgresql.conf`. PEM doesn't reflect those changes automatically. + To configure logging for a Postgres instance, you must register the server as a PEM-managed server, and the registration information must include the name of a service script. 1. To open Log Manager, in the PEM client, select **Management > Log Manager**. The wizard opens and welcome message appears. Select **Next**. -1. The Server Selection dialog box displays a list of the server connections monitored by PEM. Select the check boxes next to the names of servers to which you want the Log Manager wizard to apply the specified configuration. Log Manager is disabled for any server displaying a red exclamation mark to the left of its name in the Server selection tree. A server might not be enabled for several reasons: +2. The Server Selection dialog box displays a list of the server connections monitored by PEM. Select the check boxes next to the names of servers to which you want the Log Manager wizard to apply the specified configuration. Log Manager is disabled for any server displaying a red exclamation mark to the left of its name in the Server selection tree. 
A server might not be enabled for several reasons: - Log Manager can configure only a server that specifies a service ID on the **Advanced** tab of the Properties dialog box. To provide a service ID: 1. In the tree, right-click the server name and select **Disconnect Server** from the context menu. @@ -36,7 +44,7 @@ To configure logging for a Postgres instance, you must register the server as a Select **Next**. -1. Use the Log configuration dialog box to specify how often to import log files to PEM and to specify log rotation details. +3. Use the Log configuration dialog box to specify how often to import log files to PEM and to specify log rotation details. Options in the **Import Logs** box specify how often to import the log files to PEM: @@ -55,7 +63,7 @@ To configure logging for a Postgres instance, you must register the server as a Select **Next**. -1. Use the Where to Log dialog box to specify where to write log files: +4. Use the Where to Log dialog box to specify where to write log files: - Select an option from the **Log Destination** box to specify a destination for the server log output: - Set the **stderr** switch to **Yes** to write log files to stderr. @@ -87,16 +95,18 @@ To configure logging for a Postgres instance, you must register the server as a Select **Next**. -1. Use the When to Log dialog box to specify the events that initiate a log file entry. The severity levels in order of severity, from most severe to least severe, are: - - - **panic** — Errors that cause all database sessions to abort. - - **fatal** — Errors that cause a session to abort. - - **log** — Information messages of interest to administrators. - - **error** — Errors that cause a command to abort. - - **warning** — Error conditions in which a command completes but might not perform as expected. - - **notice** — Items of interest to users. This is the default. - - **info** — Information implicitly requested by the user. 
- - **debug5** through **debug1** — Detailed debugging information useful to developers. +5. Use the When to Log dialog box to specify the events that initiate a log file entry. + - The severity levels in order of severity, from most severe to least severe, are: + + - **panic** — Errors that cause all database sessions to abort. + - **fatal** — Errors that cause a session to abort. + - **log** — Information messages of interest to administrators. + - **error** — Errors that cause a command to abort. + - **warning** — Error conditions in which a command completes but might not perform as expected. + - **notice** — Items of interest to users. This is the default. + - **info** — Information implicitly requested by the user. + - **debug5** through **debug1** — Detailed debugging information useful to developers. + - Use the **Client min messages** list to specify the lowest severity level of message sent to the client application. - Use the **Log min messages** list to specify the lowest severity level to write to the server log. - By default, when an error message is written to the server log, the text of the SQL statement that initiated the log entry isn't included. Use the **Log min error statement** list to specify a severity level that triggers SQL statement logging. If a message is of the specified severity or higher, the SQL statement that produced the message is written to the server log. @@ -106,9 +116,9 @@ To configure logging for a Postgres instance, you must register the server as a Select **Next**. -1. Use the What to Log dialog box to specify log entry options that are useful for debugging and auditing. +6. Use the What to Log dialog box to specify log entry options that are useful for debugging and auditing. 
- The switches in the **Debug options** box instruct the server to include information in the log files related to query execution that might be of interest to a developer: + Use the switches in the **Debug options** box to include information in the log files related to query execution that might be of interest to a developer: - Set the **Parse tree** switch to **Yes** to include the parse tree in the log file. - Set the **Rewriter output** switch to **Yes** to include query rewriter output in the log file. @@ -142,14 +152,14 @@ To configure logging for a Postgres instance, you must register the server as a Select **Next**. -1. Use the Schedule Logging Changes dialog box to specify when logging applies configuration changes: +7. Use the Schedule Logging Changes dialog box to specify when logging applies configuration changes: - Set the **Configure logging now** switch to **Yes** to enable your configuration preferences. The server restarts when you complete the Log Manager wizard. - Set **Configure logging now** to **No** to use the **Schedule it for some other time** calendar selector to specify a convenient time to apply logging configuration preferences and for the server to restart. When you apply the configuration changes specified by the Log Manager wizard, the server restarts, temporarily interrupting use of the database server for users. -1. Select **Finish** to exit the wizard. Either restart the server or schedule the server restart for the time specified on the scheduling dialog box. +8. Select **Finish** to exit the wizard. Either restart the server or schedule the server restart for the time specified on the scheduling dialog box. ## Reviewing the Server Log Analysis dashboard @@ -181,18 +191,13 @@ Before using the PEM Log Analysis Expert, you must specify a **Service ID** valu 1. To open the Postgres Log Analysis Expert wizard, in the PEM client, select **Management > Postgres Log Analysis Expert**. In the wizard's Welcome screen, select **Next**. 
-1. Select the check box next to an analyzer to specify for the Log Analysis Expert to prepare the corresponding table, chart, or graph. After making your selections, select **Next** to continue to the Server selection tree. + The wizard's Analyzer selection dialog box displays a list of analyzers from which you can select. Each analyzer generates a corresponding table, chart, or graph that contains information collected from the log files. -1. Use the tree to specify the servers you want the Postgres Log Analysis Expert to analyze. If you select multiple servers, the resulting report contains the corresponding result set for each server in a separate but continuous list. Select **Next** to continue to the Report options dialog box. +2. Select the check box next to an analyzer to specify for the Log Analysis Expert to prepare the corresponding table, chart, or graph. After making your selections, select **Next**. -1. Use the **Time Intervals** section to specify the time range for the Log Analysis Expert to analyze: - - - Set **Relative days** to **Yes** to enable the **(+/-) From date** field and specify the number of days before or after the date and time selected in the **From** field. - - Use the **From** field to specify the starting date and time for the analysis. - - Use the **To** field to specify the ending date and time for the analysis. - - Use the **(+/-) From date** selector to specify the number of days before or after the **From** date to include in the analysis. +3. Use the Server selection tree to specify the servers you want the Postgres Log Analysis Expert to analyze. If you select multiple servers, the resulting report contains the corresponding result set for each server in a separate but continuous list. Select **Next** to continue to the Report options dialog box. - Use the **Options** section to specify the analysis method and the maximum length of any resulting tables: +4. 
Use the **Options** section to specify the analysis method and the maximum length of any resulting tables: - Use the **Aggregate method** list to select the method used by the Log Analysis Expert to consolidate data for the selected time span: - **SUM** calculates a value that is the sum of the collected values for the specified time span. @@ -202,16 +207,24 @@ Before using the PEM Log Analysis Expert, you must specify a **Service ID** valu - Use the **Time span** field to specify the number of minutes that the analyzer incorporates into each calculation for a point on a graph. For example, if the time span is five minutes, and the aggregate method is **AVG**, each point on the given graph contains the average value of the activity that occurred in a five-minute time span. - Use the **Rows limit** field to specify the maximum number of rows to include in a table. + +5. Use the **Time Intervals** section to specify the time range for the Log Analysis Expert to analyze: + + - Set **Relative days** to **Yes** to enable the **(+/-) From date** field and specify the number of days before or after the date and time selected in the **From** field. + - Use the **From** field to specify the starting date and time for the analysis. + - Use the **To** field to specify the ending date and time for the analysis. + - Use the **(+/-) From date** selector to specify the number of days before or after the **From** date to include in the analysis. + After you specify the report options, select **Next** to continue to the Report destination dialog box. -1. You can select the default option and select **Finish** to view the Log Analysis Expert report in the PEM client's tabbed browser. Or select **Download the report** to save a copy of the report to an HTML file for later use. +6. You can select the default option and select **Finish** to view the Log Analysis Expert report in the PEM client's tabbed browser. 
Or select **Download the report** to save a copy of the report to an HTML file for later use. ## Reviewing the Postgres Log Analysis Expert report If you choose to review the report immediately, the Postgres Log Analysis Expert report is displayed in the PEM Client window. The report header displays: - The date and time that the report was generated - The time period that the report spans -- The aggregation method specified when defining the report. +- The aggregation method specified when defining the report The name of the server for which information is displayed appears at the start of each section of the report. @@ -220,3 +233,203 @@ The report displays the tables, graphs, and charts selected in the Log Analysis ![The Postgres Log Analysis Expert Report](../images/pem_log_analysis_expert_report.png) If the report contains an analysis of more than one monitored server, charts and tables are displayed in sets. First the graphs, tables, and charts that display statistics for one server appear. Then the graphics for the next server in the report appear. + +### Summary Statistics table + +The Summary Statistics table displays a summary of server activity for the selected server. + +| Row name | Description | +|-----------------------------|--------------| +| Number of unique queries | Count of unique queries made against the selected server in the specified time period. | +| Total queries | Count of queries made against the selected server in the specified time period. | +| Total queries duration | Amount of time used to execute queries against the server. | +| First query | Time, within the specified time period, that the first query executed against the server. | +| Last query | Time, within the specified time period, that the last query executed against the server. | +| Queries peak time | Point in time, within the specified time period, that query activity reached its highest level. | +| Number of events | Count of log events within the specified time period. 
| +| Number of unique events | Count of unique server events. | +| Total number of sessions | Count of the number of sessions recorded in the time period. | +| Total duration of sessions | Amount of time that sessions were connected during the specified time period. | +| Average sessions duration | Average length of each session. | +| Total number of connections | Number of user connections made to the server. | +| Total number of databases | Number of databases on the selected server. | + +### Hourly Statistics table + +The Hourly DML Statistics table displays the statistics related to the use of various DML commands (`SELECT`, `INSERT`, `UPDATE`, `DELETE`, `COPY`, and `FETCH`) within a one-hour period. To generate values in the Min Duration(sec), Max Duration(sec), and Avg Duration(sec) columns of this table, you must specify a value greater than or equal to 0 in the `log_min_duration_statement` configuration parameter. You can set the parameter either by modifying the `postgresql.conf` file with your editor of choice or by specifying a value of `0` or greater in the **Log Min Duration Statement** field of the Log Manager wizard. + +| Column name | Description | +|--------------------|--------------| +| Time | Start of the one-hour period for which data was analyzed. | +| Database | Name of the database in which the specified DML command executed. | +| Command Type | DML command type. | +| Total Count | Number of times that a command of the specified command type executed during the one-hour period analyzed by the report. | +| Min Duration(sec) | Shortest amount of time used by the server to respond to the specified command type, in seconds. | +| Max Duration(sec) | Longest amount of time used by the server to respond to the specified command type, in seconds. | +| Avg Duration(sec) | Average length of time used by the server when responding to the specified command type, in seconds. 
| + +### DML Statistics Timeline section + +The DML Statistics Timeline section of the Log Analysis Expert report displays information about DML statement usage. + +| Graphic | Description | +|------------|---------------| +| Line graph | Analysis of statement usage during the selected time period. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Percent of statement usage of each respective DML statement type during the selected time period. | + +### DDL Statistics Timeline section + +The DDL Statistics Timeline section of the Log Analysis Expert report displays information about DDL statement usage. + +| Graphic | Description | +|------------|---------------| +| Line graph | Analysis of statement usage during the selected time period. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Percent of statement use of each respective DDL statement type during the selected time period. | + +### Commit and Rollback Statistics Timeline section + +The Commit and Rollback Statistics Timeline section of the Log Analysis Expert report displays information about the `COMMIT`, `ROLLBACK`, and `SAVEPOINT` statements logged during the specified time period. + +| Graphic | Description | +|-------------|---------------| +| Line graph | Analysis of the commit and rollback activity during the specified time period. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Comparative percent of `COMMIT`, `SAVEPOINT`, or `ROLLBACK` statements executed during the specified time period. | + +### Checkpoint Statistics Timeline section + +The Checkpoint Statistics Timeline section of the Log Analysis Expert report displays information about the checkpoint operations logged during the specified time period. 
+ +| Graphic | Description | +|-------------|---------------| +| Line graph | Analysis of the checkpoint operation activity during the specified time period. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Comparative percent of different types of checkpoint activity logged during the specified time period. | + +### Log Event Statistics table + +The Log Event Statistics table lists log entries with a severity level of WARNING, ERROR, FATAL, PANIC, HINT, or CONTEXT. The level of logging detail for error messages is controlled by the `log_min_error_statement` parameter. You can set the parameter either by modifying the `postgresql.conf` file with your editor of choice or by specifying a value in the **Log Min Error Statement** field of the Log Manager wizard. + +| Column name | Description | +|-----------------|---------------| +| Error Severity | Severity level of the log entry. | +| Message | The log message. | +| Total Count | Number of times that the log entry occurred. | + +### Log Statistics table + +The Log Statistics table lists log entries that indicate an operational severity level of LOG, DETAIL, DEBUG, NOTICE, INFO, or STATEMENT. The level of logging detail for informational messages is controlled by the `log_min_messages` parameter. You can set the parameter either by modifying the `postgresql.conf` file with your editor of choice or by specifying a value in the **Log Min Messages** field of the Log Manager wizard. + +| Column name | Description | +|-----------------|--------------| +| Error Severity | Severity level of the log entry. | +| Total Count | Number of times that the log entry occurred. | + +### Temp Generated Queries table + +The Temp Generated Queries table displays a list of queries that created temporary files. + +| Column name | Description | +|-----------------------|--------------| +| Log Time | The time that the log entry was generated. 
| +| TempFile Size(Bytes) | The size of the temporary file in bytes. | +| Query | The text of the query that created the temporary file. | + +### Temp File Statistics Timeline graph + +The Temp File Statistics Timeline graph displays the size of temporary files over the specified time period. Hover over a specific point to view detailed information about that point on the graph. + +### Lock Statistics Timeline section + +The Lock Statistics Timeline section of the Log Analysis Expert report displays information about the locks held during the specified time period: + +| Graphic | Description | +|-------------|-----------------| +| Graph | Number of locks held at any given point during the time period. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Displays the relative percentage of each type of lock used during the selected time period. | + +### Waiting Statistics Timeline section + +The Waiting Statistics Timeline section of the Log Analysis Expert report displays information about DML statements that are waiting for a lock during the specified time period. + +| Graphic | Description | +|-------------|---------------| +| Graph | Number of DML statements that are waiting at any given point during the time period. Each colored line represents a statement type. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Relative percentage of each type of DML statement that waited for a lock during the selected time period. | + +### Idle Statistics Timeline section + +The Idle Statistics Timeline section of the Log Analysis Expert report displays information about the amount of time that a connection to the server is idle. An `IDLE` server is waiting for a connection from a client. A connection that is `IDLE in transaction` has started a transaction but hasn't yet committed or rolled back the transaction and is waiting for a command from the client. 
A session that is `IDLE in transaction (aborted)` started a transaction but hasn't yet committed or rolled back the transaction and is waiting for a command from the client. An error has occurred in the transaction, and the transaction can only be rolled back. + +| Graphic | Description | +|-------------|--------------| +| Graph | Times at which the server is `IDLE`, `IDLE in transaction`, and `IDLE in transaction (aborted)`. Hover over a specific point to view detailed information about that point on the graph. | +| Pie chart | Relative percentage of each type of lock used during the selected time period. | + +### Autovacuum Statistics table + +The Autovacuum Statistics table displays statistics about autovacuum activity on monitored servers. + +| Column name | Description | +|---------------|--------------| +| Log Time | Time that the autovacuum activity was written to the log. | +| Relation | Name of the table on which the autovacuum was performed. | +| Index Details | Number of index scans that were performed. | +| Page Details | Number of pages that were removed and number of pages that remain. | +| Tuple Details | Number of tuples that were removed and number of tuples that remain. | +| Buffer Usage | Number of buffers hit, missed, or dirty. | +| Read Rate | Average read rate in MBs per second. | +| System Usage | Percent of CPU time used performing autovacuum activities. | + +### Autoanalyze Statistics table + +The Autoanalyze Statistics table displays logged autoanalyze activity. + +| Column name | Description | +|---------------|-------------------| +| Log Time | Time that the autoanalyze activity was written to the log. | +| Relation | Name of the table on which the autoanalyze was performed. | +| System Usage | Percent of CPU time used performing autoanalyze activities. | + +### Slow Query Statistics table + +The Slow Query Statistics table displays the slowest queries executed on monitored servers. 
The table includes the number of entries specified in the **Rows Limit** field of the Log Analysis Expert.
| + +### Connections Overview Timeline section + +The Connections Overview Timeline section of the Log Analysis Expert report displays information about successful and unsuccessful connection attempts during the specified time period. + +| Graphic | Description | +|------------------|--------------| +| Timestamp graph | Number of server connections attempted and connections authenticated at any given point during the specified time period. Hover over a specific point to view detailed information about that point on the graph. | +| Summary pie chart | The relative percentage of connections attempted and connections authenticated during the specified time period. | diff --git a/product_docs/docs/pem/9/monitoring_performance/pem_remote_monitoring.mdx b/product_docs/docs/pem/9/monitoring_performance/pem_remote_monitoring.mdx index 83c91c51d10..dfeae973dad 100644 --- a/product_docs/docs/pem/9/monitoring_performance/pem_remote_monitoring.mdx +++ b/product_docs/docs/pem/9/monitoring_performance/pem_remote_monitoring.mdx @@ -25,7 +25,7 @@ PEM remote monitoring supports: | [Manage dashboards](dashboards/) | Limited | Some dashboards might not be able to show complete data. For example, the operating system information where the Postgres cluster is running isn't displayed as it isn't available. | | [Manage probes](probes/) | Limited | Some of the PEM probes don't return information, and some of the functionality might be affected. For details about probe functionality, see [PEM agent privileges](../managing_pem_agent/#agent-privileges). | | [Postgres Expert](../tuning_performance/postgres_expert/) | Limited | The Postgres Expert provides partial information as operating system information isn't available. | -| [Scheduled tasks](/pem/latest/pem_web_interface/#the-management-menu) | Limited | Scheduled tasks work only for Postgres clusters, and scripts run on a remote agent. 
| +| [Scheduled tasks](../pem_web_interface/#management-menu) | Limited | Scheduled tasks work only for Postgres clusters, and scripts run on a remote agent. | | [Core usage reports](../reports/#core-usage-report) | Limited | The Core Usage reports don't show complete information. For example, the platform, number of cores, and total RAM aren't displayed. | | [Audit manager](audit_manager/) | No | | | [Log manager](log_manager/) | No | | diff --git a/product_docs/docs/pem/9/pem_web_interface.mdx b/product_docs/docs/pem/9/pem_web_interface.mdx index bc2cb9215e7..6065d07937c 100644 --- a/product_docs/docs/pem/9/pem_web_interface.mdx +++ b/product_docs/docs/pem/9/pem_web_interface.mdx @@ -38,7 +38,7 @@ After the server installation completes, you can open the PEM interface in your Use the fields on the Postgres Enterprise Manager Login window to authenticate yourself with the PEM server: -- Provide the name of a `pem` database user in the **Username** field. For the first user connecting, this is the name provided when installing the PEM server. +- Provide the name of a pem database user in the **Username** field. For the first user connecting, this is the name provided when installing the PEM server. - Provide the password associated with the user in the **Password** field. @@ -66,11 +66,182 @@ The in-app help provides comprehensive details about the PEM web interface, incl - To access the landing page for the help, select **Help > Online help**. - To access context-sensitive help for a dialog box, select **?**. -## The PEM menu bar +## PEM client object browser + +The Browser tree control provides access to information and management options for the database objects that reside on each server. The tree control expands to display a hierarchical view of the servers and objects that are monitored by the PEM server. 
You can use context menu options, accessed by right-clicking nodes of the tree control, to create new objects and modify and delete existing objects if your role holds the required privileges. + +Expand nodes in the tree control to display a hierarchical view of the database objects that reside on a selected server: + +- Use the plus sign (+) to the left of a node to expand a segment of the tree control. +- Use the minus sign (-) to the left of a node to close that node. + +Context menu options can include one or more of the following selections: + +| Option | Action | +| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Add named restore point** | Create and enter the name of a restore point. | +| **Backup** | Open the [Backup](#tools-menu) dialog box to back up database objects. | +| **Backup Globals** | Open the [Backup Globals](#tools-menu) dialog box to back up cluster objects. | +| **Backup Server** | Open the [Backup Server](#tools-menu) dialog box to back up a server. | +| **Connect Server** | Establish a connection with the selected server. | +| **Create** | Access a context menu that provides context-sensitive selections. Your selection opens a Create dialog box for creating a new object. | +| **CREATE Script** | Open the [Query tool](/pem/latest/pem_query_tool/) to edit or view the CREATE script. | +| **Dashboards** | Select for quick access to PEM dashboards. | +| **Delete/Drop** | Delete the currently selected object from the server. | +| **Disconnect Database** | Terminate a database connection. | +| **Disconnect Server** | Refresh the currently selected object. | +| **Drop Cascade** | Delete the currently selected object and all dependent objects from the server. | +| **Debugging** | Access the Debugger tool. | +| **Grant Wizard** | Access the [Grant Wizard](#tools-menu) tool. 
| +| **Maintenance** | Open the [Maintenance](#management-menu) dialog box to VACUUM, ANALYZE, REINDEX, or CLUSTER. | +| **Management** | Access management tasks that are relevant to the node. | +| **Properties** | Review or modify the currently selected object's properties. | +| **Refresh** | Refresh the currently selected object. | +| **Reload Configuration** | Update configuration files without restarting the server. | +| **Restore** | Access the [Restore](#tools-menu) dialog box to restore database files from a backup. | +| **View Data** | Use the **View Data** option to access the data stored in a selected table with the **Data Output** tab of the Query tool. | + +The context-sensitive menus associated with Tables and nested Table nodes provides additional display options. + +| Option | Action | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Import/Export** | Open the Import/Export dialog box to import data to or export data from the selected table. | +| **Reset Statistics** | Reset statistics for the selected table. | +| **Scripts** | Open the [Query tool](/pem/latest/pem_query_tool/) to edit or view the selected script from the flyout menu. | +| **Truncate** | Remove all rows from a table. | +| **Truncate Cascade** | Remove all rows from a table and its child tables. | +| **View First 100 Rows** | Access the data grid that displays the first 100 rows of the selected table. | +| **View Last 100 Rows** | Access the data grid that displays the last 100 rows of the selected table. | +| **View All Rows** | Access the data grid that displays all rows of the selected table. | +| **View Filtered Rows** | Access the **Data Filter** popup to apply a filter to a set of data. 
| + +## PEM tabbed browser window + +The main panel of the PEM client contains a collection of tabs that display information about the object currently selected in the tree control. + +The **Dashboard** tab is context sensitive. When you navigate to the **Dashboard** tab from a server group or the PEM Agents node, the EDB Postgres Welcome window opens, where you can: + +- Select the **Add New Server** icon to open the [Create - Server dialog box](/pem/latest/registering_database_server/#manually-registering-a-database-server) to define a connection to a server. +- Select the **Configure PEM** icon to open the [Server Configuration dialog box](/pem/latest/managing_pem_server/#server-configuration) and modify server parameters. +- Select the **Getting Started** icon to open a new tab, displaying the PEM Getting Started section at the EnterpriseDB website. +- Select the **EDB Website** icon to navigate to the home page of the EnterpriseDB website. The EnterpriseDB website features news about upcoming events and other projects. +- Select the **PostgreSQL Website** icon to navigate to the PostgreSQL project website. The PostgreSQL site features news about recent releases and other project information. +- Select the **EDB Blogs** icon to navigate to the EDB Blog page, where you can review the most-recent employee posts to Postgres related blogs. + +Select the name of an agent or server and navigate to the **Dashboard** tab to review session or server activity for the currently selected object. + +When opened from the name of an agent or server, the **Dashboard** tab provides a graphical analysis of usage statistics: + +- The Server sessions or Database sessions graph displays the interactions with the server or database. +- The Transactions per second graph displays the commits, rollbacks, and total transactions per second that are taking place on the server or database. 
+- The Tuples In graph displays the number of tuples inserted, updated, and deleted on the server or database. +- The Tuples Out graph displays the number of tuples fetched and returned from the server or database. +- The Block I/O graph displays the number of blocks read from the file system or fetched from the buffer cache (but not the operating system's file system cache) for the server or database. +- The Server activity tabbed panel displays tables that contain session information, session locks, prepared transactions, and configuration. + +Navigate to the **Properties** tab to review the properties of the item currently selected in the tree control. + +The **SQL** tab displays the SQL code used to generate the object currently selected in the Browser tree control. + +The **Statistics** tab displays the statistics gathered for each object on the tree control. The statistics displayed in the table vary by the type of object that's highlighted. Select a column heading to sort the table by the data displayed in the column. Select it again to reverse the sort order. The following table lists some of the statistics that might be displayed. + +| Panel | Description | +| ------------------------- | ---------------------------------------------------------------------------------------------------------- | +| PID | The process ID associated with the row. | +| User | The name of the user that owns the object. | +| Database | The database name. | +| Backends | The number of current connections to the database. | +| Backend start | The start time of the backend process. | +| Xact Committed | The number of transactions committed to the database in the last week. | +| Xact Rolled Back | The number of transactions rolled back in the last week. | +| Blocks Read | The number of blocks read from memory in the last week, in MB. | +| Blocks Hit | The number of blocks hit in the cache in the last week, in MB. | +| Tuples Returned | The number of tuples returned in the last week. 
| +| Tuples Fetched | The number of tuples fetched in the last week. | +| Tuples Inserted | The number of tuples inserted into the database in the last week. | +| Tuples Updated | The number of tuples updated in the database in the last week. | +| Tuples Deleted | The number of tuples deleted from the database in the last week. | +| Last statistics reset | The time of the last statistics reset for the database. | +| Tablespace conflicts | The number of queries canceled because of recovery conflict with dropped tablespaces in database. | +| Lock conflicts | The number of queries canceled because of recovery conflict with locks in database. | +| Snapshot conflicts | The number of queries canceled because of recovery conflict with old snapshots in database. | +| Bufferpin conflicts | The number of queries canceled because of recovery conflict with pinned buffers in database. | +| Temporary files | The total number of temporary files, including those used by the statistics collector. | +| Size of temporary files | The size of the temporary files. | +| Deadlocks | The number of queries canceled because of a recovery conflict with deadlocks in database. | +| Block read time | The number of milliseconds required to read the blocks read. | +| Block write time | The number of milliseconds required to write the blocks read. | +| Size | The size of the selected database, in MB. | + +The **Dependencies** tab displays the objects on which the currently selected object depends. To ensure the integrity of the database structure, the server makes sure that you don't accidentally drop objects that other objects depend on. You must use `DROP CASCADE` to remove an object on which another object depends. + +The **Dependencies** table displays: + +- The **Type** field, which specifies the parent object type. +- The **Name** field, which specifies the identifying name of the parent object. 
+- The **Restriction** field, which describes the dependency relationship between the currently selected object and the parent. + +The **Dependents** tab displays a table of objects that depend on the object currently selected in the Browser tree. A dependent object can be dropped without affecting the object currently selected in the Browser tree control. + +- The **Type** field specifies the dependent object type. +- The **Name** field specifies the identifying name for the dependent object. +- The **Restriction** field describes the dependency relationship between the currently selected object and the parent. + +Navigate to the **Monitoring** tab to access information presented on [PEM dashboards](/pem/latest/monitoring_performance/dashboards/). Dashboards display statistical information about the objects monitored by the PEM server. + +PEM opens additional tabs when you access PEM functionality through the Management or Tools dialog boxes. Right-click the current tab and select from a context menu that allows you to customize the display for your working style: + +- Select **Remove Panel** to remove the currently selected panel. +- Select **Rename Panel** to rename the currently selected panel. +- Select **Detach Panel** to detach the currently selected panel, repositioning it for convenience. +- Select **Add Panel** and select any of the available options to add to the panels. + +The PEM client preserves any adjustments when you exit the program. To reset the PEM client to its original format, select **File > Reset Layout**. + +## Using chart, graph, and table controls + +Use the icons in the upper-right corner of each graphic on a PEM client dashboard to control, download, and customize the charts, graphs, and tables displayed in the PEM client. + +Use the **Refresh** icon ![refresh](../images/lgrefresh.png) to display the most recent content available from the PEM probes. 
+ +Select the **Download** icon ![download](../images/lgdownload.png) to download a .jpeg or .png image of the chart or graph. By default, the file is in .jpeg format. To save the file as a .png, use the **Personalize** icon to modify the download format. + +Select the **Fullscreen** icon ![fullscreen](../images/lgfullscreen.png) to expand the chart or graph to fill the main pane of the PEM client. + +Select the **Personalize** ![personal](../images/lgpersonalize.png) icon to modify the display properties of the chart or graph for your session only. + +Use the **Information** ![info](../images/lginformation.png) icon to access information about the chart or graph. + +### Personalizing a graphic + +When you select the **Personalize** icon, the Personalize chart configuration dialog box opens. + +Use controls on the Personalize chart configuration dialog box to modify the properties of the graphic: + +- Use the **Auto Refresh** control to increase or decrease the number of seconds between refreshes. +- Use the **Auto Refresh** field to specify the number of seconds between updates of the data displayed in the table or chart. +- If applicable, use the **Download as** field to indicate if you want to download a chart as a .jpeg image or a .png image. +- If applicable, use the **Colors** selectors to specify the display colors to use on a chart. +- If applicable, set the **Show Acknowledged Alerts** switch to **Yes** to indicate that you want the table to display alerts that you have acknowledged with a check box in the **Ack'ed** column. Set the field to **No** to indicate for the table to hide any acknowledged alerts. Acknowledged alerts aren't purged from the table content until the time specified in the alert definition passes. + +After personalizing the display properties, use the controls in the upper-right hand corner to apply your changes: + +- Use the **Delete** icon to reset the properties of the graphic to their default settings. 
Use the menu to specify whether to apply the change to only this instance of the graphic or to the same graphic when displayed on other dashboards. +- Use the **Save** icon to save your changes to the properties for the graphic. Use the menu to specify to apply the change to only this instance of the graphic or to the same graphic when displayed on other dashboards. + +## Browser toolbar + +The browser toolbar provides shortcuts for frequently used features, like View Data and the Query tool. This toolbar is visible on the Browser panel. Buttons get enabled/disabled based on the selected browser node. + +- Use the **Query tool** button to open the Query tool in the current database context. +- Use the **View Data** button to view/edit the data stored in a selected table. +- Use the **Filtered Rows** button to access the Data Filter popup to apply a filter to a set of data for viewing/editing. + +## PEM menu bar The PEM menu bar provides access to commands and features that you can use to manage your database servers and the objects that reside on those servers. If an option is disabled: -- The database server to which you are currently connected might not support the selected feature. +- The database server to which you're currently connected might not support the selected feature. - The selected menu option might not apply to the current object. @@ -78,7 +249,7 @@ The PEM menu bar provides access to commands and features that you can use to ma Context-sensitive menus across the top of the PEM web interface allow you to customize your environment and provide access to the enterprise management features of PEM. -### The File menu +### File menu Use the **File** menu to access the following options. @@ -87,119 +258,119 @@ Use the **File** menu to access the following options. | Preferences | Open the Preferences dialog box to customize your PEM client settings. | | Lock Layout | Open a submenu to select the level for locking the UI layout. 
| | Server Configuration | Open the Server Configuration dialog box and update your PEM server configuration settings. | -| Reset Layout | If a workspace panel is popped out by mistake or intentionally, you can reset it to the default using **Reset Layout**. | +| Reset Layout | If a workspace panel is popped out, you can reset it to the default using **Reset Layout**. | -### The Object menu +### Object menu The **Object** menu is context sensitive. Use the **Object** menu to access the following options. | Option | Action | | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------| -| Create | Access a menu that provides context-sensitive selections. | -| Refresh | Refresh the currently selected object. | -| Delete/Drop | Delete the currently selected object from the server. | -| Connect Server | Open the Connect to Server dialog box to establish a connection with a server. | -| CREATE Script | Open the Query tool to edit or view the selected script. | -| Disconnect Server | Refresh the currently selected object. | -| BART | Access a menu that provides options for removing BART configuration, taking a BART backup, or revalidating the BART configuration. | -| Clear Saved Password | If you saved the database server password, clear the saved password. Enabled only after password is saved. | -| Clear SSH Tunnel Password | If you saved the ssh tunnel password, clear the saved password. Enabled only after password is saved. | -| Drop Cascade | Delete the currently selected object and all dependent objects from the server. | -| Hide | Hide the currently selected group. To view hidden groups, enable the **Show hidden groups** option in Preferences. | -| Properties | Review or modify the currently selected object's properties. | -| Trigger(s) | Disable or enable triggers for the currently selected table. 
| -| Truncate | Remove all rows from a table (**Truncate**) or remove all rows from a table and its child tables (**Truncate Cascade**). | -| View Data | Access a menu that provides several options for viewing data. | -| Remove Server | Click to remove the selected server from the browser tree. | -| Delete/Drop | Click to delete the currently selected object from the server. | -| Connect Database | Click to connect to selected database. | -| Count Rows | Click to count the number of rows of the selected table. | -| Reset Statistics | Click to reset the statistics of the selected table. | -| Scripts | Click to CREATE, DELETE, INSERT, SELECT and UPDATE script for the selected table. | - -### The Management menu +| **Create** | Access a menu that provides context-sensitive selections. | +| **Refresh** | Refresh the currently selected object. | +| **Delete/Drop** | Delete the currently selected object from the server. | +| **Connect Server** | Open the Connect to Server dialog box to establish a connection with a server. | +| **CREATE Script** | Open the Query tool to edit or view the selected script. | +| **Disconnect Server** | Disconnect from the currently selected server. | +| **BART** | Access a menu that provides options for removing BART configuration, taking a BART backup, or revalidating the BART configuration. | +| **Clear Saved Password** | If you saved the database server password, clear the saved password. Enabled only after password is saved. | +| **Clear SSH Tunnel Password** | If you saved the ssh tunnel password, clear the saved password. Enabled only after password is saved. | +| **Drop Cascade** | Delete the currently selected object and all dependent objects from the server. | +| **Hide** | Hide the currently selected group. To view hidden groups, enable the **Show hidden groups** option in Preferences. | +| **Properties** | Review or modify the currently selected object's properties. 
| +| **Trigger(s)** | Disable or enable triggers for the currently selected table. | +| **Truncate** | Remove all rows from a table (**Truncate**) or remove all rows from a table and its child tables (**Truncate Cascade**). | +| **View Data** | Access a menu that provides several options for viewing data. | +| **Remove Server** | Remove the selected server from the browser tree. | +| **Delete/Drop** | Delete the currently selected object from the server. | +| **Connect Database** | Connect to selected database. | +| **Count Rows** | Count the number of rows of the selected table. | +| **Reset Statistics** | Reset the statistics of the selected table. | +| **Scripts** | CREATE, DELETE, INSERT, SELECT, and UPDATE script for the selected table. | + +### Management menu Use the **Management** menu to access the following PEM features. | Option | Action | | ------------------------------- | ------------------------------------------------------------------------------------------------------------------- | -| Audit Manager | Open the Audit Manager and configure auditing on your monitored servers. | -| Auto Discovery | Open the Auto Discovery dialog box to configure a PEM agent to locate and bind monitored database servers. | -| Capacity Manager | Open the Capacity Manager dialog box and analyze historical or project future resource usage. | -| Log Manager | Open the Log Manager dialog box and configure log collection for a server. | -| Manage Alerts | Open the **Manage Alerts** tab and create or modify behavior for alerts. | -| Manage Charts | Open the **Manage Charts** tab to create or modify PEM charts. | -| Manage Dashboards | Open the Manage Dashboards dialog box to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Manage Probes | Open the Manage Probes dialog box to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Postgres Expert | Open the Postgres Expert wizard and perform a static analysis of your servers and databases. 
| -| Postgres Log Analysis Expert | Open the Postgres Log Analysis Expert dialog box to analyze log file contents for usage trends. | -| Scheduled Tasks | Open the **Scheduled Tasks** tab and review tasks that are pending or recently completed. | -| Tuning Wizard | Open the Tuning Wizard dialog box to generate a set of tuning recommendations for your server. | -| Reports | Open the Reports dialog box to generate the system configuration report and core usage report for your server. | -| Schedule Alert Blackout... | Open the Schedule Alert Blackout dialog box and schedule the alerts blackout for your servers and agents. | - -### The Dashboards menu +| **Audit Manager** | Open the Audit Manager and configure auditing on your monitored servers. | +| **Auto Discovery** | Open the Auto Discovery dialog box to configure a PEM agent to locate and bind monitored database servers. | +| **Capacity Manager** | Open the Capacity Manager dialog box and analyze historical or project future resource usage. | +| **Log Manager** | Open the Log Manager dialog box and configure log collection for a server. | +| **Manage Alerts** | Open the **Manage Alerts** tab and create or modify behavior for alerts. | +| **Manage Charts** | Open the **Manage Charts** tab to create or modify PEM charts. | +| **Manage Dashboards** | Open the Manage Dashboards dialog box to create or modify custom PEM dashboards. | +| **Manage Probes** | Open the Manage Probes dialog box to create or modify the probes that gather PEM statistics. | +| **Postgres Expert** | Open the Postgres Expert wizard and perform a static analysis of your servers and databases. | +| **Postgres Log Analysis Expert** | Open the Postgres Log Analysis Expert dialog box to analyze log file contents for usage trends. | +| **Scheduled Tasks** | Open the **Scheduled Tasks** tab and review tasks that are pending or recently completed. | +| **Tuning Wizard** | Open the Tuning Wizard dialog box to generate a set of tuning recommendations for your server. 
| +| **Reports** | Open the Reports dialog box to generate the system configuration report and core usage report for your server. | +| **Schedule Alert Blackout** | Open the Schedule Alert Blackout dialog box and schedule the alerts blackout for your servers and agents. | + +### Dashboards menu Use the context-sensitive **Dashboards** menu to access dashboards. | Option | Action | | --------------------- | --------------------------------------------------------------------------------- | -| Alerts | Open the Alerts dashboard for the selected node. | -| Audit Log | Open the Audit Log Analysis dashboard for the selected node. | -| PGD Admin | Open the PGD Admin dashboard for the selected node. | -| PGD Group Monitoring | Open the PGD Group Monitoring dashboard for the selected node. | -| PGD Node Monitoring | Open the PGD Node Monitoring dashboard for the selected node. | -| Database Server | Open the Database Analysis dashboard for the selected node. | -| I/O Analysis | Open the I/O Analysis dashboard for the selected node. | -| Memory | Open the Memory Analysis dashboard for the selected node. | -| Object Activity | Open the Object Activity Analysis dashboard for the selected node. | -| Operating System | Open the Operating System Analysis dashboard for the selected node. | -| Probe Log | Open the Probe Log Analysis dashboard for the selected node. | -| Server Log | Open the Server Log Analysis dashboard for the selected node. | -| Session Activity | Open the Session Activity Analysis dashboard for the selected node. | -| Storage | Open the Storage Analysis dashboard for the selected node. | -| Streaming Replication | Open the Streaming Replication Analysis dashboard for the selected node. | -| System Wait | Open the System Wait Analysis dashboard for the selected node. | -| Session Waits | Open the Session Waits Analysis Dasbhoard for the selected node. | -| Custom Dashboards | Open the Custom Dashboards that list the custom dashboards configured by the user. 
| - -### The Tools menu +| **Alerts** | Open the Alerts dashboard for the selected node. | +| **Audit Log** | Open the Audit Log Analysis dashboard for the selected node. | +| **PGD Admin** | Open the PGD Admin dashboard for the selected node. | +| **PGD Group Monitoring** | Open the PGD Group Monitoring dashboard for the selected node. | +| **PGD Node Monitoring** | Open the PGD Node Monitoring dashboard for the selected node. | +| **Database Server** | Open the Database Analysis dashboard for the selected node. | +| **I/O Analysis** | Open the I/O Analysis dashboard for the selected node. | +| **Memory** | Open the Memory Analysis dashboard for the selected node. | +| **Object Activity** | Open the Object Activity Analysis dashboard for the selected node. | +| **Operating System** | Open the Operating System Analysis dashboard for the selected node. | +| **Probe Log** | Open the Probe Log Analysis dashboard for the selected node. | +| **Server Log** | Open the Server Log Analysis dashboard for the selected node. | +| **Session Activity** | Open the Session Activity Analysis dashboard for the selected node. | +| **Storage** | Open the Storage Analysis dashboard for the selected node. | +| **Streaming Replication** | Open the Streaming Replication Analysis dashboard for the selected node. | +| **System Wait** | Open the System Wait Analysis dashboard for the selected node. | +| **Session Waits** | Open the Session Waits Analysis dashboard for the selected node. | +| **Custom Dashboards** | Open the Custom Dashboards that list the custom dashboards configured by the user. | + +### Tools menu  Use the options on the **Tools** menu to access the following features. 
| Option | Action | | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Schema Diff | Open the Schema Diff dialog box to compare the schema objects between two database schemas. | -| Search objects | Open the Search Objects dialog box to search the database objects in a database. | -| Server | Access the various server-related tools such as Add Named Restore Point, Performance Diagnostics, Queue Server Startup, Queue Server Shutdown, Replace Cluster Primary, Switchover EFM Cluster, and SQL Profiler. | -| Query tool | Open the Query tool for the currently selected object. | -| Reload Configuration | Update configuration files without restarting the server. | -| Pause replay of WAL | Pause replay of the WAL log. | -| Resume replay of WAL | Resume replay of the WAL log. | -| Import/Export | Open the Import/Export Data dialog box to import or export data from a table. | -| Maintenance | Open the Maintenance dialog box to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Backup | Open the Backup dialog box to back up database objects. | -| Backup Globals | Open the Backup Globals dialog box to back up cluster objects. | -| Backup Server | Open the Backup Server dialog box to back up a server. | -| Restore | Open the Restore dialog box to restore database files from a backup. | -| Grant Wizard | Open the Grant Wizard tool. | -| Schedule Backup | Open the Schedule Backup dialog box for BART backups. | -| New ERD Porject(Beta) | Open the ERD Tool and start designing your database. | -| Storage Manager | Open the Storage manager to upload, delete or download the backup files. | - -### The Help menu +| **Schema Diff** | Open the Schema Diff dialog box to compare the schema objects between two database schemas. 
| +| **Search objects** | Open the Search Objects dialog box to search the database objects in a database. | +| **Server** | Access the various server-related tools such as Add Named Restore Point, Performance Diagnostics, Queue Server Startup, Queue Server Shutdown, Replace Cluster Primary, Switchover EFM Cluster, and SQL Profiler. | +| **Query tool** | Open the Query tool for the currently selected object. | +| **Reload Configuration** | Update configuration files without restarting the server. | +| **Pause replay of WAL** | Pause replay of the WAL log. | +| **Resume replay of WAL** | Resume replay of the WAL log. | +| **Import/Export** | Open the Import/Export Data dialog box to import or export data from a table. | +| **Maintenance** | Open the Maintenance dialog box to VACUUM, ANALYZE, REINDEX, or CLUSTER. | +| **Backup** | Open the Backup dialog box to back up database objects. | +| **Backup Globals** | Open the Backup Globals dialog box to back up cluster objects. | +| **Backup Server** | Open the Backup Server dialog box to back up a server. | +| **Restore** | Open the Restore dialog box to restore database files from a backup. | +| **Grant Wizard** | Open the Grant Wizard tool. | +| **Schedule Backup** | Open the Schedule Backup dialog box for BART backups. | +| **New ERD Project(Beta)** | Open the ERD Tool and start designing your database. | +| **Storage Manager** | Open the Storage manager to upload, delete or download the backup files. | + +### Help menu Use the options on the **Help** menu to access the online help documents or to review information about the PEM installation. | Option | Action | | --------------------------------- | -------------------------------------------------------------------------------- | -| Quick Search | Type your keywords in the Quick Search field. Typing atleast three characters will display all the matching possibilities under Menu items and the relevant documents under Help articles. 
Click on the options under Menu items to perform action of particular functionality or object. Click on any of the Help articles to open the help of that topic with highlighted text in a separate window. **Note**:- If any of the option under Menu items is disabled, then it will provide information via info icon. | -| Online Help | Open in-app documentation for Postgres Enterprise Manager. | -| REST API Reference | Open the REST API Reference. | -| EDB Website | Open the EDB website in a browser window. | -| About Postgres Enterprise Manager | Locate versioning and user information for Postgres Enterprise Manager. | +| **Quick Search** | Type your keywords in the Quick Search field. Typing at least three characters displays all the matching possibilities under Menu items and the relevant documents under Help articles. Select the options under Menu items to perform the action for a particular functionality or object. Select any of the Help articles to open the help of that topic with highlighted text in a separate window. **Note**: If any of the options under Menu items is disabled, then it provides information by way of an info icon. | +| **Online Help** | Open in-app documentation for Postgres Enterprise Manager. | +| **REST API Reference** | Open the REST API reference. | +| **EDB Website** | Open the EDB website in a browser window. | +| **About Postgres Enterprise Manager** | Locate versioning and user information for Postgres Enterprise Manager. | -## Controlling and customizing charts, graphs and tables +## Controlling and customizing charts, graphs, and tables  Use the icons in the upper-right corner of each graphic on a dashboard to control and customize the charts, graphs, and tables displayed in the PEM client for your current user session. 
diff --git a/product_docs/docs/pem/9/registering_agent.mdx b/product_docs/docs/pem/9/registering_agent.mdx index 80095bb8f2b..eb8ab03bf4b 100644 --- a/product_docs/docs/pem/9/registering_agent.mdx +++ b/product_docs/docs/pem/9/registering_agent.mdx @@ -12,9 +12,10 @@ redirects: You must register each PEM agent installed on a separate host with the PEM server. (The PEM agent is different from the PEM server host.) The registration process provides the PEM server with the information it needs to communicate with the agent. The PEM agent graphical installer for Windows supports self-registration for the agent. On a Linux host, you must use the pemworker utility to register the agent. -The PEM agent package installer places the PEM agent in the `/usr/edb/pem/agent/bin` directory. To register an agent, include the `--register-agent` keywords along with registration details when invoking the pemworker utility: +The PEM agent package installer places the PEM agent in the `/usr/edb/pem/agent/bin` directory. To register an agent, include the `--register-agent` keywords along with registration details when invoking the pemworker utility. Run the `pemworker` utility as root user. ```shell +# Running as root pemworker --register-agent ``` @@ -69,11 +70,12 @@ Each registered PEM agent must have a unique agent ID. For 9.0 and later: ### Examples -This example shows how to register the PEM agent overriding the default configurations in versions 9.0 and later. Use the `-o` option. +This example shows how to register the PEM agent overriding the default configurations in versions 9.0 and later. -Register the PEM agent using command line. Assign an `agent_id` value of 8 using the `-o` option. +Register the PEM agent using the command line. Assign an `agent_id` value of 8 using the `-o` option. 
-```shell{promptUser: root}{promptHost: pem}{outputLines: 2-8} +```shell +# Running as root /usr/edb/pem/agent/bin/pemworker --register-agent \ --pem-server pemserver \ --pem-user postgres \ @@ -86,9 +88,10 @@ Postgres Enterprise Manager Agent registered successfully! Because the `agent_id` of 8 is available, the PEM agent registers successfully. If the given ID is already in use by the existing agent, it throws an error. -Register the PEM agent using command line. Assign the existing SSL certificates and key files to avoid generating new ones for a particular agent ID in versions 9.0 and later. The SSL certificates and key files must be valid for the database user `agent` , where `` must be the same as provided using the command line. Use the `-o` option. +Register the PEM agent using the command line. Assign the existing SSL certificates and key files to avoid generating new ones for a particular agent ID in versions 9.0 and later. The SSL certificates and key files must be valid for the database user `agent` , where `` must be the same as provided using the command line. Use the `-o` option. -```shell{promptUser: root}{promptHost: pem}{outputLines: 1,3-7,9-18} +```shell +# Running as root # List the location of valid SSL certificates and key files. ls -l /root/.pem/agent5.* -rw------- 1 root root 2192 Nov 7 11:27 /root/.pem/agent5.crt @@ -116,6 +119,7 @@ Because the valid SSL certificates and key files are available at the given loca You can use the pemworker utility to unregister a PEM agent. To unregister an agent, include the `--unregister-agent` keywords along with the details when invoking the pemworker utility: ``` text +# Running as root pemworker --unregister-agent ``` @@ -208,6 +212,7 @@ To use a nonroot user account to register a PEM agent, you must first install th a. 
Update the values for the configuration file path and the user in the `pemagent` service file as superuser: ```ini + # Running as superuser sudo vi /usr/lib/systemd/system/pemagent.service [Service] Type=forking diff --git a/product_docs/docs/pem/9/troubleshooting.mdx b/product_docs/docs/pem/9/troubleshooting.mdx index 8fb184d7b97..7e80f7a6d70 100644 --- a/product_docs/docs/pem/9/troubleshooting.mdx +++ b/product_docs/docs/pem/9/troubleshooting.mdx @@ -41,7 +41,7 @@ Try installing the PEM server again. ### RHEL 7 ppc64le -After installing the PEM server on RHEL 7 ppc64le, you may see the following cipher error in the worker log file: +After installing the PEM server on RHEL 7 ppc64le, you might see the following cipher error in the worker log file: ```text WARNING: ConnectToPEM: unable to connect to PEM database: could not create SSL context: library has no ciphers @@ -110,3 +110,28 @@ Restart the HTTPD server after adding the statement: ```shell sudo systemctl restart httpd.service ``` +## Error connecting to PostgreSQL server + +When connecting to a PostgreSQL server, you might get one of these error messages. Review the message carefully. Each error message attempts to incorporate the information you need to resolve the problem. + +- **Connection to the server has been lost**: This error message indicates that the connection attempt took longer than the specified threshold. There might be a problem with the connection properties provided on the Server dialog box, network connectivity issues, or the server might not be running. + +- **Could not connect to Server: Connection refused**: There are two possible reasons for this error: + + - The database server isn't running. Start the server. + - The server isn't configured to accept TCP/IP requests on the address shown. + + For security reasons, a PostgreSQL server "out of the box" doesn't listen on TCP/IP ports. Instead, you must enable it to listen for TCP/IP requests. 
Add `tcpip = true` to the `postgresql.conf` file for Versions 7.3.x and 7.4.x. Add `listen_addresses='*'` for Version 8.0.x and above. These additions make the server accept connections on any IP interface. + + For more information, refer to the PostgreSQL documentation about [runtime configuration](http://www.postgresql.org/docs/current/interactive/runtime-config.html). + +- **FATAL: no pg_hba.conf entry**: If PEM displays this message when connecting, your server can be contacted correctly over the network, but it isn't configured to accept your connection. Your client wasn't detected as a legal user for the database. + + To connect to a server, configure the `pg_hba.conf` file on the database server to accept connections from the host of the PEM client. Modify the `pg_hba.conf` file on the database server host, and add an entry in the form: + + - `host template1 postgres 192.168.0.0/24 md5` for an IPV4 network + - `host template1 postgres ::ffff:192.168.0.0/120 md5` for an IPV6 network + + For more information, see the PostgreSQL documentation about [client authentication](http://www.postgresql.org/docs/current/interactive/client-authentication.html). + +- **FATAL: password authentication failed**: The `password authentication failed for user` error message indicates there might be a problem with the password you entered. Retry the password to confirm you entered it correctly. If the error message returns, make sure that you have the correct password, that you are authorized to access the server, and that the access was correctly configured in the server's `postgresql.conf` configuration file. 
diff --git a/product_docs/docs/pem/9/tuning_performance/postgres_expert.mdx b/product_docs/docs/pem/9/tuning_performance/postgres_expert.mdx index 6337e80edf3..2d5273ad8e8 100644 --- a/product_docs/docs/pem/9/tuning_performance/postgres_expert.mdx +++ b/product_docs/docs/pem/9/tuning_performance/postgres_expert.mdx @@ -24,19 +24,25 @@ To use the Postgres Expert wizard, in the PEM client select **Management > Postg 1. In the Welcome message, select **Next**. -1. The **Experts/Rules** tree lists the available experts and rules for identifying best practice deviations. Select the ones you want to use to evaluate the selected servers or databases. +2. The **Experts/Rules** tree lists the available experts and rules for identifying best practice deviations. Select the ones you want to use to evaluate the selected servers or databases. - The tree categorizes the rules under three experts: + The tree categorizes the rules under three experts: - - **Configuration Expert** evaluates the parameter settings of the server or operating system to find any adjustments that might improve system performance. - - **Schema Expert** evaluates schema objects, such as locating missing primary keys or foreign keys without indexes. - - **Security Expert** evaluates the system to find security vulnerabilities. + - **Configuration Expert** evaluates the parameter settings of the server or operating system to find any adjustments that might improve system performance. + - **Schema Expert** evaluates schema objects, such as locating missing primary keys or foreign keys without indexes. + - **Security Expert** evaluates the system to find security vulnerabilities. - After making your selections, select **Next**. +3. Use the check box to the left of an **Experts/Rules** to indicate for Postgres Expert to analyze the configuration of the selected servers for any best practice deviations related to the selected item. -1. Select or clear the servers and databases that you want to evaluate. 
If you select multiple servers or databases, then the resulting report contains a separate evaluation of each target. After you finish, select **Next**. + - Use the check box next to **Experts/Rules** to select or deselect all of the items listed in the tree control. + - Use the check box next to the name of an expert to select or deselect all of the configuration items listed under that node of the tree control. + - Use the check box next to a rule to select or deselect the rule for inclusion in the Postgres Expert report. + + After making your selections, select **Next**. -1. To view the report in the client, select **View the report now**. To save a copy to an HTML file, select **Download the report**. If you download the report, then the file saves to your default download directory. Select **Finish**. +4. Select or clear the servers and databases that you want to evaluate. If you select multiple servers or databases, then the resulting report contains a separate evaluation of each target. After you finish, select **Next**. + +5. To view the report in the client, select **View the report now**. To save a copy to an HTML file, select **Download the report**. If you download the report, then the file saves to your default download directory. Select **Finish**. ## Reviewing Postgres Expert recommendations diff --git a/product_docs/docs/pem/9/tuning_performance/tuning_wizard.mdx b/product_docs/docs/pem/9/tuning_performance/tuning_wizard.mdx index 9136349c7ed..e822f65dc46 100644 --- a/product_docs/docs/pem/9/tuning_performance/tuning_wizard.mdx +++ b/product_docs/docs/pem/9/tuning_performance/tuning_wizard.mdx @@ -15,68 +15,67 @@ Before using the Tuning Wizard, you must specify the name of the service in the The Tuning Wizard can make recommendations only for those servers that reside on the same server as their bound PEM agent. 
If you specify a value of **Yes** in the **Remote monitoring** field when defining your server, the server doesn't display in the Tuning Wizard tree. -1. To open the Tuning Wizard, in the PEM client select **Management > Tuning Wizard**. +1. To open the Tuning Wizard, in the PEM client select **Management > Tuning Wizard**. -1. Select **Next**. +2. Select **Next**. -1. When you expand the **Servers** node of the tree, a list of servers appears. All of these servers are currently monitored by PEM and available for tuning. Select a server to tune it. +3. When you expand the **Servers** node of the tree, a list of servers appears. All of these servers are currently monitored by PEM and available for tuning. Select a server to tune it. !!! Note If you don't provide the server's service name, then the Tuning Wizard displays a warning next to the server name on the tree. + - Select **Next**. +4. Select **Next**. -1. Select an option in the **Machine utilization** field to specify the type of work performed by each server. The type of work performed by the server determines how the Tuning Wizard allocates system resources: +5. Select an option in the **Machine utilization** field to specify the type of work performed by each server. The type of work performed by the server determines how the Tuning Wizard allocates system resources: - - Select **Dedicated** to dedicate the majority of the system resources to the database server. - - Select **Mixed use** to dedicate a moderate amount of system resources to the database server. - - Select **Developer workstation** to dedicate a relatively small amount of system resources to the database server. + - Select **Dedicated** to dedicate the majority of the system resources to the database server. + - Select **Mixed use** to dedicate a moderate amount of system resources to the database server. + - Select **Developer workstation** to dedicate a relatively small amount of system resources to the database server. 
+ - Select an option in the **Workload Selection** field to specify the type of workload typically performed on the selected server: + - Select **OLTP** if the selected server is used primarily to process online transaction workloads. + - Select **Mixed** if the selected server provides a mix of transaction processing and data reporting. + - Select **Data warehouse** if the server is used for heavy data reporting. - Select an option in the **Workload Selection** field to specify the type of workload typically performed on the selected server: +6. Select **Next**. - - Select **OLTP** if the selected server is used primarily to process online transaction workloads. - - Select **Mixed** if the selected server provides a mix of transaction processing and data reporting. - - Select **Data warehouse** if the server is used for heavy data reporting. +7. The tree on the Tuning Changes Summary dialog box displays the parameter setting modifications recommended for each server analyzed by the Tuning Wizard. Select the recommendations that you want the Tuning Wizard to apply or to include in a preview report: - Select **Next**. + - Select a parameter name, and the Tuning Wizard includes the parameter setting. + - Select the server name, and the Tuning Wizard includes all parameter setting recommendations for the specified server. -1. The tree on the Tuning Changes Summary dialog box displays the parameter setting modifications recommended for each server analyzed by the Tuning Wizard. Select the recommendations that you want the Tuning Wizard to apply or to include in a preview report: +8. Select **Next**. - - Select a parameter name and the Tuning Wizard includes the parameter setting. - - Select the server name and the Tuning Wizard includes all parameter setting recommendations for the specified server. +9. In the Schedule or Run? dialog box, either select a time for PEM to apply the recommended changes or generate a report that details the recommended changes. 
- Select **Next**. + PEM makes the recommended changes that you selected in the Tuning Changes Summary dialog box. If you choose to generate a report, then PEM creates a report. It contains a list of the current values and recommended changes to the configuration parameters as selected in the Tuning Changes Summary dialog box. To implement changes, you must open the Tuning Wizard a second time, selecting the parameters you want to modify in the Tuning Changes Summary dialog box. -1. In the **Schedule or Run?** dialog box, either select a time for PEM to apply the recommended changes or generate a report that details the recommended changes. - - PEM makes the recommended changes that you selected in the **Tuning Changes Summary** dialog box. If you choose to generate a report, then PEM creates a report. It contains a list of the current values and recommended changes to the configuration parameters as selected in the **Tuning Changes Summary** dialog box. To implement changes, you must open the Tuning Wizard a second time, selecting the parameters you want to modify in the **Tuning Changes Summary** dialog box. - - Select **Schedule changes** to view and specify your scheduling options. + Select **Schedule changes** to view and specify your scheduling options. You can set the **Configuration now?** slider to: - - **Yes** — Apply the Tuning Wizard's recommendations and restart the server. + - **Yes** — Apply the Tuning Wizard's recommendations and restart the server. - - **No** — Enable the **Time?** field where you can specify a date and time with the calendar selector. PEM applies the recommended changes and restarts the server at this time. + - **No** — Enable the **Time?** field where you can specify a date and time with the calendar selector. PEM applies the recommended changes and restarts the server at this time. - Select **Generate report** to view your report options. + Select **Generate report** to view your report options. 
- You can set the **View report now?** slider to: + You can set the **View report now?** slider to: - - **Yes** — Display the Tuning Wizard report onscreen. + - **Yes** — Display the Tuning Wizard report onscreen. - - **No** — Enable the **Save the report to file** field where you can specify a file name and location. + - **No** — Enable the **Save the report to file** field where you can specify a file name and location. -1. Select **Finish**. +10. Select **Finish**. -To confirm that the Tuning Wizard implemented the recommended changes, review the `postgresql.conf` file for the modified server. When the change is applied, the Tuning Wizard adds a comment above each modified parameter in the `postgresql.conf` file. + To confirm that the Tuning Wizard implemented the recommended changes, review the `postgresql.conf` file for the modified server. When the change is applied, the Tuning Wizard adds a comment above each modified parameter in the `postgresql.conf` file. -![Confirming a change in the postgresql.conf file](../images/tuning_wiz_confirm_chg.png) + ![Confirming a change in the postgresql.conf file](../images/tuning_wiz_confirm_chg.png) -You can also confirm a parameter value by querying the server. For example, to confirm the value of the shared_buffers parameter, open a SQL command line using either the Query tool (accessed through the Tools menu) or the psql client, and issue the command: + You can also confirm a parameter value by querying the server. For example, to confirm the value of the shared_buffers parameter, open a SQL command line using either the Query tool (accessed through the Tools menu) or the psql client, and issue the command: -```sql -SHOW shared_buffers; -``` + ```sql + SHOW shared_buffers; + ``` The value returned by the server confirms whether the parameter was modified. 
diff --git a/product_docs/docs/pgd/3.7/bdr/camo.mdx b/product_docs/docs/pgd/3.7/bdr/camo.mdx index 55f580e0fb5..09ecd9a1bb4 100644 --- a/product_docs/docs/pgd/3.7/bdr/camo.mdx +++ b/product_docs/docs/pgd/3.7/bdr/camo.mdx @@ -1,8 +1,724 @@ --- navTitle: Commit at Most Once -title: Commit At Most Once +title: Commit At Most Once (CAMO) originalFilePath: camo.md --- - +The objective of the Commit at Most Once (CAMO) feature is to prevent +the application from committing more than once. + +Without CAMO, when a client loses connection after COMMIT has been +submitted, the application might not receive a reply from the server +and will therefore be unsure whether the transaction committed or +not. + +The application cannot easily decide between the two options of: + +1) retrying the transaction with the same data, since this can in some + cases cause the data to be entered twice, or + +2) not retrying the transaction, and risk that the data doesn't get + processed at all. + +Either of those is a critical error with high value data. + +There are two ways to avoid this situation: + +One way to avoid this situation is to make sure that the transaction +includes at least one `INSERT` into a table with a unique index, but +that is dependent upon the application design and requires application- +specific error-handling logic, so is not effective in all cases. + +The CAMO feature in BDR offers a more general solution and does +not require an `INSERT` as described above. When activated via +`bdr.enable_camo` or `bdr.commit_scope`, the application will +receive a message containing the transaction identifier, if already +assigned. Otherwise, the first write statement in a transaction will +send that information to the client. +If the application sends an explicit COMMIT, the protocol +will ensure that the application will have received the notification +of the transaction identifier before the COMMIT is sent. 
+If the server does not reply to the COMMIT, the application can
+handle this error by using the transaction identifier to request
+the final status of the transaction from another BDR node.
+If the prior transaction status is known, then the application can safely
+decide whether or not to retry the transaction.
+
+CAMO works in one of two modes:
+
+- Pair mode
+- In combination with Eager All Node Replication
+
+In the Pair mode, CAMO works by creating a pair of partner nodes that
+are two BDR master nodes from the same top level BDR group. In this operation mode,
+each node in the pair knows the outcome of any recent transaction executed
+on the other peer, and especially (for our need) knows the outcome of any
+transaction disconnected during COMMIT.
+We may refer to the node that receives the transactions from the
+application as the "origin", and to the node that confirms these
+transactions as the "partner"; there is no difference in the CAMO
+configuration for the nodes in the CAMO pair. The pair is symmetric.
+
+When combined with [Eager All Node Replication](eager.md), CAMO
+enables every peer (that is a full BDR master node) to act as a CAMO partner.
+No designated CAMO partner needs to be configured in this mode.
+
+!!! Warning
+    CAMO requires changes to the user's application
+    to take advantage of the advanced error handling: it is not sufficient
+    to enable a parameter to gain protection. Reference client implementations
+    are provided in [Appendix E](camo_clients.md).
+
+## Requirements
+
+To utilize CAMO, an application must issue an explicit COMMIT message,
+issued as a separate request (not as part of a multi-statement request).
+CAMO cannot provide status for transactions issued from within procedures,
+or from single-statement transactions that use implicit commits.
+
+## Configuration
+
+Assuming an existing BDR cluster consisting of the two nodes `node1` and
+`node2`, both with a BDR enabled database called `bdrdemo`, and both part
+of the same node group `mygroup`, the following steps will configure the nodes
+to be CAMO partners for each other.
+
+1) Create the BDR cluster where nodes `node1` and `node2` are part of
+the `mygroup` node group.
+2) Run the function `bdr.add_camo_pair()` on one node:
+
+```sql
+SELECT bdr.add_camo_pair('mygroup', 'node1', 'node2');
+```
+3) Adjust the application to use the COMMIT error handling that CAMO suggests.
+
+We do not recommend enabling CAMO at server level, as this imposes
+higher latency for all transactions, even when not needed. Instead,
+we recommend selectively enabling it just for individual transactions
+by turning on CAMO at session or transaction level.
+
+To enable at session level, issue:
+
+```sql
+SET bdr.enable_camo = 'remote_commit_flush';
+```
+
+...or to enable for individual transactions, issue this after starting the
+transaction and before committing it:
+
+```sql
+SET LOCAL bdr.enable_camo = 'remote_commit_flush';
+```
+
+Valid values for `bdr.enable_camo` that enable CAMO are:
+
+* `off` (default)
+* `remote_write`
+* `remote_commit_async`
+* `remote_commit_flush` or `on`
+
+See the [Comparison](durability/#comparison) of synchronous replication
+modes for details about how each mode behaves.
+Setting `bdr.enable_camo = off` disables this feature, which is the default.
+
+### CAMO with Eager All Node Replication
+
+To use CAMO with Eager All Node Replication, no changes are required
+on either node. It is sufficient to enable the global commit
+scope after the start of the transaction - you do not need to set
+`bdr.enable_camo` at all.
+
+```sql
+BEGIN;
+SET LOCAL bdr.commit_scope = 'global';
+...
+COMMIT;
+```
+
+The application still needs to be adjusted to use COMMIT error
+handling as specified, but is free to connect to any available BDR
+node to query the transaction's status.
+
+## Failure Scenarios
+
+In this section, we analyze failure scenarios for different
+configurations. After comparing Local mode with CAMO mode in terms of
+Availability versus Consistency, we also provide three specific
+examples.
+
+### Data persistence at receiver side
+
+By default, a PGL writer operates in
+`bdr.synchronous_commit = off` mode when applying transactions
+from remote nodes. This holds true for CAMO as well, meaning that
+transactions are confirmed to the origin node possibly before reaching
+the disk of the CAMO partner. In case of a crash or hardware failure,
+it is possible for a confirmed transaction to not be recoverable on the
+CAMO partner by itself. This is not an issue as long as the CAMO
+origin node remains operational, as it will redistribute the
+transaction once the CAMO partner node recovers.
+
+This in turn means CAMO can protect against a single node failure,
+which is correct for Local mode as well as (or even in combination
+with) Remote Write.
+
+To cover an outage of both nodes of a CAMO pair, it is possible to use
+`bdr.synchronous_commit = local` to enforce a flush prior to the
+pre-commit confirmation. This does not work in combination with
+either Remote Write or Local mode, and has an additional performance
+impact due to additional I/O requirements on the CAMO partner in the
+latency sensitive commit path.
+
+### Local Mode
+
+When `synchronous_replication_availability = 'async'`, a node
+(i.e. master) will detect whether its CAMO partner is
+ready; if not, it will temporarily switch to **Local** mode.
+When in Local mode, a node commits transactions locally, until
+switching back to CAMO mode.
+
+This clearly does not allow COMMIT status to be retrieved, but does
+provide the option to choose availability over consistency. This mode
+can tolerate a single node failure. In case both nodes of a CAMO pair
+fail, they may choose incongruent commit decisions to maintain
+availability, leading to data inconsistencies.
+
+For a CAMO partner to switch to ready, it needs to be connected, and
+the estimated catchup interval needs to drop below
+`bdr.global_commit_timeout`. The current readiness status of a CAMO
+partner can be checked with `bdr.is_camo_partner_ready`, while
+`bdr.node_replication_rates` provides the current estimate of the catchup
+time.
+
+The switch from CAMO protected to Local Mode is only ever triggered by
+an actual CAMO transaction, either because the commit exceeds the
+`bdr.global_commit_timeout` or because the CAMO partner is already
+known to be disconnected at the time of commit. This switch is independent
+of the estimated catchup interval. If the CAMO pair is configured to
+require Raft to switch to Local Mode, this switch will require a
+majority of nodes to be operational (see the `require_raft` flag for
+[bdr.add_camo_pair](camo.md#adding-a-camo-pair)). This can prevent an
+isolated node in a split brain situation from switching to Local
+Mode. If `require_raft` is not set for the CAMO pair, the origin node
+will switch to Local Mode immediately.
+
+The detection on the sending node can be configured via PostgreSQL
+settings controlling keep-alives and timeouts on the TCP connection to
+the CAMO partner.
+The `wal_sender_timeout` is the amount of time that a node waits
+for a CAMO partner until switching to Local mode. Additionally,
+the `bdr.global_commit_timeout` setting puts a per-transaction
+limit on the maximum delay a COMMIT can incur due to the
+CAMO partner being unreachable. It may well be lower than the
+`wal_sender_timeout`, which influences synchronous standbys as
+well, and for which a good compromise between responsiveness and
+stability needs to be found.
+
+The switch from Local mode to CAMO mode depends on the CAMO partner
+node, which initiates the connection. The CAMO partner tries to
+re-connect at least every 30 seconds. After connectivity is
+reestablished, it may therefore take up to 30 seconds until the CAMO
+partner connects back to its origin node. Any lag that accumulated on
+the CAMO partner will further delay the switch back to CAMO protected
+mode.
+
+Unlike during normal CAMO operation, in Local mode there is no
+additional commit overhead. This can be problematic, as it allows the
+node to continuously process more transactions than the CAMO
+pair could normally process. Even if the CAMO partner eventually
+reconnects and applies transactions, its lag will only ever increase
+in such a situation, preventing re-establishing the CAMO protection.
+To artificially throttle transactional throughput, BDR provides the
+`bdr.camo_local_mode_delay` setting, which allows delaying COMMITs in
+Local mode by an arbitrary amount of time. We recommend measuring
+commit times in normal CAMO mode during expected workloads and
+configure this delay accordingly. The default is 5 ms, which reflects
+a local network and a relatively quick CAMO partner response.
+
+The choice of whether to allow Local mode should be taken in view of
+the architecture and the availability requirements. We expand this
+point by discussing three specific examples in some detail.
+
+### Example: Symmetric Node Pair
+
+In this section we consider a setup with two BDR nodes that are the
+CAMO partner of each other.
+This is the only possible configuration starting with BDR4.
+
+This configuration enables CAMO behavior on both nodes; it is
+therefore suitable for workload patterns where it is acceptable to
+write concurrently on more than one node, e.g. in cases that are not
+likely to generate conflicts.
+ +#### With Local Mode + +If Local mode is allowed, there is no single point of failure, and +when one node fails: + +- The other node can determine the status of all transactions that + were disconnected during COMMIT on the failed node. +- New write transactions are allowed: + - If the second node also fails, then the outcome of those + transactions that were being committed at that time will be + unknown. + +#### Without Local Mode + +If Local mode is not allowed, then each node requires the other node +for committing transactions, i.e. each node is a single point of +failure. Precisely, when one node fails: + +- The other node can determine the status of all transactions that + were disconnected during COMMIT on the failed node. +- New write transactions will be prevented until the node recovers. + +## Application Usage + +### Overview and Requirements + +Commit At Most Once relies on a retry loop and specific error handling +on the client side. There are three aspects to it: + +* The result of a transaction's COMMIT needs to be checked, and in + case of a temporary error, the client must retry the transaction. +* Prior to COMMIT, the client needs to retrieve a global + identifier for the transaction, consisting of a **node id** and a + **transaction id** (both 32 bit integers). +* Should the current server fail while attempting COMMIT of a transaction, + the application must connect to its CAMO partner, retrieve the status + of that transaction, and retry depending on the response. + +Note that the application needs to store the global transaction +identifier only for the purpose of verifying the transaction status in +case of disconnection during COMMIT. In particular, the application +does not need any additional persistence layer: if the application +fails, it only needs the information in the database to restart. + +### Adding a CAMO pair + +The function `bdr.add_camo_pair()` configures an existing pair of BDR +nodes to work as a symmetric CAMO pair. 
+
+The `require_raft` option controls how and when to switch to Local
+Mode in case `synchronous_replication_availability` is set to `async`,
+allowing such a switch in general.
+
+#### Synopsis
+
+```sql
+bdr.add_camo_pair(node_group text, left_node text, right_node text,
+                  require_raft bool)
+```
+
+!!! Note
+    The names `left` and `right` have no special meaning.
+
+!!! Note
+    Since BDR version 4.0, only symmetric CAMO configurations are
+    supported, i.e. both nodes of the pair act as a CAMO partner for
+    each other.
+
+### Changing the configuration of a CAMO pair
+
+The function `bdr.alter_camo_pair()` allows toggling the
+`require_raft` flag. Note that it is not currently possible to change
+the nodes of a pairing; `bdr.remove_camo_pair` followed by
+`bdr.add_camo_pair` must be used instead.
+
+#### Synopsis
+
+```sql
+bdr.alter_camo_pair(node_group text, left_node text, right_node text,
+                    require_raft bool)
+```
+
+### Removing a CAMO pair
+
+The function `bdr.remove_camo_pair()` removes a CAMO pairing of two
+nodes and disallows future use of CAMO transactions via
+`bdr.enable_camo` on those two nodes.
+
+#### Synopsis
+
+```sql
+bdr.remove_camo_pair(node_group text, left_node text, right_node text)
+```
+
+!!! Note
+    The names `left` and `right` have no special meaning.
+
+### CAMO partner connection status
+
+The function `bdr.is_camo_partner_connected` allows checking the
+connection status of a CAMO partner node configured in Pair mode.
+There currently is no equivalent for CAMO used in combination with
+Eager Replication.
+
+#### Synopsis
+
+```sql
+bdr.is_camo_partner_connected()
+```
+
+#### Return value
+
+A boolean value indicating whether the CAMO partner is currently
+connected to a WAL sender process on the local node and therefore able
+to receive transactional data and send back confirmations.
+ +### CAMO partner readiness + +The function `bdr.is_camo_partner_ready` allows checking the readiness +status of a CAMO partner node configured in Pair mode. Underneath, +this is what's used to trigger the switch to and from Local mode. + +#### Synopsis + +```sql +bdr.is_camo_partner_ready() +``` + +#### Return value + +A boolean value indicating whether the CAMO partner can reasonably be +expected to confirm transactions originating from the local node in a +timely manner (i.e. before `bdr.global_commit_timeout` expires). + +!!! Note + Note that this function queries the past or current state. A + positive return value is no indication for the CAMO partner being + able to confirm future transactions. + +### Fetch the CAMO partner + +This function shows the local node's CAMO partner (configured via Pair +mode). + +```sql +bdr.get_configured_camo_partner() +``` + +### Wait for consumption of the apply queue from the CAMO partner + +The function `bdr.wait_for_camo_partner_queue` is a wrapper of +`bdr.wait_for_apply_queue` defaulting to query the CAMO partner node. +It yields an error if the local node is not part of a CAMO pair. + +#### Synopsis + +```sql +bdr.wait_for_camo_partner_queue() +``` + +### Transaction status between CAMO nodes + +This function enables a wait for CAMO transactions to be fully resolved. + +```sql +bdr.camo_transactions_resolved() +``` + +### Transaction status query function + +The application should use the function: + +```sql +bdr.logical_transaction_status(node_id, xid, require_camo_partner) +``` + +...to check the status of a transaction which was being committed when the node +failed. + +With CAMO used in Pair mode, this function should only ever be used on +a node that's part of a CAMO pair. In combination with Eager +Replication, it may be used on all nodes. 
+ +In both cases, the function needs to be called within 15 minutes after +the commit was issued, as the CAMO partner needs to regularly purge +such meta-information and therefore cannot provide correct answers for +older transactions. + +Prior to querying the status of a transaction, this function waits for +the receive queue to be consumed and fully applied. This prevents +early negative answers for transactions that have already been +received, but not applied, yet. + +Note that despite its name, it is not always a read-only operation. +If the status is unknown, the CAMO partner will decide whether to +commit or abort the transaction, storing that decision locally to +ensure consistency going forward. + +Also note that the client must not call this function before +attempting to commit on the origin, otherwise the transaction may be +forced to be rolled back. + +#### Synopsis + +```sql +bdr.logical_transaction_status(node_id OID, + xid OID, + require_camo_partner BOOL DEFAULT true) +``` + +#### Parameters + +- `node_id` - the node id of the BDR node the transaction originates + from, usually retrieved by the client before COMMIT from the PQ + parameter `bdr.local_node_id`. +- `xid` - the transaction id on the origin node, usually retrieved by + the client before COMMIT from the PQ parameter `transaction_id` + (requires `enable_camo` to be set to `on`, `remote_write`, + `remote_commit_async`, or `remote_commit_flush`. See + [Commit at Most Once Settings](configuration.md#commit-at-most-once)) +- `require_camo_partner` - defaults to true and enables configuration + checks; may be set to false to disable these checks and query the + status of a transaction that was protected by Eager All Node + Replication. + +#### Return value + +The function will return one of these results: + +- `'committed'::TEXT` - the transaction has been committed, is visible + on both nodes of the CAMO pair and will eventually be replicated to + all other BDR nodes. 
No need for the client to retry it. + +- `'aborted'::TEXT` - the transaction has been aborted and will not be + replicated to any other BDR node. The client needs to either + retry it or escalate the failure to commit the transaction. + +- `'in progress'::TEXT` - the transaction is still in progress on this + local node and has neither been committed nor aborted, yet. Note + that the transaction may well be in the COMMIT phase, waiting for + the CAMO partner to confirm or deny the commit. The recommended + client reaction is to disconnect from the origin node and reconnect + to the CAMO partner to query that instead. See the + `isTransactionCommitted` method of the + [reference clients](camo_clients.md). With a load balancer or proxy + in between, where the client lacks control over which node gets + queried, the client can only poll repeatedly until the status + switches to either `'committed'` or `'aborted'`. + + For Eager All Node Replication, peer nodes yield this result for + transactions that are not yet committed or aborted. This means that + even transactions not yet replicated (or not even started on the + origin node) may yield an `in progress` result on a peer BDR node in + this case. However, the client must not query the transaction + status prior to attempting to commit on the origin. + +- `'unknown'::TEXT` - the transaction specified is unknown, either + because it is in the future, not replicated to that specific node + yet, or too far in the past. The status of such a transaction is + not yet or no longer known. This return value is a sign of improper + use by the client. + +The client must be prepared to retry the function call on error. + +### Connection pools and proxies + +The effect of connection pools and proxies needs to be considered when +designing a CAMO cluster. A proxy may freely distribute transactions +to all nodes in the commit group (i.e. to both nodes of a CAMO pair or +to all BDR nodes in case of Eager All Node Replication). 
+ +Care needs to be taken to ensure that the application fetches +the proper node id: when using session pooling, the client remains +connected to the same node, so the node id remains constant for the +lifetime of the client session. However, with finer-grained transaction +pooling, the client needs to fetch the node id for every transaction (as +in the example given below). + +A client that is not directly connected to the BDR nodes might not even +notice a failover or switchover, but can always use the +`bdr.local_node_id` parameter to determine which node it is currently +connected to. In the crucial situation of a disconnect during COMMIT, +the proxy must properly forward that disconnect as an error to the +client applying the CAMO protocol. + +For CAMO in `remote_write` mode, a proxy that potentially switches +between the CAMO pairs must use the `bdr.wait_for_camo_partner_queue` +function to prevent stale reads. + +HARP is the only proxy that supports all of the above requirements. +PgBouncer and HAproxy can work with CAMO, but do not support CAMO's +`remote_write` mode. + +### Example + +The following example demonstrates what a retry loop of a CAMO aware +client application should look like in C-like pseudo-code. It expects +two DSNs `origin_dsn` and `partner_dsn` providing connection information. +These usually are the same DSNs as used for the initial call to +`bdr.create_node`, and can be looked up in `bdr.node_summary`, column +`interface_connstr`. + +``` +PGconn *conn = PQconnectdb(origin_dsn); + +loop { + // start a transaction + PQexec(conn, "BEGIN"); + + // apply transactional changes + PQexec(conn, "INSERT INTO ..."); + ... 
+
+    // store a globally unique transaction identifier
+    node_id = PQparameterStatus(conn, "bdr.local_node_id");
+    xid = PQparameterStatus(conn, "transaction_id");
+
+    // attempt to commit
+    res = PQexec(conn, "COMMIT");
+    if (PQresultStatus(res) == PGRES_COMMAND_OK)
+        return SUCCESS;
+    else if (PQstatus(conn) == CONNECTION_BAD)
+    {
+        // Re-connect to the partner
+        conn = PQconnectdb(partner_dsn);
+        // Check if successfully reconnected
+        if (!connectionEstablished())
+            panic();
+
+        // Check the attempted transaction's status
+        sql = "SELECT bdr.logical_transaction_status($node_id, $xid)";
+        txn_status = PQexec(conn, sql);
+        if (txn_status == "committed")
+            return SUCCESS;
+        else
+            continue;   // to retry the transaction on the partner
+    }
+    else
+    {
+        // The connection is intact, but the transaction failed for some
+        // other reason.  Differentiate between permanent and temporary
+        // errors.
+        if (isPermanentError())
+            return FAILURE;
+        else
+        {
+            // Determine an appropriate delay to back-off to account for
+            // temporary failures due to congestion, so as to decrease
+            // the overall load put on the servers.
+            sleep(increasing_retry_delay);
+
+            continue;
+        }
+    }
+}
+```
+
+This example needs to be extended with proper logic for connecting, including
+retries and error handling. If using a load balancer
+(e.g. PgBouncer), re-connecting can be implemented by simply using
+`PQreset`. Ensure that the load balancer only
+ever redirects a client to a CAMO partner and not any other BDR node.
+
+In practice, an upper limit of retries is recommended. Depending on the
+actions performed in the transaction, other temporary errors may be
+possible and need to be handled by retrying the transaction depending
+on the error code, similarly to the best practices on deadlocks or on
+serialization failures while in `SERIALIZABLE` isolation mode.
+
+Please see the [reference client implementations](camo_clients.md) provided
+as part of this documentation.
+ +## Interaction with DDL and global locks + +Transactions protected by CAMO may contain DDL operations. Note +however that DDL uses global locks, which already provide some +synchronization among nodes; see +[DDL Locking Details](ddl.md#ddl-locking-details) for more +information. + +Combining CAMO with DDL not only imposes a higher latency, but also +increases the chance of global deadlocks. We therefore recommend using a +relatively low `bdr.global_lock_timeout`, which aborts the DDL and +therefore resolves a deadlock in a reasonable amount of time. + +### Non-transactional DDL + +The following DDL operations are not allowed within a transaction +block and therefore cannot possibly benefit from CAMO protection. For +these, CAMO is automatically disabled internally: + +* all concurrent index operations (`CREATE`, `DROP`, and `REINDEX`) +* `REINDEX DATABASE`, `REINDEX SCHEMA`, and `REINDEX SYSTEM` +* `VACUUM` +* `CLUSTER` without any parameter +* `ALTER TABLE DETACH PARTITION CONCURRENTLY` +* `ALTER TYPE [enum] ADD VALUE` +* `ALTER SYSTEM` +* `CREATE` and `DROP DATABASE` +* `CREATE` and `DROP TABLESPACE` +* `ALTER DATABASE [db] TABLESPACE` + +## CAMO Limitations + +CAMO is designed to query the results of a recently failed COMMIT on +the origin node, so in case of disconnection, the application should +be coded to immediately request the transaction status from the CAMO partner. +There should be as little delay as possible after the failure before +requesting the status. Applications should not rely on CAMO decisions +being stored for longer than 15 minutes. + +If the application forgets the global identifier assigned, for example +as a result of a restart, there is no easy way to recover +that. Therefore, it is recommended applications wait for outstanding +transactions to terminate before shutting down. + +For the client to apply proper checks, a transaction protected by CAMO +cannot be a single statement with implicit transaction control. 
Nor +is it possible to use CAMO with a transaction-controlling procedure or +within a `DO` block that tries to start or end transactions. + +Changing the CAMO partners in a CAMO pair is not currently possible. +It's only possible to add or remove a pair. +Adding or removing a pair does not need a restart of Postgres or even a +reload of the configuration. + +CAMO resolves commit status but does not yet resolve pending +notifications on commit. CAMO and Eager replication options do not +allow the `NOTIFY` SQL command or the `pg_notify()` function, +nor do they allow `LISTEN` or `UNLISTEN`. + +Replacing a crashed and unrecoverable BDR node with its physical +standby is not currently supported in combination with CAMO-protected +transactions. + +Also, CAMO does not currently work together with the Decoding Worker. +Installations using CAMO must keep `enable_wal_decoder` disabled for +the BDR node group using CAMO. + +Legacy BDR synchronous replication uses a mechanism for transaction confirmation +different from CAMO. The two are not compatible and must not be used +together. Therefore, a CAMO partner must not be configured in +`synchronous_standby_names`. Using synchronous replication to a +non-BDR node acting as a physical standby is well possible. + +When replaying changes, CAMO transactions may detect conflicts just +the same as other transactions. If timestamp conflict detection is used, +the CAMO transaction uses the timestamp of the prepare on the origin +node, which is before the transaction becomes visible on the origin +node itself. + +## Performance Implications + +CAMO extends Postgres' replication protocol by adding an additional +message round trip at commit. Applications should expect a higher +commit latency than with asynchronous replication, mostly determined +by the round trip time between involved nodes. Increasing the number +of concurrent sessions can help to increase parallelism to still +obtain reasonable transaction throughput. 
+ +The CAMO partner confirming transactions needs to store transaction +states. Again, compared to non-CAMO operation, this might require an +additional seek for each transaction applied from the origin. + +## Client Application Testing + +Proper use of CAMO on the client side is not trivial; we strongly +recommend testing the application behavior in combination with the BDR +cluster against failure scenarios such as node crashes or network +outages. + diff --git a/product_docs/docs/pgd/3.7/bdr/configuration.mdx b/product_docs/docs/pgd/3.7/bdr/configuration.mdx index 4bbee97a5ef..071539ef68c 100644 --- a/product_docs/docs/pgd/3.7/bdr/configuration.mdx +++ b/product_docs/docs/pgd/3.7/bdr/configuration.mdx @@ -37,7 +37,7 @@ which vary according to the size and scale of the cluster. - `max_replication_slots` - Same as `max_wal_senders`. - `wal_sender_timeout` and `wal_receiver_timeout` - Determine how quickly an origin considers its CAMO partner as disconnected or - reconnected; see [CAMO Failure Scenarios](/pgd/latest/bdr/camo/#failure-scenarios) for + reconnected; see [CAMO Failure Scenarios](/pgd/3.7/bdr/camo/#failure-scenarios) for details. Note that in normal running for a group with N peer nodes, BDR will require diff --git a/product_docs/docs/pgd/3.7/bdr/functions.mdx b/product_docs/docs/pgd/3.7/bdr/functions.mdx index 100fdce4f61..41bbb6f4a13 100644 --- a/product_docs/docs/pgd/3.7/bdr/functions.mdx +++ b/product_docs/docs/pgd/3.7/bdr/functions.mdx @@ -37,7 +37,7 @@ currently in use. Version numbers are monotonically increasing, allowing this value to be used for less-than and greater-than comparisons. The following formula is used to turn the version number consisting of -major version, minor verion and patch release into a single numerical +major version, minor version and patch release into a single numerical value: ``` @@ -117,7 +117,7 @@ function to prevent stale reads. 
For convenience, BDR provides a special variant of this function for CAMO and the CAMO partner node, see -[bdr.wait_for_camo_partner_queue](/pgd/latest/bdr/camo/#wait-for-consumption-of-the-apply-queue-from-the-camo-partner). +[bdr.wait_for_camo_partner_queue](/pgd/3.7/bdr/camo/#wait-for-consumption-of-the-apply-queue-from-the-camo-partner). In case a specific LSN is given, that's the point in the recovery stream from the peer to wait for. This can be used in combination @@ -517,3 +517,112 @@ bdr.global_advisory_unlock(key1 integer, key2 integer) - `key1` - first part of the composite key. - `key2` - second part of the composite key. + +## Monitoring functions + +### bdr.monitor_group_versions + +To provide a cluster-wide version check, this function uses +BDR version information returned from the view +`bdr.group_version_details`. + +#### Synopsis + +```sql +bdr.monitor_group_versions() +``` + +#### Notes + +This function returns a record with fields `status` and `message`, +as explained in [Monitoring](monitoring/#monitoring-bdr-versions). + +This function calls `bdr.run_on_all_nodes()`. + +### bdr.monitor_group_raft + +To provide a cluster-wide Raft check, this function uses +BDR Raft information returned from the view +`bdr.group_raft_details`. + +#### Synopsis + +```sql +bdr.monitor_group_raft() +``` + +#### Notes + +This function returns a record with fields `status` and `message`, +as explained in [Monitoring](monitoring/#monitoring-raft-consensus). + +This function calls `bdr.run_on_all_nodes()`. + +### bdr.monitor_local_replslots + +This function uses replication slot status information returned from the +view `pg_replication_slots` (slot active or inactive) to provide a +local check considering all replication slots except the BDR group +slots. 
+ +#### Synopsis + +```sql +bdr.monitor_local_replslots() +``` + +#### Notes + +This function returns a record with fields `status` and `message`, +as explained in [Monitoring replication slots](monitoring/#monitoring-replication-slots). + +### bdr.wal_sender_stats + +If the [decoding worker](nodes#decoding-worker) is enabled, this +function shows information about the decoder slot and current LCR +(logical change record) segment file being read by each WAL sender. + +#### Synopsis + +```sql +bdr.wal_sender_stats() +``` + +#### Output columns + +- `pid` — PID of the WAL sender (corresponds to `pg_stat_replication`'s `pid` column). + +- `is_using_lcr` — Whether the WAL sender is sending LCR files. The next columns are `NULL` if `is_using_lcr` is `FALSE`. + +- `decoder_slot_name` — The name of the decoder replication slot. + +- `lcr_file_name` — The name of the current LCR file. + + +### bdr.get_decoding_worker_stat + +If the [decoding worker](nodes#decoding-worker) is enabled, this function +shows information about the state of the decoding worker associated with the +current database. This also provides more granular information about decoding +worker progress than is available via `pg_replication_slots`. + +#### Synopsis + +```sql +bdr.get_decoding_worker_stat() +``` + +#### Output columns + +- `pid` — The PID of the decoding worker (corresponds to the column `active_pid` in `pg_replication_slots`). + +- `decoded_upto_lsn` — LSN up to which the decoding worker read transactional logs. + +- `waiting` — Whether the decoding worker is waiting for new WAL. + +- `waiting_for_lsn` — The LSN of the next expected WAL. + +#### Notes + +For further details, see [Monitoring WAL senders using LCR](monitoring/#monitoring-wal-senders-using-lcr). 
+ diff --git a/product_docs/docs/pgd/3.7/bdr/monitoring.mdx b/product_docs/docs/pgd/3.7/bdr/monitoring.mdx index d7ef2808774..d198379f53e 100644 --- a/product_docs/docs/pgd/3.7/bdr/monitoring.mdx +++ b/product_docs/docs/pgd/3.7/bdr/monitoring.mdx @@ -276,7 +276,7 @@ subscription_status | replicating If the [Decoding Worker](nodes/#decoding-worker) is enabled, information about the current LCR (`Logical Change Record`) file for each WAL sender can be monitored -via the function [bdr.wal_sender_stats](/pgd/latest/bdr/functions/#bdrwal_sender_stats), +via the function [bdr.wal_sender_stats](/pgd/3.7/bdr/functions/#bdrwal_sender_stats), e.g.: ``` @@ -294,7 +294,7 @@ This will be the case if the Decoding Worker is not enabled, or the WAL sender i serving a [logical standby](nodes/#logical-standby-nodes). Additionally, information about the Decoding Worker can be monitored via the function -[bdr.get_decoding_worker_stat](/pgd/latest/bdr/functions/#bdrget_decoding_worker_stat), e.g.: +[bdr.get_decoding_worker_stat](/pgd/3.7/bdr/functions/#bdrget_decoding_worker_stat), e.g.: ``` postgres=# SELECT * FROM bdr.get_decoding_worker_stat(); diff --git a/product_docs/docs/pgd/3.7/bdr/upgrades/index.mdx b/product_docs/docs/pgd/3.7/bdr/upgrades/index.mdx index 359205a077c..53fdb230e87 100644 --- a/product_docs/docs/pgd/3.7/bdr/upgrades/index.mdx +++ b/product_docs/docs/pgd/3.7/bdr/upgrades/index.mdx @@ -242,7 +242,7 @@ installed in the system. The current version of pglogical can be checked using: SELECT pglogical.pglogical_version(); ``` -Always check the [monitoring](monitoring) after upgrade +Always check the [monitoring](../monitoring) after upgrade of a node to confirm that the upgraded node is working as expected. ## Database Encoding @@ -263,7 +263,7 @@ application schema upgrade as documented in the following section. ## Rolling Application Schema Upgrades By default, DDL will automatically be sent to all nodes. 
This can be -controlled manually, as described in [DDL Replication](ddl), which +controlled manually, as described in [DDL Replication](../ddl), which could be used to create differences between database schemas across nodes. BDR is designed to allow replication to continue even while minor differences exist between nodes. These features are designed to allow @@ -276,7 +276,7 @@ standby nodes for reporting or testing. on production clusters. Extensive testing is advised. Details of this are covered here -[Replicating between nodes with differences](appusage). +[Replicating between nodes with differences](../appusage). When one node runs DDL that adds a new table, nodes that have not yet received the latest DDL will need to cope with the extra table. @@ -335,4 +335,4 @@ However, in controlled DBA environments, it is possible to change the type of a column to an automatically castable one by adopting a rolling upgrade for the type of this column in a non-replicated environment on all the nodes, one by one. More details are provided in the -[ALTER TABLE](ddl) section. +[ALTER TABLE](../ddl#alter-table) section. diff --git a/product_docs/docs/pgd/3.7/harp/03_installation.mdx b/product_docs/docs/pgd/3.7/harp/03_installation.mdx index b12a5d2b083..ef9ff6a3bcb 100644 --- a/product_docs/docs/pgd/3.7/harp/03_installation.mdx +++ b/product_docs/docs/pgd/3.7/harp/03_installation.mdx @@ -25,7 +25,7 @@ version as listed here. The easiest way to install and configure HARP is to use the EDB TPAexec utility for cluster deployment and management. For details on this software, see the -[TPAexec product page](/pgd/latest/deployments/tpaexec/). +[TPAexec product page](/tpa/latest/). !!! 
Note TPAExec is currently available only through an EULA specifically dedicated diff --git a/product_docs/docs/pgd/3.7/pglogical/subscriptions/index.mdx b/product_docs/docs/pgd/3.7/pglogical/subscriptions/index.mdx index 3e5a5a1e9c7..32090912e28 100644 --- a/product_docs/docs/pgd/3.7/pglogical/subscriptions/index.mdx +++ b/product_docs/docs/pgd/3.7/pglogical/subscriptions/index.mdx @@ -360,7 +360,7 @@ Switch the subscription to use a different interface to connect to the provider This is how you change the address, port etc that a subscription uses when connecting to a provider. -See [`pglogical.alter_node_create_interface()`](/pgd/3.7/pglogical/nodes/#pglogical_alter_node_add_interface) +See [`pglogical.alter_node_create_interface()`](/pgd/3.7/pglogical/nodes/#pglogicalalter_node_add_interface) for usage. #### Synopsis diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx index 861b99a462f..7f414ec4bf8 100644 --- a/product_docs/docs/pgd/5/cli/installing_cli.mdx +++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx @@ -43,3 +43,7 @@ By default, `pgd-cli-config.yml` is located in the `/etc/edb/pgd-cli` directory. 2. `$HOME/.edb/pgd-cli` If you rename the file or move it to another location, specify the new name and location using the optional `-f` or `--config-file` flag when entering a command. See the [sample use case](/pgd/latest/cli/#passing-a-database-connection-string). + +!!! Note Avoiding Stale data +The PGD CLI can return stale data on the state of the cluster if it is still connecting to nodes that have previously been parted from the cluster. Edit the `pgd-cli-config.yml` file or change your `--dsn` settings to ensure only active nodes in the cluster are listed for connection. +!!! 
diff --git a/product_docs/docs/pgd/5/index.mdx b/product_docs/docs/pgd/5/index.mdx index e296f9b1e82..3694b92b5d5 100644 --- a/product_docs/docs/pgd/5/index.mdx +++ b/product_docs/docs/pgd/5/index.mdx @@ -45,7 +45,7 @@ navigation: --- -EDB Postgres Distributed (PGD) provides multi-master replication and data distribution with advanced conflict management, data-loss protection, and throughput up to 5X faster than native logical replication, and enables distributed PostgreSQL clusters with high availability up to five 9s. +EDB Postgres Distributed (PGD) provides multi-master replication and data distribution with advanced conflict management, data-loss protection, and throughput up to 5X faster than native logical replication. It enables distributed PostgreSQL clusters with high availability up to five 9s. By default EDB Postgres Distributed uses asynchronous replication, applying changes on the peer nodes only after the local commit. Additional levels of synchronicity can diff --git a/product_docs/docs/pgd/5/known_issues.mdx b/product_docs/docs/pgd/5/known_issues.mdx index 4b040cae19f..4c3dcc8b971 100644 --- a/product_docs/docs/pgd/5/known_issues.mdx +++ b/product_docs/docs/pgd/5/known_issues.mdx @@ -52,4 +52,6 @@ release. - There currently is no protection against altering or removing a commit scope. Running transactions in a commit scope that is concurrently being altered or removed can lead to the transaction blocking or replication stalling completely due to an error on the downstream node attempting to apply the transaction. Ensure that any transactions using a specific commit scope have finished before altering or removing it. +- The [PGD CLI](cli) can return stale data on the state of the cluster if it is still connecting to nodes that have previously been parted from the cluster. 
Edit the [`pgd-cli-config.yml`](cli/installing_cli/#specifying-database-connection-strings) file or change your [`--dsn`](cli/#passing-a-database-connection-string) settings to ensure only active nodes in the cluster are listed for connection. + Details of other design or implementation [limitations](limitations) are also available. diff --git a/product_docs/docs/pgd/5/quickstart/connecting_applications.mdx b/product_docs/docs/pgd/5/quickstart/connecting_applications.mdx new file mode 100644 index 00000000000..92d58788192 --- /dev/null +++ b/product_docs/docs/pgd/5/quickstart/connecting_applications.mdx @@ -0,0 +1,211 @@ +--- +title: "Connecting to your database" +navTitle: "Connecting to your database" +description: > + Connect to your quick started PGD cluster with psql and client applications +--- + +Connecting your application or remotely connecting to your new Postgres Distributed cluster involves: + +* Getting credentials and optionally creating a `.pgpass` file +* Establishing the IP address of any PGD Proxy hosts you want to connect to +* Ensuring that you can connect to that IP address +* Getting an appropriate Postgres client +* Connecting the client to the cluster + +## Getting credentials + +The default user, enterprisedb, was created in the cluster by tpaexec. It also generated passwords for that user as part of the provisioning. To get the password, run: + +```shell +tpaexec show-password democluster enterprisedb +``` + +This command returns a string that's the password for the enterprisedb user. If you want, you can use that string when prompted for a password. + +## Creating a .pgpass file + +You can avoid entering passwords for `psql` and other Postgres clients by creating [a `.pgpass` file](https://www.postgresql.org/docs/current/libpq-pgpass.html) in your home directory. It contains password details that applications can look up when connecting. 
After getting the password (see [Getting credentials](#getting-credentials)), you can open the `.pgpass` file using your preferred editor. + +In the file, enter: + +```plain +*:*:bdrdb:enterprisedb: +``` + +Save the file and exit the editor. To secure the file, run the following command. This command gives read and write access only to you. + +```shell +chmod 0600 ~/.pgpass +``` + +## Establishing the IP address + +### Docker + +Your Docker quick start cluster is by default accessible on the IP addresses 172.17.0.2 (kaboom), 172.17.0.3 (kaftan), 172.17.04 (kaolin), and 172.17.0.5 (kapok). Docker generates these addresses. + +### AWS + +You can refer to the IP addresses in `democluster/ssh_config`. Alternatively, run: + +```shell +aws ec2 --region eu-west-1 describe-instances --query 'Reservations[*].Instances[*].{PublicIpAddress:PublicIpAddress,Name:Tags[?Key==`Name`]|[0].Value}' +__OUTPUT__ +[ + [ + { + "PublicIpAddress": "54.217.130.13", + "Name": "kapok" + } + ], + [ + { + "PublicIpAddress": "54.170.119.101", + "Name": "kaolin" + } + ], + [ + { + "PublicIpAddress": "3.250.235.130", + "Name": "kaftan" + } + ], + [ + { + "PublicIpAddress": "34.247.188.211", + "Name": "kaboom" + } + ] +] + +``` + +This command shows you EC2's list of public IP addresses for the cluster instances. + + +### Linux hosts + +You set IP addresses for your Linux servers when you configured the cluster in the quick start. Use those addresses. + +## Ensure you can connect to your IP addresses + +### Linux hosts and Docker + +You don't need to perform any configuration to connect these. + +### AWS + +AWS is configured to allow outside access only to its SSH endpoints. To allow Postgres clients to connect from outside the AWS cloud, you need to enable the transit of traffic on port 6432. + +Get your own external IP address or the external IP address of the system you want to connect to the cluster. 
One way to do this is to run:
+
+```shell
+curl https://checkip.amazonaws.com
+__OUTPUT__
+89.97.100.108
+```
+
+You also need the security group ID for your cluster. Run:
+
+```shell
+aws ec2 --region eu-west-1 describe-security-groups --filter Name=group-name,Values="*democluster*" | grep GroupId
+__OUTPUT__
+    "GroupId": "sg-072f996360ba20d5c",
+```
+
+Enter the correct region for your cluster, which you set when you configured it.
+
+```
+aws ec2 authorize-security-group-ingress --group-id <your-group-id> --protocol tcp --port 6432 --cidr <your-ip-address>/32 --region eu-west-1
+```
+
+Again, make sure you put in the correct region for your cluster.
+
+You can read more about this command in [Add rules to your security group](https://docs.aws.amazon.com/cli/latest/userguide/cli-services-ec2-sg.html#configuring-a-security-group) in the AWS CLI guide.
+
+
+## Getting an appropriate Postgres client
+
+Unless you installed Postgres on your local system, you probably need to install a client application, such as `psql`, to connect to your database cluster.
+
+On Ubuntu, for example, you can run:
+
+```shell
+sudo apt install postgresql-client
+```
+
+This command installs `psql`, along with some other tools but without installing the Postgres database locally.
+
+## Connecting the client to the cluster
+
+After you install `psql` or a similar client, you can connect to the cluster. Run:
+
+```shell
+psql -h <host-ip-address> -p 6432 -U enterprisedb bdrdb
+__OUTPUT__
+psql (15.2, server 15.2.0 (Debian 15.2.0-2.buster))
+SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off)
+Type "help" for help.
+
+bdrdb=#
+```
+
+[Use the `.pgpass` file](#creating-a-pgpass-file) with clients that support it, or use the host, port, user, password, and database name to connect with other applications.
+
+## Using proxy failover to connect the client to the cluster
+
+By listing all the addresses of proxies as the host, you can ensure that the client will always failover and connect to the first available proxy in the event of a proxy failing.
+
+
+```shell
+psql -h <proxy-ip-address-1>,<proxy-ip-address-2>,<proxy-ip-address-3> -U enterprisedb -p 6432 bdrdb
+__OUTPUT__
+psql (15.2, server 15.2.0 (Debian 15.2.0-2.buster))
+SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off)
+Type "help" for help.
+
+bdrdb=#
+```
+
+## Creating a Connection URL
+
+Many applications use a [Connection URL](https://www.postgresql.org/docs/current/libpq-connect.html#id-1.7.3.8.3.6) to connect to the database. To create a Connection URL, you need to assemble a string in the format:
+
+```
+postgresql://<user>@<proxy-ip-address-1>:6432,<proxy-ip-address-2>:6432,<proxy-ip-address-3>:6432/bdrdb
+```
+
+This format of string can be used with the `psql` command, so if our database nodes were on IP addresses 192.168.9.10, 192.168.10.10 and 192.168.11.10, we could use:
+
+```
+psql postgresql://enterprisedb@192.168.9.10:6432,192.168.10.10:6432,192.168.11.10:6432/bdrdb
+```
+
+You can also embed the password in the created URL. If we are using the enterprisedb user and the password for the enterprisedb user is `notasecret` then the URL
+would look like:
+
+```
+psql postgresql://enterprisedb:notasecret@192.168.9.10:6432,192.168.10.10:6432,192.168.11.10:6432/bdrdb
+```
+
+Actual passwords are more complex than that and may contain special characters. You will need to urlencode the password to ensure that it doesn't trip up the shell, the command or the driver you are using.
+
+!!! Warning Passwords should not be embedded
+    While we have shown you how to embed a password, we recommend that you do not do this. The password is easily extracted from the URL and can easily be saved in insecure locations. Consider other ways of passing the password.
+ +### Making a Java Connection URL + +Finally, the URL we have created is fine for many Postgres applications and clients, but those based on Java require one change to allow Java to invoke the appropriate driver. Precede the URL with `jdbc:` to make a Java compatible URL. + +``` +jdbc:postgresql://enterprisedb@192.168.9.10:6432,192.168.10.10:6432,192.168.11.10:6432/bdrdb +``` + +## Moving On + +You are now equipped to connect your applications to your cluster, with all the connection credentials and URLs you need. + + + + diff --git a/product_docs/docs/pgd/5/quickstart/further_explore_conflicts.mdx b/product_docs/docs/pgd/5/quickstart/further_explore_conflicts.mdx index 7a3ab084b65..deec4f09b37 100644 --- a/product_docs/docs/pgd/5/quickstart/further_explore_conflicts.mdx +++ b/product_docs/docs/pgd/5/quickstart/further_explore_conflicts.mdx @@ -5,23 +5,21 @@ description: > An exploration of how PGD handles conflicts between data nodes --- -## Conflicts +In a multi-master architecture like PGD, conflicts happen. PGD is built to handle them. -A multi-master architecture like PGD will see conflicts happen. PGD is built to handle them. +A conflict can occur when one database node has an update from an application to a row and another node has a different update to the same row. This type of conflict is called a *row-level conflict*. Conflicts aren't errors. Resolving them effectively is core to how Postgres Distributed maintains consistency. -A conflict can occur when one database node has an update from an application to a row and another node has a different update to the same row. This is called a **row level conflict**. Conflicts aren't errors and effectively resolving them is core to how Postgres Distributed maintains consistency. +The best way to handle conflicts is not to have them in the first place! Use PGD's Always-On architecture with proxies to ensure that your applications write to the same server in the cluster. 
-The _best_ way to handle conflicts is to not have them in the first place! Use PGD's Always-On architecture with proxies to ensure that your applications write to the same server in the cluster. - -When there are conflicts though, it is useful to know what PGD does to resolve them, how you can control that resolution and how you can find out that they are happening. This topic explores two ways in which conflicts can be caused: row insertion and row updates. +When conflicts occur, though, it's useful to know how PGD resolves them, how you can control that resolution, and how you can find out that they're happening. Row insertion and row updates are two actions that can cause conflicts. To see how it works, you need to open a command line view of all the servers. -## Your quick started configuration +## Your quick start configuration -This exploration assumes that you created your PGD cluster using the [quick start for Docker](quick_start_docker) or the [quick start for AWS](quick_start_aws). +This exploration assumes that you created your PGD cluster using the [quick start for Docker](quick_start_docker), the [quick start for AWS](quick_start_aws), or the [quick start for Linux hosts](quick_start_linux). -At the end of both those quick starts, you'll have a cluster with four nodes and these roles: +At the end of each quick start, you'll have a cluster with four nodes and these roles: | Host name | Host role | | --------- | ----------------- | @@ -34,7 +32,7 @@ You'll use these hostnames throughout this exercise. ## Installing xpanes -You'll use xpanes, a utility that allows you to quickly create multiple terminal sessions that you can easily switch between. It isn't installed by default, so you'll have to install it. Start by connecting to the kaboom node with ssh. +You'll use `xpanes`, a utility that allows you to quickly create multiple terminal sessions that you can easily switch between. It isn't installed by default, so you'll have to install it. 
Start by connecting to the kaboom node with ssh: ```shell cd democluster && ssh -F ssh_config kaboom @@ -161,6 +159,10 @@ Again you'll see both commits working. And, again, in the bottom-right pane, you An additional row in the conflict history shows an `update_origin_change` conflict occurred and that the resolution was `apply_remote`. This resolution means that the remote change was applied, updating the record. This conflict is called an UPDATE/UPDATE conflict and is explained in more detail in [UPDATE/UPDATE conflicts](../consistency/conflicts/#updateupdate-conflicts). +!!!Tip Exiting Tmux +You can quickly exit Tmux and all the associated sessions. First terminate any running processes, as they will otherwise continue running after the session is killed. Press **Control-b** and then enter `:kill-session`. This approach is simpler than quitting each pane's session one at a time using **Control-D** or `exit`. +!!! + ## Other conflicts You are now equipped to explore all the possible conflict scenarios and resolutions that can occur. For full details of how conflicts are managed, see [Conflicts](../consistency/conflicts/) in the documentation. While ideally you should avoid conflicts, it's important to know that, when they do happen, they're recorded and managed by Postgres Distributed's integrated and configurable conflict resolver. diff --git a/product_docs/docs/pgd/5/quickstart/further_explore_failover.mdx b/product_docs/docs/pgd/5/quickstart/further_explore_failover.mdx index e2d2a176cc8..11d2bf95b27 100644 --- a/product_docs/docs/pgd/5/quickstart/further_explore_failover.mdx +++ b/product_docs/docs/pgd/5/quickstart/further_explore_failover.mdx @@ -5,17 +5,15 @@ description: > An exploration of how PGD handles failover between data nodes --- -## Failover - With a high-availability cluster, the ability to failover is crucial to the overall resilience of the cluster. 
When the lead data nodes stops working for whatever reason, applications need to be able to continue working with the database with little or no interruption. For PGD, that means directing applications to the new lead data node, which takes over automatically. This is where PGD Proxy is useful. It works with the cluster and directs traffic to the lead data node automatically. In this exercise, you'll create an application that sends data to the database regularly. Then you'll first softly switch lead data node by requesting a change through the PGD CLI. And then you'll forcibly shut down a database instance and see how PGD handles that. ## Your quick started configuration -This exploration assumes that you created your PGD cluster using the [quick start for Docker](quick_start_docker) or the [quick start for AWS](quick_start_aws). +This exploration assumes that you created your PGD cluster using the [quick start for Docker](quick_start_docker), the [quick start for AWS](quick_start_aws) or the [quick start for Linux hosts](quick_start_linux). -At the end of both those quick starts, you'll have a cluster with four nodes and these roles: +At the end of each quick starts, you'll have a cluster with four nodes and these roles: | Host name | Host role | | --------- | ----------------- | @@ -28,7 +26,7 @@ You'll use these hostnames throughout this exercise. ## Installing xpanes -You'll be using xpanes, a utility that allows you to quickly create multiple terminal sessions that you can easily switch between. It isn't installed by default, so you'll have to install it. For this exercise, you'll be launching xpanes from the system where you ran tpaexec to configure your quick-start cluster. +You'll be using `xpanes`, a utility that allows you to quickly create multiple terminal sessions that you can easily switch between. It isn't installed by default, so you'll have to install it. 
For this exercise, you'll be launching xpanes from the system where you ran tpaexec to configure your quick-start cluster. If the system is running Ubuntu, run this: @@ -400,11 +398,15 @@ Bring the proxy service on kaftan back by running: sudo systemctl start pgd-proxy.service ``` +!!!Tip Exiting Tmux +You can quickly exit Tmux and all the associated sessions. First terminate any running processes, as they will otherwise continue running after the session is killed. Press **Control-b** and then enter `:kill-session`. This approach is simpler than quitting each pane's session one at a time using **Control-D** or `exit`. +!!! + ## Other scenarios This example uses the quick-start configuration of three data nodes and one backup node. You can configure a cluster to have two data nodes and a witness node, which is less resilient to a node failing. Or you can configure five data nodes, which is much more resilient to a node failing. With this configuration, you can explore how failover works for your applications. For clusters with multiple locations, the same basic rules apply: taking a server down elects a new write leader that proxies now point to. ## Further reading -* Read more about the management capabilities of [PGD cli](../cli/). +* Read more about the management capabilities of [PGD CLI](../cli/). * Learn more about [monitoring replication using SQL](../monitoring/sql/#monitoring-replication-peers). diff --git a/product_docs/docs/pgd/5/quickstart/index.mdx b/product_docs/docs/pgd/5/quickstart/index.mdx index 07c5311ce0e..e4a982391ae 100644 --- a/product_docs/docs/pgd/5/quickstart/index.mdx +++ b/product_docs/docs/pgd/5/quickstart/index.mdx @@ -1,32 +1,34 @@ --- title: "Introducing PGD quick starts" -navTitle: "Quick Start" +navTitle: "Quick start" description: > How to select your PGD quick start deployment and what to expect from the experience. 
indexCards: none navigation: - quick_start_docker +- quick_start_linux - quick_start_aws +- connecting_applications - further_explore_failover - further_explore_conflicts - next_steps --- -## Quick Start +## Quick start Postgres Distributed (PGD) is a multi-master replicating implementation of Postgres designed for high performance and availability. You can create database clusters made up of many bidirectionally synchronizing database nodes. The clusters can have a number of proxy servers that direct your query traffic to the most available nodes, adding further resilience to your cluster configuration. PGD is very configurable. To quickly evaluate and deploy PGD, use this quick start. It'll get you up and running with a fully configured PGD cluster using the same tools that you'll use to deploy to production. This quick start includes: * A short introduction to Trusted Postgres Architect (TPA) and how it helps you configure, deploy, and manage Postgres Distributed -* A guide to selecting Docker or AWS quick starts - * The Docker quick start +* A guide to selecting Docker, Linux hosts, or AWS quick starts + * The Docker quick start + * The Linux host quick start * The AWS quick start * Connecting applications to your cluster -* Further explorations with your cluster +* Further explorations with your cluster including * Conflicts * Failover - * Reconfiguration ## Introducing PGD and TPA @@ -35,31 +37,43 @@ PGD is a multi-master replicating implementation of Postgres designed for high p We created TPA to make installing and managing various Postgres configurations easily repeatable. TPA orchestrates creating and deploying Postgres. -In these quick starts, you install TPA first. If you already have TPA installed, you can skip those steps. TPA is more of a tool than a simple installer. You will be able to use the same installation of TPA to deploy many different configurations of Postgres clusters. 
+These quick starts are designed to let you quickly set up a single-region cluster.
+The Docker quick start is ideal for those looking to initially explore PGD and its capabilities. This configuration of PGD isn't suitable for production use but can be valuable for testing the functionality and behavior of PGD clusters. You might also find it useful when familiarizing yourself with PGD commands and APIs to prepare for deploying on cloud, VM, or Linux hosts. * [Begin the Docker quick start](quick_start_docker) +### Linux host quick start + +The Linux hosts quick start is suited for those looking to install PGD on their own hosts, where they have complete control of the hardware and software, or in a private cloud. The overall configuration is similar to the Docker configuration but is more persistent over system restarts and closer to a single-region production deployment of PGD. + +* [Begin the Linux host quick start](quick_start_linux) + ### AWS quick start -The AWS quick start is more extensive and deploys the PGD cluster onto EC2 nodes on Amazon's cloud. The cluster's overall configuration is similar to the Docker quick start. However, instead of using Docker containers, it uses t3.micro instances of Amazon EC2 to provide the compute power. The AWS deployment is more persistent and not subject to the limitations of the Docker quick start deployment, but it requires more initial setup to configure the AWS CLI. +The AWS quick start is more extensive and deploys the PGD cluster onto EC2 nodes on Amazon's cloud. The cluster's overall configuration is similar to the Docker quick start. However, instead of using Docker containers, it uses t3.micro instances of Amazon EC2 to provide the compute power. The AWS deployment is more persistent and not subject to the limitations of the Docker quick start deployment. However, it requires more initial setup to configure the AWS CLI. 
* [Begin the AWS quick start](quick_start_aws) ## Further explorations with your cluster +* [Connect applications to your PGD cluster](connecting_applications/) * [Find out how an PGD cluster stands up to downtime of data nodes or proxies](further_explore_failover/) * [Learn about how Postgres Distributed manages conflicting updates](further_explore_conflicts/) -* [Moving beyond the quickstarts](next_steps/) +* [Moving beyond the quick starts](next_steps/) diff --git a/product_docs/docs/pgd/5/quickstart/next_steps.mdx b/product_docs/docs/pgd/5/quickstart/next_steps.mdx index 52c760cccb9..3c72e9b099c 100644 --- a/product_docs/docs/pgd/5/quickstart/next_steps.mdx +++ b/product_docs/docs/pgd/5/quickstart/next_steps.mdx @@ -7,9 +7,23 @@ description: > ## Going further with your PGD cluster +### Architecture + +In this quick start, we created a single region cluster of high availability Postgres databases. This is the, Always On Single Location architecture, one of a range of available PGD architectures. Other architectures include Always On Multi-Location, with clusters in multiple data centers working together, and variations of both with witness nodes enhancing resilience. Read more in [architectural options](../architectures/). + +### Postgres versions + +In this quick start, we deployed EDB Postgres Advanced Server (EPAS) to the database nodes. PGD is able to deploy a three different kinds of Postgres distributions, EPAS, EDB Postgres Extended Server and open-source PostgreSQL. The selection of database affects PGD, offering [different capabilities](../choosing_server) dependant on server. 
+ +* Open-source PostgreSQL does not support CAMO +* EDB Postgres Extended Server supports CAMO, but does not offer Oracle compatibility +* EDB Postgres Advanced Server supports CAMO and offers optional Oracle compatibility + +### Read On + * Learn PGD's [terminology](../terminology/) - from Asynchronous Replication to Write Scalability -* Discover the [architectural options](../architectures/) available with PGD * Find out how [applications work](../appusage/) with PGD and how common Postgres features like [sequences](../sequences/) are globally distributed +* Discover how PGD supports [rolling upgrades](../upgrades/) of your clusters * Take control of [routing](../routing/) and use SQL to control the PGD Proxies * Engage with the [PGD CLI](../cli/) to manage and monitor your cluster diff --git a/product_docs/docs/pgd/5/quickstart/quick_start_aws.mdx b/product_docs/docs/pgd/5/quickstart/quick_start_aws.mdx index 7cd259935ae..c4c17c575de 100644 --- a/product_docs/docs/pgd/5/quickstart/quick_start_aws.mdx +++ b/product_docs/docs/pgd/5/quickstart/quick_start_aws.mdx @@ -63,7 +63,7 @@ You can add this to your `.bashrc` script or similar shell profile to ensure it' ### Configure the repository -All the software needed for this example is available from the Postgres Distributed package repository. Download and run a script to configure the Postgres Distributed repository. This repository also contains the TPA packages. +All the software needed for this example is available from the Postgres Distributed package repository. The following command downloads and runs a script to configure the Postgres Distributed repository. This repository also contains the TPA packages. ```shell curl -1sLf "https://downloads.enterprisedb.com/$EDB_SUBSCRIPTION_TOKEN/postgres_distributed/setup.deb.sh" | sudo -E bash @@ -252,7 +252,7 @@ To leave the SQL client, enter `exit`. 
### Using PGD CLI -The pgd utility, also known as the PGD CLI, lets you control and manage your Postgres Distributed cluster. It's already installed on the node. +The `pgd` utility, also known as the PGD CLI, lets you control and manage your Postgres Distributed cluster. It's already installed on the node. You can use it to check the cluster's health by running `pgd check-health`: @@ -304,6 +304,7 @@ bdrdb=# ## Explore your cluster +* [Connect to your database](connecting_applications) to applications * [Explore failover](further_explore_failover) with hands-on exercises * [Understand conflicts](further_explore_conflicts) by creating and monitoring them * [Next steps](next_steps) in working with your cluster diff --git a/product_docs/docs/pgd/5/quickstart/quick_start_docker.mdx b/product_docs/docs/pgd/5/quickstart/quick_start_docker.mdx index fbdeeba5ebc..8c4d4bbc58c 100644 --- a/product_docs/docs/pgd/5/quickstart/quick_start_docker.mdx +++ b/product_docs/docs/pgd/5/quickstart/quick_start_docker.mdx @@ -14,9 +14,11 @@ This quick start uses TPA to set up PGD with an Always On Single Location archit We created TPA to make installing and managing various Postgres configurations easily repeatable. TPA orchestrates creating and deploying Postgres. In this quick start, you install TPA first. If you already have TPA installed, you can skip those steps. You can use TPA to deploy various configurations of Postgres clusters. -PGD is a multi-master replicating implementation of Postgres designed for high performance and availability. The installation of PGD is orchestrated by TPA. You will use TPA to generate a configuration file for a PGD demonstration cluster. This cluster uses local Docker containers to host the cluster's nodes: three replicating database nodes, two connection proxies, and one backup node. You can then use TPA to provision and deploy the required configuration and software to each node. 
+PGD is a multi-master replicating implementation of Postgres designed for high performance and availability. The installation of PGD is orchestrated by TPA. You will use TPA to generate a configuration file for a PGD demonstration cluster. -This configuration of PGD isn't suitable for production use but can be valuable for testing the functionality and behavior of PGD clusters. You might also find it useful when familiarizing yourself with PGD commands and APIs to prepare for deployment on cloud, VM, or bare-metal platforms. +This cluster uses local Docker containers to host the cluster's nodes: three replicating database nodes, three cohosted connection proxies, and one backup node. You can then use TPA to provision and deploy the required configuration and software to each node. + +This configuration of PGD isn't suitable for production use but can be valuable for testing the functionality and behavior of PGD clusters. You might also find it useful when familiarizing yourself with PGD commands and APIs to prepare for deployment on cloud, VM, or Linux hosts. !!! Note This set of steps is specifically for Ubuntu 22.04 LTS on Intel/AMD processors. @@ -24,15 +26,28 @@ This set of steps is specifically for Ubuntu 22.04 LTS on Intel/AMD processors. ## Prerequisites -To complete this example, you need free storage and Docker installed. +To complete this example, you need a system with enough RAM and free storage. You also need `curl` and Docker installed. + +### RAM requirements + +You need a minimum of 4GB of RAM on the system. You need this much RAM because you will be running four containers, three of which will be hosting Postgres databases. ### Free disk space -You need at least 5GB of free storage (accessible by Docker) to deploy the cluster described by this example. A bit more is probably wise. +You need at least 5GB of free storage, accessible by Docker, to deploy the cluster described by this example. A bit more is probably wise. 
+ +### The curl utility + +You will download and run scripts during this quick start using the `curl` utilty which might not be installed by default. To ensure that `curl` is installed, run: + +```shell +sudo apt update +sudo apt install curl +``` ### Docker Engine -Use Docker containers as the target platform for this PGD deployment: +You will use Docker containers as the target platform for this PGD deployment. Install Docker Engine: ```shell sudo apt update @@ -40,13 +55,14 @@ sudo apt install docker.io ``` !!! Important Running as a non-root user - Be sure to add your user to the Docker group once installed: + Once Docker Engine is installed, be sure to add your user to the Docker group: ```shell sudo usermod -aG docker newgrp docker ``` + ## Preparation ### EDB account @@ -71,7 +87,7 @@ You can add this to your `.bashrc` script or similar shell profile to ensure it' ### Configure the repository -All the software needed for this example is available from the Postgres Distributed package repository. Download and run a script to configure the Postgres Distributed repository. This repository also contains the TPA packages. +All the software needed for this example is available from the Postgres Distributed package repository. The following command downloads and runs a script to configure the Postgres Distributed repository. This repository also contains the TPA packages. ```shell curl -1sLf "https://downloads.enterprisedb.com/$EDB_SUBSCRIPTION_TOKEN/postgres_distributed/setup.deb.sh" | sudo -E bash @@ -95,22 +111,25 @@ You'll use TPA to provision and deploy PGD. If you previously installed TPA, you !!! Important If the Linux host platform you're using is running [cgroups](https://en.wikipedia.org/wiki/Cgroups) v2, you need to disable it and enable cgroups v1 while using TPA to deploy to Docker. 
- To check for cgroup v2: + To check for cgroup v2, run: ```shell mount | grep cgroup | head -1 ``` - You need to disable cgroup v2 if the output is: - ```text - cgroup on /sys/fs/cgroup type cgroup2 + If you do **not** see a line beginning: + + `tmpfs on /sys/fs/cgroup type tmpfs` + + Then you need to to disable cgroup v2. To do this, run: + + ```shell + echo 'GRUB_CMDLINE_LINUX=systemd.unified_cgroup_hierarchy=false' | sudo tee /etc/default/grub.d/cgroup.cfg ``` - To disable cgroup v2: + Then update the grub bootloader and reboot by running: ```shell - echo 'GRUB_CMDLINE_LINUX=systemd.unified_cgroup_hierarchy=false' | sudo tee \ - /etc/default/grub.d/cgroup.cfg sudo update-grub sudo reboot ``` @@ -264,13 +283,13 @@ Type "help" for help. bdrdb=# ``` -You're directly connected to the Postgres database running on the `kaboom` node and can start issuing SQL commands. +You're directly connected to the Postgres database running on the kaboom node and can start issuing SQL commands. To leave the SQL client, enter `exit`. ### Using PGD CLI -The pgd utility, also known as the PGD CLI, lets you control and manage your Postgres Distributed cluster. It's already installed on the node. +The `pgd` utility, also known as the PGD CLI, lets you control and manage your Postgres Distributed cluster. It's already installed on the node. 
You can use it to check the cluster's health by running `pgd check-health`: @@ -323,6 +342,7 @@ bdrdb=# ## Explore your cluster +* [Connect to your database](connecting_applications) to applications * [Explore failover](further_explore_failover) with hands-on exercises * [Understand conflicts](further_explore_conflicts) by creating and monitoring them * [Next steps](next_steps) in working with your cluster diff --git a/product_docs/docs/pgd/5/quickstart/quick_start_linux.mdx b/product_docs/docs/pgd/5/quickstart/quick_start_linux.mdx new file mode 100644 index 00000000000..513ad57769d --- /dev/null +++ b/product_docs/docs/pgd/5/quickstart/quick_start_linux.mdx @@ -0,0 +1,366 @@ +--- +title: "Deploying an EDB Postgres Distributed example cluster on Linux hosts" +navTitle: "Deploying on Linux hosts" +description: > + A quick demonstration of deploying a PGD architecture using TPA on Linux hosts +redirects: + - /pgd/latest/quick_start_bare/ +--- + +## Introducing TPA and PGD + +We created TPA to make installing and managing various Postgres configurations easily repeatable. TPA orchestrates creating and deploying Postgres. In this quick start, you install TPA first. If you already have TPA installed, you can skip those steps. You can use TPA to deploy various configurations of Postgres clusters. + +PGD is a multi-master replicating implementation of Postgres designed for high performance and availability. The installation of PGD is orchestrated by TPA. You will use TPA to generate a configuration file for a PGD demonstration cluster. + +The TPA Linux host option allows users of any cloud or VM platform to use TPA to configure EDB Postgres Distributed. All you need from TPA is for the target system to be configured with a Linux operating system and accessible using SSH. Unlike the other TPA platforms (Docker and AWS), the Linux host configuration doesn't provision the target machines. It's up to you to provision them wherever you decide to deploy. 
+ +This cluster uses Linux server instances to host the cluster's nodes. The nodes include three replicating database nodes, three cohosted connection proxies, and one backup node. TPA can then provision, prepare, and deploy the required EDB Postgres Distributed software and configuration to each node. + +!!! Note On host compatibility +This set of steps is specifically for users running Ubuntu 22.04 LTS on Intel/AMD processors. +!!! + +## Prerequisites + +### Configure your Linux hosts + +You will need to provision four hosts for this quick start. Each host should have a [supported Linux operating system](/tpa/latest/reference/distributions/) installed. To eliminate prompts for password, each host also needs to be SSH-accessible using certificate key pairs. + +!!! Note On machine provisioning +Azure users can follow [a Microsoft guide](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-portal?tabs=ubuntu) on how to provision Azure VMs loaded with Linux. Google Cloud Platform users can follow [a Google guide](https://cloud.google.com/compute/docs/create-linux-vm-instance) on how to provision GCP VMs with Linux loaded. You can use any virtual machine technology to host a Linux instance, too. Refer to your virtualization platform's documentation for instructions on how to create instances with Linux loaded on them. + +Whichever cloud or VM platform you use, you need to make sure that each instance is accessible by SSH and that each instance can connect to the other instances. They can connect through either the public network or over a VPC for the cloud platforms. You can connect through your local network for on-premises VMs. + +If you can't do this, you might want to consider the Docker or AWS quick start. These configurations are easier to set up and quicker to tear down. The [AWS quick start](quick_start_aws), for example, automatically provisions compute instances and creates a VPC for those instances. +!!! 
+ +In this quick start, you will install PGD nodes onto four hosts configured in the cloud. Each of these hosts in this example is installed with Rocky Linux. Each has a public IP address to go with its private IP address. + +| Host name | Public IP | Private IP | +| ----------- | ------------------------ | -------------- | +| linuxhost-1 | 172.19.16.27 | 192.168.2.247 | +| linuxhost-2 | 172.19.16.26 | 192.168.2.41 | +| linuxhost-3 | 172.19.16.25 | 192.168.2.254 | +| linuxhost-4 |172.19.16.15 | 192.168.2.30 | + +These are example IP addresses. Substitute them with your own public and private IP addresses as you progress through the quick start. + +### Set up a host admin user + +Each machine requires a user account to use for installation. For simplicity, use a user with the same name on all the hosts. On each host, also configure the user so that you can SSH into the host without being prompted for a password. Be sure to give that user sudo privileges on the host. On the four hosts, the user rocky is already configured with sudo privileges. + +## Preparation + +### EDB account + +You'll need an EDB account to install both TPA and PGD. + +[Sign up for a free EDB account](https://www.enterprisedb.com/accounts/register) if you don't already have one. Signing up gives you a trial subscription to EDB's software repositories. + +After you are registered, go to the [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads) page, where you can obtain your repo token. + +On your first visit to this page, select **Request Access** to generate your repo token. Copy the token using the **Copy Token** icon, and store it safely. + + +### Setting environment variables + +First, set the `EDB_SUBSCRIPTION_TOKEN` environment variable to the value of your EDB repo token, obtained in the [EDB account](#edb-account) step. + +``` +export EDB_SUBSCRIPTION_TOKEN= +``` + +You can add this to your `.bashrc` script or similar shell profile to ensure it's always set. 
+ +### Configure the repository + +All the software needed for this example is available from the Postgres Distributed package repository. Download and run a script to configure the Postgres Distributed repository. This repository also contains the TPA packages. + +``` +curl -1sLf "https://downloads.enterprisedb.com/$EDB_SUBSCRIPTION_TOKEN/postgres_distributed/setup.deb.sh" | sudo -E bash +``` +## Installing Trusted Postgres Architect (TPA) + +You'll use TPA to provision and deploy PGD. If you previously installed TPA, you can move on to the [next step](#installing-pgd-using-tpa). You'll find full instructions for installing TPA in the [Trusted Postgres Architect documentation](/tpa/latest/INSTALL/), which we've also included here. + +### Linux environment + +[TPA supports several distributions of Linux](/tpa/latest/INSTALL/) as a host platform. These examples are written for Ubuntu 22.04, but steps are similar for other supported platforms. + +### Install the TPA package + +```shell +sudo apt install tpaexec +``` + +### Configuring TPA + +You now need to configure TPA, which configures TPA's Python environment. Call `tpaexec` with the command `setup`: + +```shell +sudo /opt/EDB/TPA/bin/tpaexec setup +export PATH=$PATH:/opt/EDB/TPA/bin +``` + +You can add the `export` command to your shell's profile. + +### Testing the TPA installation + +You can verify TPA is correctly installed by running `selftest`: + +```shell +tpaexec selftest +``` +TPA is now installed. 
+ +## Installing PGD using TPA + +### Generating a configuration file + +Run the [`tpaexec configure`](/tpa/latest/tpaexec-configure/) command to generate a configuration folder: + +``` +tpaexec configure democluster --architecture PGD-Always-ON --platform bare --edb-postgres-advanced 15 --redwood --no-git --location-names dc1 --active-location dc1 --hostnames-unsorted +``` + +You specify the PGD-Always-ON architecture (`--architecture PGD-Always-ON`), which sets up the configuration for [PGD 5's Always On architectures](https://www.enterprisedb.com/docs/pgd/latest/architectures/). As part of the default architecture, it configures your cluster with three data nodes, cohosting three [PGD Proxy](https://www.enterprisedb.com/docs/pgd/latest/routing/proxy/) servers and a [Barman](https://www.enterprisedb.com/docs/pgd/latest/backup/#physical-backup) node for backup. + +For Linux hosts, specify that you're targeting a "bare" platform (`--platform bare`). TPA will determine the Linux version running on each host during deployment. See [the EDB Postgres Distributed compatibility table](https://www.enterprisedb.com/resources/platform-compatibility) for details about the supported operating systems. + +Specify that the data nodes will be running [EDB Postgres Advanced Server v15](https://www.enterprisedb.com/docs/epas/latest/) (`--edb-postgres-advanced 15`) with Oracle compatibility (`--redwood`). + +You set the notional location of the nodes to `dc1` using `--location-names`. You then activate the PGD proxies in that location using `--active-locations dc1` set to the same location. + +By default, TPA commits configuration changes to a Git repository. For this example, you don't need to do that, so pass the `--no-git` flag. + +Finally, you ask TPA to generate repeatable hostnames for the nodes by passing `--hostnames-unsorted`. Otherwise, it selects hostnames at random from a predefined list of suitable words. 
+ +This command creates a subdirectory in the current working directory called `democluster`. It contains the `config.yml` configuration file TPA uses to create the cluster. You can view it using: + +```shell +less democluster/config.yml +``` + +You now need to edit the configuration file to add details related to your Linux hosts, such as admin user names and public and private IP addresses. + +## Editing your configuration + +Using your preferred editor, open `democluster/config.yml`. + +Search for the line containing `ansible_user: root`. Change `root` to the name of the user you configured with SSH access and sudo privileges. Follow that with this line: + +```yaml + manage_ssh_hostkeys: yes +``` + +Your `instance_defaults` section now looks like this: + +```yaml +instance_defaults: + platform: bare + vars: + ansible_user: rocky + manage_ssh_hostkeys: yes +``` +Next, search for `node: 1`, which is the configuration settings of the first node, kaboom. + +After the `node: 1` line, add the public and private IP addresses of your node. Use `linuxhost-1` as the host for this node. Add the following to the file, substituting your IP addresses. Align the start of each line with the start of the `node:` line. + +```yaml + public_ip: 172.19.16.27 + private_ip: 192.168.2.247 +``` + +The whole entry for kaboom looks like this but with your own IP addresses: + +```yaml +- Name: kaboom + backup: kapok + location: dc1 + node: 1 + public_ip: 172.19.16.27 + private_ip: 192.168.2.247 + role: + - bdr + - pgd-proxy + vars: + bdr_child_group: dc1_subgroup + bdr_node_options: + route_priority: 100 +``` +Repeat this process for the three other nodes. + +Search for `node: 2`, which is the configuration settings for the node kaftan. Use `linuxhost-2` for this node. Substituting your IP addresses, add: + +```yaml + public_ip: 172.19.16.26 + private_ip: 192.168.2.41 +``` + +Search for `node: 3`, which is the configuration settings for the node kaolin. Use `linuxhost-3` for this node. 
Substituting your IP addresses, add: + +```yaml + public_ip: 172.19.16.25 + private_ip: 192.168.2.254 +``` + +Finally, search for `node: 4`, which is the configuration settings for the node kapok. Use `linuxhost-4` for this node. Substituting your IP addresses, add: + +```yaml + public_ip: 172.19.16.15 + private_ip: 192.168.2.30 +``` + +## Provisioning the cluster + +You can now run: + +``` +tpaexec provision democluster +``` + +This command prepares for deploying the cluster. (On other platforms, such as Docker and AWS, this command also creates the required hosts. When using Linux hosts, your hosts should already be configured.) + +!!! SeeAlso "Further reading" + - [`tpaexec provision`](/tpa/latest/tpaexec-provision/) in the Trusted Postgres Architect documentation + + +One part of this process for Linux hosts is creating key-pairs for the hosts for SSH operations later. With those key-pairs created, you will need to copy the public part of the key-pair to the hosts. You can do this with `ssh-copy-id`, giving the democluster identity (`-i`) and the login to each host. For this example, these are the commands: + +```shell +ssh-copy-id -i democluster/id_democluster rocky@172.19.16.27 +ssh-copy-id -i democluster/id_democluster rocky@172.19.16.26 +ssh-copy-id -i democluster/id_democluster rocky@172.19.16.25 +ssh-copy-id -i democluster/id_democluster rocky@172.19.16.15 +``` + + +You can now create the `tpa_known_hosts` file, which allows the hosts to be verified. Use `ssh-keyscan` on each host (`-H`) and append its output to `tpa_known_hosts`: + +```shell +ssh-keyscan -H 172.19.16.27 >> democluster/tpa_known_hosts +ssh-keyscan -H 172.19.16.26 >> democluster/tpa_known_hosts +ssh-keyscan -H 172.19.16.25 >> democluster/tpa_known_hosts +ssh-keyscan -H 172.19.16.15 >> democluster/tpa_known_hosts +``` + +## Deploy your cluster + +You now have everything ready to deploy your cluster. 
To deploy, run: + +```shell +tpaexec deploy democluster +``` + +TPA applies the configuration, installing the needed packages and setting up the actual EDB Postgres Distributed cluster. + +!!! SeeAlso "Further reading" + - [`tpaexec deploy`](/tpa/latest/tpaexec-deploy/) in the Trusted Postgres Architect documentation + +## Connecting to the cluster + +You're now ready to log into one of the nodes of the cluster with SSH and then connect to the database. Part of the configuration process set up SSH logins for all the nodes, complete with keys. To use the SSH configuration, you need to be in the `democluster` directory created by the `tpaexec configure` command earlier: + +```shell +cd democluster +``` + +From there, you can run `ssh -F ssh_config ` to establish an SSH connection. Connect to kaboom, the first database node in the cluster: + +```shell +ssh -F ssh_config kaboom +__OUTPUT__ +[rocky@kaboom ~]# +``` + +Notice that you're logged in as rocky, the admin user and ansible user you configured earlier, on kaboom. + +You now need to adopt the identity of the enterprisedb user. This user is preconfigured and authorized to connect to the cluster's nodes. + +```shell +sudo -iu enterprisedb +__OUTPUT__ +[root@kaboom ~]# sudo -iu enterprisedb +enterprisedb@kaboom:~ $ +``` + +You can now run the `psql` command to access the `bdrdb` database: + +```shell +psql bdrdb +__OUTPUT__ +enterprisedb@kaboom:~ $ psql bdrdb +psql (15.2.0, server 15.2.0) +Type "help" for help. + +bdrdb=# +``` + +You're directly connected to the Postgres database running on the kaboom node and can start issuing SQL commands. + +To leave the SQL client, enter `exit`. + +### Using PGD CLI + +The `pgd` utility, also known as the PGD CLI, lets you control and manage your Postgres Distributed cluster. It's already installed on the node. 
+ +You can use it to check the cluster's health by running `pgd check-health`: + +```console +enterprisedb@kaboom:~ $ pgd check-health +__OUTPUT__ +Check Status Message +----- ------ ------- +ClockSkew Ok All BDR node pairs have clockskew within permissible limit +Connection Ok All BDR nodes are accessible +Raft Ok Raft Consensus is working correctly +Replslots Ok All BDR replication slots are working correctly +Version Ok All nodes are running same BDR versions +enterprisedb@kaboom:~ $ +``` + +Or, you can use `pgd show-nodes` to ask PGD to show you the data-bearing nodes in the cluster: + +```console +enterprisedb@kaboom:~ $ pgd show-nodes +__OUTPUT__ +Node Node ID Group Type Current State Target State Status Seq ID +---- ------- ----- ---- ------------- ------------ ------ ------ +kaboom 2710197610 dc1_subgroup data ACTIVE ACTIVE Up 1 +kaftan 3490219809 dc1_subgroup data ACTIVE ACTIVE Up 3 +kaolin 2111777360 dc1_subgroup data ACTIVE ACTIVE Up 2 +enterprisedb@kaboom:~ $ +``` + +Similarly, use `pgd show-proxies` to display the proxy connection nodes: + +```console +enterprisedb@kaboom:~ $ pgd show-proxies +__OUTPUT__ +Proxy Group Listen Addresses Listen Port +----- ----- ---------------- ----------- +kaboom dc1_subgroup [0.0.0.0] 6432 +kaftan dc1_subgroup [0.0.0.0] 6432 +kaolin dc1_subgroup [0.0.0.0] 6432 +``` + +The proxies provide high-availability connections to the cluster of data nodes for applications. You can connect to the proxies and, in turn, to the database with the command `psql -h kaboom,kaftan,kaolin -p 6432 bdrdb`: + +```console +enterprisedb@kaboom:~ $ psql -h kaboom,kaftan,kaolin -p 6432 bdrdb +__OUTPUT__ +psql (15.2.0, server 15.2.0) +SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off) +Type "help" for help. 
+ +bdrdb=# +``` + + +## Explore your cluster + +* [Connect your database](connecting_applications) to applications +* [Explore failover](further_explore_failover) with hands-on exercises +* [Understand conflicts](further_explore_conflicts) by creating and monitoring them +* [Next steps](next_steps) in working with your cluster diff --git a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx index a06cc2d723f..bf486ed0b1a 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx @@ -24,7 +24,7 @@ specific to Kubernetes and PostgreSQL. | [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. | EDB Postgres for Kubernetes requires a Kubernetes version supported by the community. Please refer to the -["Supported releases"](supported_releases.md) page for details. +["Supported releases"](/resources/platform-compatibility#pgk8s) page for details. ## PostgreSQL terminology diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx index c037c0bbe6b..6acdcd3f617 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx @@ -137,7 +137,7 @@ plane for self-managed Kubernetes installations). ## Upgrades !!! Important - Please carefully read the [release notes](release_notes.md) + Please carefully read the [release notes](rel_notes) before performing an upgrade as some versions might require extra steps. @@ -224,7 +224,7 @@ come current periodically and not skipping versions. In 2022, EDB plans an LTS release for EDB Postgres for Kubernetes in environments where frequent online updates are not possible. 
-The [release notes](release_notes.md) page contains a detailed list of the +The [release notes](rel_notes) page contains a detailed list of the changes introduced in every released version of EDB Postgres for Kubernetes, and it must be read before upgrading to a newer version of the software. diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx index 4de22a85b04..6570029814c 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx @@ -7,7 +7,7 @@ originalFilePath: 'src/monitoring.md' Installing Prometheus and Grafana is beyond the scope of this project. We assume they are correctly installed in your system. However, for experimentation we provide instructions in - [Part 4 of the Quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana). + [Part 4 of the Quickstart](quickstart.md#part-4---monitor-clusters-with-prometheus-and-grafana). ## Monitoring Instances @@ -56,7 +56,7 @@ by specifying a list of one or more databases in the `target_databases` option. !!! Seealso "Prometheus/Grafana" If you are interested in evaluating the integration of EDB Postgres for Kubernetes with Prometheus and Grafana, you can find a quick setup guide - in [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana) + in [Part 4 of the quickstart](quickstart.md#part-4---monitor-clusters-with-prometheus-and-grafana) ### Prometheus Operator example @@ -679,7 +679,7 @@ kubectl delete -f curl.yaml In the [`samples/monitoring/`](https://github.com/EnterpriseDB/docs/tree/main/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring) directory you will find a series of sample files for observability. 
-Please refer to [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana) +Please refer to [Part 4 of the quickstart](quickstart.md#part-4---monitor-clusters-with-prometheus-and-grafana) section for context: - `kube-stack-config.yaml`: a configuration file for the kube-stack helm chart diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx index ff5da677090..626c1eb7738 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx @@ -47,7 +47,7 @@ reading about the systems and decide which one to proceed with. After setting up one of them, please proceed with part 2. We also provide instructions for setting up monitoring with Prometheus and -Grafana for local testing/evaluation, in [part 4](#part-4-monitor-clusters-with-prometheus-and-grafana) +Grafana for local testing/evaluation, in [part 4](#part-4---monitor-clusters-with-prometheus-and-grafana) ### Minikube @@ -123,7 +123,7 @@ with the deployment of a PostgreSQL cluster. As with any other deployment in Kubernetes, to deploy a PostgreSQL cluster you need to apply a configuration file that defines your desired `Cluster`. 
-The [`cluster-example.yaml`](../samples/cluster-example.yaml) sample file +The [`cluster-example.yaml`](samples/cluster-example.yaml) sample file defines a simple `Cluster` using the default storage class to allocate disk space: diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_2_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_2_rel_notes.mdx index fa12fc913e9..fc7a1a2a47e 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_2_rel_notes.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_2_rel_notes.mdx @@ -9,4 +9,4 @@ This release of EDB Postgres for Kubernetes includes the following: | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Upstream merge | Merged with community CloudNativePG 1.18.2. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/). | | Feature | Support for [Transparent Data Encryption (TDE)](/tde/latest) with EDB Postgtres Advanced Server 15. TDE encrypts, transparently to the user, any user data stored in the database system. | -| Feature | New external backup adaptor to provide a generic way to integrate EDB Postgres for Kubernetes in a third-party tool for backups. See [External Backup Adapter](/postgres_for_kubernetes/latest/addons/#external_backup_adapter) for more information.| \ No newline at end of file +| Feature | New external backup adaptor to provide a generic way to integrate EDB Postgres for Kubernetes in a third-party tool for backups. 
See [External Backup Adapter](/postgres_for_kubernetes/latest/addons/#external-backup-adapter) for more information.| \ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_0_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_0_rel_notes.mdx index 786968e3d47..ccd829de0bf 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_0_rel_notes.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_0_rel_notes.mdx @@ -8,5 +8,5 @@ This release of EDB Postgres for Kubernetes includes the following: | Type | Description | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Upstream merge | Merged with community CloudNativePG 1.19.0. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/). | -| Feature | Support for [Transparent Data Encryption (TDE)](/tde/latest) with EDB Postgtres Advanced Server 15. TDE encrypts, transparently to the user, any user data stored in the database system. | -| Feature | New external backup adaptor to provide a generic way to integrate EDB Postgres for Kubernetes in a third-party tool for backups. See [External Backup Adapter](/postgres_for_kubernetes/latest/addons/#external_backup_adapter) for more information.| \ No newline at end of file +| Feature | Support for [Transparent Data Encryption (TDE)](/tde/latest) with EDB Postgres Advanced Server 15. TDE encrypts, transparently to the user, any user data stored in the database system. | +| Feature | New external backup adaptor to provide a generic way to integrate EDB Postgres for Kubernetes in a third-party tool for backups. 
See [External Backup Adapter](/postgres_for_kubernetes/latest/addons/#external-backup-adapter) for more information.| \ No newline at end of file diff --git a/product_docs/docs/tpa/23/INSTALL.mdx b/product_docs/docs/tpa/23/INSTALL.mdx index 7e97de7f572..68d28c10033 100644 --- a/product_docs/docs/tpa/23/INSTALL.mdx +++ b/product_docs/docs/tpa/23/INSTALL.mdx @@ -12,6 +12,8 @@ TPA packages are available to prospects (for a 60 day trial), EDB customers with a valid Extreme HA subscription, or by prior arrangement. Please contact your account manager to request access. +## Distribution Support + We publish TPA packages for Debian 10 (buster), Ubuntu 22.04 (jammy), Ubuntu 20.04 (focal), Ubuntu 18.04 (bionic), RHEL/CentOS 7.x and 8.x, Rocky 8.x and AlmaLinux 8.x. These distributions provide a usable Python 3.6+ environment out of the box, diff --git a/src/constants/products.js b/src/constants/products.js index c8940abccf8..2a7301ce014 100644 --- a/src/constants/products.js +++ b/src/constants/products.js @@ -3,15 +3,15 @@ import IconNames from "../components/icon/iconNames"; export const products = { bart: { name: "Backup and Recovery Tool", iconName: IconNames.EDB_BART }, barman: { name: "Barman" }, - bdr: { - name: "BDR (Bi-Directional Replication)", - iconName: IconNames.HIGH_AVAILABILITY, - }, biganimal: { name: "BigAnimal", iconName: IconNames.BIGANIMAL }, edb_plus: { name: "EDB*Plus" }, efm: { name: "Failover Manager", iconName: IconNames.EDB_EFM }, epas: { name: "EDB Postgres Advanced Server", iconName: IconNames.EDB_EPAS }, - pgd: { name: "EDB Postgres Distributed", iconName: IconNames.EDB_EPAS }, + pgd: { + name: "EDB Postgres Distributed (PGD)", + iconName: IconNames.HIGH_AVAILABILITY, + }, + pge: { name: "EDB Postgres Extended Server", iconName: IconNames.POSTGRESQL }, eprs: { name: "EDB Replication Server", iconName: IconNames.EDB_EPAS }, hadoop_data_adapter: { name: "Hadoop Data Adapter", @@ -33,29 +33,33 @@ export const products = { iconName: 
IconNames.EDB_MIGRATION_TOOLKIT, }, mongo_data_adapter: { - name: "Mongo Data Adapter", + name: "Mongo Foreign Data Wrapper", iconName: IconNames.CONNECT, }, mysql_data_adapter: { - name: "MySQL Data Adapter", + name: "MySQL Foreign Data Wrapper", iconName: IconNames.CONNECT, }, net_connector: { name: ".NET Connector", iconName: IconNames.CONNECT }, ocl_connector: { name: "OCL Connector", iconName: IconNames.CONNECT }, odbc_connector: { name: "ODBC Connector", iconName: IconNames.CONNECT }, pem: { name: "Postgres Enterprise Manager", iconName: IconNames.EDB_PEM }, - patroni: { name: "Patroni" }, + Patroni: { name: "Patroni" }, pgBackRest: { name: "pgBackRest" }, pgbouncer: { name: "PgBouncer", iconName: IconNames.POSTGRESQL }, - pgd: { name: "EDB Postgres Distributed (PGD)", iconName: IconNames.HIGH_AVAILABILITY }, - pge: { name: "EDB Postgres Extended Server", iconName: IconNames.POSTGRESQL }, + pg_failover_slots: { + name: "PG Failover Slots", + iconName: IconNames.POSTGRESQL, + }, + "pglogical 2": { name: "Pglogical 2" }, pgpool: { name: "PgPool-II", iconName: IconNames.POSTGRESQL }, - pglogical: { name: "pglogical" }, postgis: { name: "PostGIS", iconName: IconNames.GLOBE }, + postgresql: { name: "PostgreSQL", iconName: IconNames.POSTGRESQL_MONO }, postgres_for_kubernetes: { name: "EDB Postgres for Kubernetes", iconName: IconNames.KUBERNETES, }, + CloudNativePG: { name: "CloudNativePG" }, repmgr: { name: "repmgr", iconName: IconNames.HIGH_AVAILABILITY }, slony: { name: "Slony Replication", iconName: IconNames.NETWORK2 }, tde: { name: "Transparent Data Encryption", iconName: IconNames.SECURITY },