diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..907737c5090 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,5 @@ +# First pass basic codeowners file + +product_docs/docs/pgd/ @djw-m +product_docs/docs/epas/ @nidhibhammar +product_docs/docs/biganimal/ @drothery-edb diff --git a/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx b/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx index 50f01f1150d..5e388125e55 100644 --- a/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx @@ -1,11 +1,9 @@ --- title: Configuration options product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L687-#L767 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L691-#L771 --- diff --git a/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx b/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx index ef7ae3b31da..f012be24440 100644 --- a/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx @@ -1,11 +1,9 @@ --- title: Conflicts product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L673-#L685 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L677-#L689 --- diff --git a/advocacy_docs/supported-open-source/pglogical2/index.mdx b/advocacy_docs/supported-open-source/pglogical2/index.mdx index c2b060d0c62..041c03c0cac 100644 --- a/advocacy_docs/supported-open-source/pglogical2/index.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/index.mdx @@ -1,9 +1,7 @@ --- title: pglogical 2 product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath navigation: - index - release-notes diff --git a/advocacy_docs/supported-open-source/pglogical2/installation.mdx b/advocacy_docs/supported-open-source/pglogical2/installation.mdx index 8285d463737..36ab4a7dfc3 100644 --- a/advocacy_docs/supported-open-source/pglogical2/installation.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/installation.mdx @@ -1,11 +1,9 @@ --- title: Installation product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L55-#L160 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L55-#L164 --- @@ -36,6 +34,7 @@ If you don’t have PostgreSQL already: - PostgreSQL 12: `yum install postgresql12-server postgresql12-contrib` - PostgreSQL 13: `yum install postgresql13-server postgresql13-contrib` - PostgreSQL 14: `yum install postgresql14-server 
postgresql14-contrib` + - PostgreSQL 15: `yum install postgresql15-server postgresql15-contrib` Then install the “2ndQuadrant’s General Public” repository for your PostgreSQL version, by running the following instructions as root on the destination Linux server: @@ -48,6 +47,7 @@ version, by running the following instructions as root on the destination Linux - PostgreSQL 12: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/12/rpm | bash` - PostgreSQL 13: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/13/rpm | bash` - PostgreSQL 14: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/14/rpm | bash` +- PostgreSQL 15: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/15/rpm | bash` #### Installation @@ -61,6 +61,7 @@ Once the repository is installed, you can proceed to pglogical for your PostgreS - PostgreSQL 12: `yum install postgresql12-pglogical` - PostgreSQL 13: `yum install postgresql13-pglogical` - PostgreSQL 14: `yum install postgresql14-pglogical` +- PostgreSQL 15: `yum install postgresql15-pglogical` You may be prompted to accept the repository GPG key for package signing: @@ -95,6 +96,7 @@ Once pre-requisites are complete, installing pglogical is simply a matter of exe - PostgreSQL 12: `sudo apt-get install postgresql-12-pglogical` - PostgreSQL 13: `sudo apt-get install postgresql-13-pglogical` - PostgreSQL 14: `sudo apt-get install postgresql-14-pglogical` +- PostgreSQL 15: `sudo apt-get install postgresql-15-pglogical` ## From source code diff --git a/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx b/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx index 425f6c98b6a..9c2cb3e8a4a 100644 --- a/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx @@ -1,11 +1,9 @@ --- title: Limitations and restrictions product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L769-#L938 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L773-#L942 --- diff --git a/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx b/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx index c9693084d36..e092f6379f7 100644 --- a/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx @@ -1,14 +1,34 @@ --- title: Release Notes product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L953-#L993 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L957-#L1019 --- +## pglogical 2.4.3 + +Version 2.4.3 is a maintenance release of pglogical 2. + +### Changes + +- Apply data filtering on the correct tuple during initial synchronization. + +- Restore the correct memory context while decoding a change. 
+ +- Drop database never completes in PostgreSQL 15. + +- Don't replicate TRUNCATE as global message. + +## pglogical 2.4.2 + +Version 2.4.2 is a maintenance release of pglogical 2. + +### Changes + +- Add support for PostgreSQL 15. + ## pglogical 2.4.1 Version 2.4.1 is a maintenance release of pglogical 2. diff --git a/advocacy_docs/supported-open-source/pglogical2/requirements.mdx b/advocacy_docs/supported-open-source/pglogical2/requirements.mdx index 7803bce0b52..96748d21726 100644 --- a/advocacy_docs/supported-open-source/pglogical2/requirements.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/requirements.mdx @@ -1,9 +1,7 @@ --- title: Requirements product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L36-#L53 @@ -24,4 +22,4 @@ be the same or weaker (more permissive) on the subscriber than the provider. Tables must have the same `PRIMARY KEY`s. It is not recommended to add additional `UNIQUE` constraints other than the `PRIMARY KEY` (see below). -Some additional requirements are covered in [Limitations and Restrictions](limitations-and-restrictions.mdx). \ No newline at end of file +Some additional requirements are covered in [Limitations and Restrictions](limitations-and-restrictions). diff --git a/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx b/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx index e92e268f23b..61f096a3109 100644 --- a/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx @@ -1,11 +1,9 @@ --- title: Synchronous Replication product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L663-#L671 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L667-#L675 --- diff --git a/advocacy_docs/supported-open-source/pglogical2/usage.mdx b/advocacy_docs/supported-open-source/pglogical2/usage.mdx index 473181e9c5b..dce68454c54 100644 --- a/advocacy_docs/supported-open-source/pglogical2/usage.mdx +++ b/advocacy_docs/supported-open-source/pglogical2/usage.mdx @@ -1,11 +1,9 @@ --- title: Usage product: pglogical 2 -generatedBy: >- - /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from - originalFilePath +generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath originalFilePath: >- - https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L162-#L661 + https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L166-#L665 --- diff --git a/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx b/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx index 532a4470ba3..da4d23aa2b9 100644 --- a/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx +++ b/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx @@ -8,7 +8,7 @@ Released: 21 Aug 2023 Updated: 30 Aug 2023 !!! 
Important Upgrading -Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. +After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. !!! !!! Note After applying patches diff --git a/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx index 14c6f471edc..cce0cfa56ba 100644 --- a/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx +++ b/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx @@ -8,7 +8,7 @@ Released: 21 Aug 2023 Updated: 30 Aug 2023 !!! Important Upgrading -Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. +After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. !!! !!! Note After applying patches diff --git a/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx index a08339f7d5c..092e3f925ae 100644 --- a/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx +++ b/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx @@ -8,7 +8,7 @@ Released: 21 Aug 2023 Updated: 30 Aug 2023 !!! Important Upgrading -Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. +After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. !!! !!! Note After applying patches diff --git a/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx index 960aa9eaad4..5dd0d882c2c 100644 --- a/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx +++ b/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx @@ -8,7 +8,7 @@ Released: 21 Aug 2023 Updated: 30 Aug 2023 !!!
Important Upgrading -Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. +After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. !!! !!! Note After applying patches diff --git a/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx index 1e35e3e32dd..ab3c24e9da7 100644 --- a/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx +++ b/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx @@ -9,7 +9,7 @@ Released: 21 Aug 2023 Updated: 30 Aug 2023 !!! Important Upgrading -Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. +After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool. !!! !!! Note After applying patches diff --git a/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx index 984bb6b15cb..1a0746f45ac 100644 --- a/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx +++ b/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx @@ -39,8 +39,9 @@ VARCHAR, VARCHAR2, NVARCHAR and NVARCHAR2 If the string to assign is shorter than `n`, values of type `VARCHAR`, `VARCHAR2`, `NVARCHAR`, and `NVARCHAR2` store the shorter string without padding. - !!! Note - The trailing spaces are semantically significant in `VARCHAR` values. +!!! Note +Trailing spaces are semantically significant in `VARCHAR` values. +!!! If you explicitly cast a value to a `VARCHAR` type, an over-length value is truncated to `n` characters without raising an error (as specified by the SQL standard). @@ -55,4 +56,4 @@ VARCHAR, VARCHAR2, NVARCHAR and NVARCHAR2 Thus, use of the `CLOB` type is limited by what can be done for `TEXT`, such as a maximum size of approximately 1 GB. - For larger amounts of data, instead of using the `CLOB` data type, use the PostgreSQL *large objects* feature that relies on the `pg_largeobject` system catalog.
For information on large objects, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/largeobjects.html). diff --git a/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx index a7f6073b5a3..6c6cefbddc8 100644 --- a/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx +++ b/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx @@ -5,13 +5,17 @@ title: "Certified and supported product versions" You can use the following database product versions with Replication Server: - PostgreSQL versions 11, 12, 13, 14, and 15 -- Advanced Server versions 11, 12, 13, 14, and 15 +- EDB Postgres Advanced Server versions 11, 12, 13, 14, and 15 - Oracle 11g Release 2 version 11.2.0.2.0 is explicitly certified. Newer minor versions in the 11.2 line are supported as well. - Oracle 12c version 12.1.0.2.0 is explicitly certified. Newer minor versions in the 12.1 line are supported as well. - Oracle 18c version 18.1.0.2.0 is explicitly certified. Newer minor versions in the 18.1 line are supported as well. - Oracle 19c version 19.1.0.2.0 is explicitly certified. Newer minor versions in the 19.1 line are supported as well. - SQL Server 2014 version 12.0.5000.0 is explicitly certified. Newer minor versions in the 12.0 line are supported as well. +!!!Note + All PostgreSQL and EDB Postgres Advanced Server versions available as BigAnimal single-node and primary/standby high availability cluster types are also supported for SMR configurations. Consult the BigAnimal (EDB’s managed database cloud service) [documentation](/biganimal/latest) for more information about its [supported cluster types](/biganimal/latest/overview/02_high_availability/) and its [database version policy](/biganimal/latest/overview/05_database_version_policy/) for the versions of PostgreSQL and EDB Postgres Advanced Server available in BigAnimal. + + As of Replication Server 7.1.0: - SQL Server 2016 version 13.00.5026 is explicitly certified. Newer minor versions in the 13.0 line are supported as well. - SQL Server 2017 version 14.0.1000.169 is explicitly certified. Newer minor versions in the 14.0 line are supported as well. diff --git a/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx index d72d21eb650..a95c78c6c34 100644 --- a/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx +++ b/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx @@ -85,11 +85,18 @@ The table shows whether a configuration property can be reloaded. This example reloads the configuration file. -```shell -$ java -jar edb-repcli.jar -reloadconf -repsvrfile ~/subsvr.prop +!!! Note +When you execute the `reloadconf` command, if any configuration options have been changed from their default values, the output includes the configuration option and its new value. +```shell +java -jar edb-repcli.jar -reloadconf -repsvrfile subsvr.prop +__OUTPUT__ Reloading Subscription Server configuration file... Reloaded configuration options from ../etc/xdb_subserver.conf... +The conf option 'snapshotParallelTableLoaderLimit' set to '1' +The conf option 'skipCheckConst' set to 'false' +The conf option 'snapshotParallelLoadCount' set to '1' Configuration was reloaded successfully. ``` +!!!
diff --git a/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx b/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx index a7662b54ed4..90931a7714a 100644 --- a/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx +++ b/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx @@ -1,5 +1,6 @@ --- -title: "Controlling logging level, log file sizes, and rotation count" +title: "Controlling logging level, log file sizes, rotation count, and locale" +navTitle: "Controlling message logging" redirects: - /eprs/latest/10_appendix/04_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level --- @@ -13,7 +14,7 @@ The following options control various aspects of message logging in the publicat See [Publication and subscription server startup failures](../../02_resolving_problems/02_where_to_look_for_errors/#pub_and_sub_startup_failures) and [Snapshot replication failures](../../02_resolving_problems/02_where_to_look_for_errors/#snapshot_replication_failures) for more information. -`logging.level` +## `logging.level` Set the `logging.level` option to control the severity of messages written to the publication server log file and the subscription server log file. @@ -21,7 +22,7 @@ Set the `logging.level` option to control the severity of messages written to th The default value is `WARNING`. -`logging.file.size` +## `logging.file.size` Set the `logging.file.size` option to control the maximum file size (in megabytes) of the publication server log file and the subscription server log file. @@ -32,7 +33,7 @@ Set the `logging.file.size` option to control the maximum file size (in megabyte The default value is `50`, in megabytes. -`logging.file.count` +## `logging.file.count` Set the `logging.file.count` option to control the number of files in the log file rotation history of the publication server log file and the subscription server log file. @@ -57,7 +58,21 @@ When log file rotation is enabled and the current, active log file (`pubserver.l - Each remaining log file is renamed with the next greater integer suffix (`pubserver.log.m` is renamed to `pubserver.log.m+1`, with m varying from `0` to `n-2`). - A new, active log file is created (`pubserver.log.0`). -`mtk.logging.file.size` +## `logging.default.locale` + +Set the `logging.default.locale` option to use either the current system locale or English (en) for publication and subscription logs. + +`logging.default.locale={system | en}` + +The default value is `system`. + +!!!Note +This option is only applicable for publication and subscription logs and isn't supported for mtk.log. + +The RepCLI and RepConsole logs continue showing text in the default locale. +!!! + +## `mtk.logging.file.size` !!! Note This option applies only to the publication server. @@ -68,7 +83,7 @@ Set the `mtk.logging.file.size` option to control the maximum file size (in mega The default value is `50`, in megabytes. -`mtk.logging.file.count` +## `mtk.logging.file.count` !!! Note This option applies only to the publication server. 
@@ -94,3 +109,5 @@ When the current, active log file (`mtk.log`) reaches the size specified by `mtk - Each remaining log file with a suffix is renamed with the next greater integer suffix (`mtk.log.m` is renamed to `mtk.log.m+1`, with `m` varying from `1` to `n-1`). - Log file `mtk.log` is renamed to `mtk.log.1`. - A new, active log file is created (`mtk.log`). + + diff --git a/product_docs/docs/eprs/7/eprs_rel_notes/eprs_rel_notes_7.6.0.mdx new file mode 100644 index 00000000000..b9db5af9199 --- /dev/null +++ b/product_docs/docs/eprs/7/eprs_rel_notes/eprs_rel_notes_7.6.0.mdx @@ -0,0 +1,22 @@ +--- +title: Replication Server 7.6.0 release notes +navTitle: "Version 7.6.0" +--- + +Released: 07 Sep 2023 + +New features, enhancements, bug fixes, and other changes in Replication Server 7.6.0 include the following: + +| Type | Description | | ------- |------------ | | Enhancement | EDB Replication Server now supports logging Publication and Subscription server logs in the English language, overriding the default locale, using the `logging.default.locale` configuration parameter. [Support ticket #89877] | | Enhancement | The snapshot operation now uses the table-level parallel loading capability, which reduces overhead on the source database by using a range-based criterion for loading each individual table data chunk instead of a fetch-offset approach. This optimization applies when the table primary key/unique constraint is based on a non-composite numeric type attribute. [Support ticket #93360] | | Enhancement | To help investigate data synchronization gaps, Replication Server now logs when rows are skipped due to filter criteria. [Support ticket #91296] | | Bug fix | Fixed an issue where metadata from the primary controller database wasn't replicated when a SQL Server or an Oracle publication database was added as a standby controller database. [Support tickets #82050 and #91884] | | Bug fix | Fixed issues related to foreign key violations in the standby controller database that prevented upgrading from version 6.2.x to 7.x. [Support tickets #93129, #92056, and #91588] | | Bug fix | Corrected a few code paths to release unused resources for timely garbage collection and optimized memory utilization. [Support ticket #91588] | | Bug fix | Fixed a Data Validator Oracle edge case resulting in a `String index out of range` error for an Oracle to EDB Postgres Advanced Server validation. | | Bug fix | Fixed an issue resulting in a synchronization failure for `nchar`, `nvarchar`, `xml`, and `sqlvariant` when using the mssql-jdbc-10.2.1.jre8.jar file for a SQL Server to EDB Postgres Advanced Server cluster setup. | | Bug fix | Updated database type name references of “Postgres Plus Advanced Server” in the Replication Console and Replication CLI to “EDB Postgres Advanced Server”. | | Bug fix | Fixed an issue that prevented logging of changed configuration parameters at Publication and Subscription server start or when the `reloadconf` command is executed. | | Bug fix | Fixed a regression that led to an `Invalid custom column type mapping` error being observed for Publication tables with no column mapping.
| diff --git a/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx b/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx index 8ba40e8def8..f24b2ccc714 100644 --- a/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx +++ b/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx @@ -1,8 +1,9 @@ --- -title: "Release Notes" +title: "Release notes" redirects: - ../01_whats_new/ navigation: + - eprs_rel_notes_7.6.0 - eprs_rel_notes_7.5.1 - eprs_rel_notes_7.5.0 - eprs_rel_notes_7.4.0 @@ -13,6 +14,7 @@ The Replication Server documentation describes the latest version including mino | Version | Release Date | | -------------------------------- | ------------ | +| [7.6.0](eprs_rel_notes_7.6.0) | 07 Sep 2023 | | [7.5.1](eprs_rel_notes_7.5.1) | 26 May 2023 | | [7.5.0](eprs_rel_notes_7.5.0) | 14 Feb 2023 | | [7.4.0](eprs_rel_notes_7.4.0) | 29 Nov 2022 | diff --git a/product_docs/docs/eprs/7/supported_platforms.mdx b/product_docs/docs/eprs/7/supported_platforms.mdx index 58e1e24015a..c142c40aef6 100644 --- a/product_docs/docs/eprs/7/supported_platforms.mdx +++ b/product_docs/docs/eprs/7/supported_platforms.mdx @@ -1,5 +1,5 @@ --- -title: "Supported platforms" +title: "Supported Java platforms" redirects: - /eprs/latest/01_introduction/04_supported_jdk_versions/ - /eprs/latest/01_introduction/05_supported_jdk_versions/ @@ -22,4 +22,4 @@ Replication Server is certified to work with the following Java platforms: | Debian 10 and 11 | Red Hat OpenJDK 11 | | Ubuntu 18, 20, 22 | OpenJDK 11 | -See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#eprs) for more information. +See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#eprs) for more information on operating system support. diff --git a/product_docs/docs/pgd/5/appusage.mdx b/product_docs/docs/pgd/5/appusage.mdx index 3cf4f83520d..7c54778a8cb 100644 --- a/product_docs/docs/pgd/5/appusage.mdx +++ b/product_docs/docs/pgd/5/appusage.mdx @@ -314,8 +314,9 @@ Being asynchronous by default, peer nodes might lag behind, making it possible for a client connected to multiple PGD nodes or switching between them to read stale data. -A [queue wait function](/pgd/latest/reference/functions/#bdrwait_for_apply_queue) is -provided for clients or proxies to prevent such stale reads. +A [queue wait +function](/pgd/latest/reference/functions/#bdrwait_for_apply_queue) is provided +for clients or proxies to prevent such stale reads. The synchronous replication features of Postgres are available to PGD as well. In addition, PGD provides multiple variants for more synchronous @@ -323,287 +324,6 @@ replication. See [Durability and performance options](durability) for an overview and comparison of all variants available and its different modes. -## Application testing - -You can test PGD applications using the following programs, -in addition to other techniques: - -- [Trusted Postgres Architect](#trusted-postgres-architect) -- [pgbench with CAMO/Failover options](#pgbench-with-camofailover-options) -- [isolationtester with multi-node access](#isolationtester-with-multi-node-access) - -### Trusted Postgres Architect - -[Trusted Postgres Architect](/tpa/latest) is the system used by EDB to -deploy reference architectures, including those based on EDB Postgres Distributed. - -Trusted Postgres Architect includes test suites for each reference architecture. 
-It also simplifies creating and managing a local collection of tests to run -against a TPA cluster, using a syntax like the following: - -``` -tpaexec test mycluster mytest -``` - -We strongly recommend that developers write their own multi-node suite -of Trusted Postgres Architect tests that verify the main expected properties -of the application. - -### pgbench with CAMO/failover options - -In EDB Postgres Extended, the pgbench was extended to allow users to -run failover tests while using CAMO or regular PGD deployments. The following options were added: - -``` --m, --mode=regular|camo|failover -mode in which pgbench should run (default: regular) - ---retry -retry transactions on failover -``` - -In addition to these options, you must specify the connection information about the -peer node for failover in [DSN -form](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). - -- Use `-m camo` or `-m failover` to specify the mode for pgbench. - You can use The `-m failover` specification to test failover in - regular PGD deployments. - -- Use `--retry` to specify whether to retry transactions when - failover happens with `-m failover` mode. This option is enabled by default - for `-m camo` mode. - -Here's an example in a CAMO environment: - -```sh - pgbench -m camo -p $node1_port -h $node1_host bdrdemo \ - "host=$node2_host user=postgres port=$node2_port dbname=bdrdemo" -``` - -This command runs in CAMO mode. It connects to node1 and runs the tests. If the -connection to node1 is lost, then pgbench connects to -node2. It queries node2 to get the status of in-flight transactions. -Aborted and in-flight transactions are retried in camo mode. - -In failover mode, if you specify `--retry`, then in-flight transactions are retried. In -this scenario there's no way to find the status of in-flight transactions. - -### isolationtester with multi-node access - -`isolationtester` was extended to allow users to run tests on multiple -sessions and on multiple nodes. This tool is used for internal PGD testing, -although it's also available for use with user application testing. - -``` -$ isolationtester \ - --outputdir=./iso_output \ - --create-role=logical \ - --dbname=postgres \ - --server 'd1=dbname=node1' \ - --server 'd2=dbname=node2' \ - --server 'd3=dbname=node3' -``` - -Isolation tests are a set of tests for examining concurrent behaviors in -PostgreSQL. These tests require running multiple interacting transactions, -which requires managing multiple concurrent connections and therefore -can't be tested using the normal `pg_regress` program. The name "isolation" -comes from the fact that the original motivation was to test the -serializable isolation level. Tests for other sorts of concurrent -behaviors were added as well. - -It's built using PGXS as an external module. -On installation, it creates the `isolationtester` binary file, which is run by -`pg_isolation_regress` to perform concurrent regression tests and observe -results. - -`pg_isolation_regress` is a tool similar to `pg_regress`, but instead of using -psql to execute a test, it uses isolationtester. It accepts all the same -command-line arguments as `pg_regress`. It was modified to accept multiple -hosts as parameters. It then passes the host conninfo along with server names -to the `isolationtester` binary. Isolation tester compares these server names with the -names specified in each session in the spec files and runs given tests on -respective servers. 
- -To define tests with overlapping transactions, use test specification -files with a custom syntax. To add -a new test, place a spec file in the `specs/` subdirectory, add the expected -output in the `expected/` subdirectory, and add the test's name to the makefile. - -Isolationtester is a program that uses libpq to open multiple connections -and executes a test specified by a spec file. A libpq connection string -specifies the server and database to connect to. Defaults derived from -environment variables are used otherwise. - -Specification consists of five parts, tested in this order: - -`server ""` - - This part defines the name of the servers for the sessions to run on. - There can be zero or more server `""` specifications. - The conninfo corresponding to the names is provided by the command to - run `isolationtester`. This is described in `quickstart_isolationtest.md`. - This part is optional. - -`setup { }` - - The given SQL block is executed once, in one session only, before running - the test. Create any test tables or other required objects here. This - part is optional. Multiple setup blocks are allowed if needed. Each is - run separately, in the given order. The reason for allowing multiple - setup blocks is that each block is run as a single PQexec submission, - and some statements such as VACUUM can't be combined with others in such - a block. - -`teardown { }` - - The teardown SQL block is executed once after the test is finished. Use - this part to clean up in preparation for the next permutation, such as dropping - any test tables created by setup. This part is optional. - -`session ""` - - There are normally several "session" parts in a spec file. Each - session is executed in its own connection. A session part consists - of three parts: setup, teardown, and one or more "steps." The per-session - setup and teardown parts have the same syntax as the per-test setup and - teardown, but they are executed in each session. The - setup part typically contains a BEGIN command to begin a transaction. - - A session part also consists of `connect_to` specification, - which points to a server name specified in the beginning that - indicates the server on which this session runs. - - `connect_to ""` - - Each step has the syntax: - - `step "" { }` - - Where `` is a name identifying this step, and SQL is a SQL statement - (or statements, separated by semicolons) that's executed in the step. - Step names must be unique across the whole spec file. - -`permutation ""` - - A permutation line specifies a list of steps that are run in that order. - Any number of permutation lines can appear. If no permutation lines are - given, the test program automatically generates all possible orderings - of the steps from each session (running the steps of any one session in - order). The list of steps in a manually specified - "permutation" line doesn't actually have to be a permutation of the - available steps. It can, for instance, repeat some steps more than once - or leave others out. - -Lines beginning with # are comments. - -For each permutation of the session steps (whether these are manually -specified in the spec file or automatically generated), the isolation -tester runs: - -1. The main setup part -1. Per-session setup parts -1. The selected session steps -1. Per-session teardown -1. The main teardown script - -Each selected step is sent to the connection associated -with its session. - -To run isolation tests in a PGD environment that ran all prerequisite make -commands: - -1. 
Run `make isolationcheck-install` to install the isolationtester submodule. - -2. You can run isolation regression tests using either - of the following commands from the bdr-private repo: - - `make isolationcheck-installcheck` - `make isolationcheck-makecheck` - -To run `isolationcheck-installcheck`, you need to have two or more postgresql -servers running. Pass the conninfo of each server to `pg_isolation_regress` -in the PGD makefile. - Ex: `pg_isolation_regress --server 'd1=host=myhost dbname=mydb port=5434' - --server 'd2=host=myhost1 dbname=mydb port=5432'` - -Next, add a `.spec` file containing tests in the `specs/isolation` directory -of the `bdr-private/` repo. Add a `.out` file in the `expected/isolation` directory of -the `bdr-private/` repo. - -Then run: - - `make isolationcheck-installcheck` - -`Isolationcheck-makecheck` currently supports running isolation tests on a -single instance by setting up PGD between multiple databases. - -You need to pass appropriate database names and the conninfo of bdr instances -to `pg_isolation_regress` in the PGD makefile as follows: - `pg_isolation_regress --dbname=db1,db2 --server 'd1=dbname=db1' - --server 'd2=dbname=db2'` - -Then run: - - `make isolationcheck-makecheck` - -Each step can contain commands that block until further action is taken. -Most likely, some other session runs a step that unblocks it or causes a -deadlock. A test that uses this ability must manually specify valid -permutations, that is, those that don't expect a blocked session to execute a -command. If a test doesn't follow that rule, `isolationtester` cancels it -after 300 seconds. If the cancel doesn't work, `isolationtester` exits -uncleanly after 375 seconds of wait time. Avoid testing invalid -permutations because they can make the isolation tests take -a very long time to run, and they serve no useful testing purpose. - -`isolationtester` recognizes that a command has blocked by checking whether it's shown as waiting in the `pg_locks` view. Therefore, only -blocks on heavyweight locks are detected. - -## Performance testing and tuning - -PGD allows you to issue write transactions onto multiple master nodes. -Bringing those writes back together onto each node has a cost in -performance. - -First, replaying changes from another node has a CPU cost, an I/O cost, -and it generates WAL records. The resource use is usually less -than in the original transaction since CPU overheads are lower as a result -of not needing to reexecute SQL. In the case of UPDATE and DELETE -transactions, there might be I/O costs on replay if data isn't cached. - -Second, replaying changes holds table-level and row-level locks that can -produce contention against local workloads. The conflict-free replicated data types (CRDT) and column-level conflict detection (CLCD) features -ensure you get the correct answers even for concurrent updates, but they -don't remove the normal locking overheads. If you get locking contention, -try to avoid conflicting updates, or keep transactions as short as -possible. A heavily updated row in a larger transaction causes -a bottleneck on performance for that transaction. Complex applications -require some thought to maintain scalability. - -If you think you're having performance problems, -develop performance tests using the benchmarking tools. pgbench -allows you to write custom test scripts specific to your use case -so you can understand the overheads of your SQL and measure the impact -of concurrent execution. 
- -If PGD is running slow, then we suggest the following: - -1. Write a custom test script for pgbench, as close as you can make it - to the production system's problem case. -2. Run the script on one node to give you a baseline figure. -3. Run the script on as many nodes as occurs in production, using the - same number of sessions in total as you did on one node. This technique - shows you the effect of moving to multiple nodes. -4. Increase the number of sessions for these two tests so you can - plot the effect of increased contention on your application. -5. Make sure your tests are long enough to account for replication delays. -6. Ensure that replication delay isn't growing during your tests. - -Use all of the normal Postgres tuning features to improve the speed -of critical parts of your application. - ## Use of table access methods (TAMs) in PGD PGD 5.0 supports two table access methods released with EDB Postgres 15.0. @@ -627,8 +347,3 @@ After you create the extension, you can use TAM to create a table using This replicates to all the PGD nodes. For more information on these table access methods, see [`CREATE TABLE`](/epas/latest/epas_compat_sql/36_create_table/). - - - - - diff --git a/product_docs/docs/pgd/5/index.mdx index 1285499d76e..ff42aeb253c 100644 --- a/product_docs/docs/pgd/5/index.mdx +++ b/product_docs/docs/pgd/5/index.mdx @@ -36,6 +36,7 @@ navigation: - monitoring - cli - transaction-streaming + - testingandtuning - striggers - scaling - twophase diff --git a/product_docs/docs/pgd/5/known_issues.mdx index 072f22d8c02..a6314e90a4e 100644 --- a/product_docs/docs/pgd/5/known_issues.mdx +++ b/product_docs/docs/pgd/5/known_issues.mdx @@ -44,8 +44,6 @@ release. - Transactions using Eager Replication can't yet execute DDL. The TRUNCATE command is allowed. -- Not all DDL can run when either CAMO or Group Commit is used. - - Parallel apply isn't currently supported in combination with Group Commit. Make sure to disable it when using Group Commit by either: - Setting `num_writers` to 1 for the node group using [`bdr.alter_node_group_config`](/pgd/latest/reference/nodes-management-interfaces/#bdralter_node_group_config). - Using the GUC `bdr.writers_per_subscription`. See [Configuration of generic replication](/pgd/latest/reference/pgd-settings/#generic-replication). diff --git a/product_docs/docs/pgd/5/limitations.mdx index 286c2829b5b..87c3db87415 100644 --- a/product_docs/docs/pgd/5/limitations.mdx +++ b/product_docs/docs/pgd/5/limitations.mdx @@ -108,6 +108,33 @@ Be sure to disable transaction streaming when planning to use CAMO. You can configure this option globally or in the PGD node group. See [Transaction streaming configuration](../transaction-streaming#configuration). +- Not all DDL can run when CAMO is used. If unsupported DDL is used, a warning is logged +and the transaction's commit scope is set to local only. The only supported DDL operations are: + - non-concurrent CREATE INDEX + - non-concurrent DROP INDEX + - non-concurrent REINDEX of an individual table or index + - CLUSTER (of a single relation or index only) + - ANALYZE + - TRUNCATE + +## Group Commit + +[Group Commit](durability/group-commit) is a feature that enables configurable synchronous commits over +nodes in a group. If you use this feature, take the following limitations into account: + +- Not all DDL can run when Group Commit is used.
If unsupported DDL is used, a warning is logged +and the transaction's commit scope is set to local only. The only supported DDL operations are: + - non-concurrent CREATE INDEX + - non-concurrent DROP INDEX + - non-concurrent REINDEX of an individual table or index + - CLUSTER (of a single relation or index only) + - ANALYZE + - TRUNCATE + +## Eager + +[Eager](consistency/eager) is a Group Commit feature that avoids conflicts by eagerly aborting transactions that might clash. It's subject to the same limitations as Group Commit. + ## Other limitations This noncomprehensive list includes other limitations that are expected and diff --git a/product_docs/docs/pgd/5/reference/index.json index 8e5c541ecc2..b846fc85cdc 100644 --- a/product_docs/docs/pgd/5/reference/index.json +++ b/product_docs/docs/pgd/5/reference/index.json @@ -185,6 +185,7 @@ "bdrreplication_set_remove_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table", "bdrreplication_set_add_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter", "bdrreplication_set_remove_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter", + "pgd_bench": "/pgd/latest/reference/testingandtuning#pgd_bench", "bdralter_sequence_set_kind": "/pgd/latest/reference/sequences#bdralter_sequence_set_kind", "bdrextract_timestamp_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_snowflakeid", "bdrextract_nodeid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_snowflakeid", diff --git a/product_docs/docs/pgd/5/reference/index.mdx index 504e73f87c9..8504dba6028 100644 --- a/product_docs/docs/pgd/5/reference/index.mdx +++ b/product_docs/docs/pgd/5/reference/index.mdx @@ -14,6 +14,7 @@ navigation: - repsets-management - repsets-membership - repsets-ddl-filtering +- testingandtuning - sequences - autopartition - streamtriggers @@ -263,6 +264,10 @@ The reference section is a definitive listing of all functions, views and comman * [`bdr.replication_set_remove_ddl_filter`](repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter) +## [Testing and tuning commands](testingandtuning) + * [`pgd_bench`](testingandtuning#pgd_bench) + + ## [Global sequence management interfaces](sequences) ### [Sequence functions](sequences#sequence-functions) * [`bdr.alter_sequence_set_kind`](sequences#bdralter_sequence_set_kind) diff --git a/product_docs/docs/pgd/5/reference/index.mdx.src index 4443174c515..b95a72d417c 100644 --- a/product_docs/docs/pgd/5/reference/index.mdx.src +++ b/product_docs/docs/pgd/5/reference/index.mdx.src @@ -14,6 +14,7 @@ navigation: - repsets-management - repsets-membership - repsets-ddl-filtering +- testingandtuning - sequences - autopartition - streamtriggers diff --git a/product_docs/docs/pgd/5/reference/testingandtuning.mdx new file mode 100644 index 00000000000..317b6746400 --- /dev/null +++ b/product_docs/docs/pgd/5/reference/testingandtuning.mdx @@ -0,0 +1,120 @@ +--- +title: Testing and tuning commands +navTitle: Testing and tuning +indexdepth: 2 +--- + +EDB Postgres Distributed has tools that help with testing and tuning your PGD clusters. For background, read the [Testing and tuning](../testingandtuning) section.
+ + +## `pgd_bench` + +### Synopsis + +A benchmarking tool for PGD-enhanced PostgreSQL. + +```shell +pgd_bench [OPTION]... [DBNAME] [DBNAME2] +``` + +`DBNAME` may be a conninfo string of the format: + `"host=10.1.1.2 user=postgres dbname=master"` + +Consult the [Testing and tuning - pgd_bench](../testingandtuning#pgd_bench) section for examples +of `pgd_bench` options and usage. + +### Options + +`pgd_bench`-specific options include: + +#### Setting mode + +`-m` or `--mode` + +This option can be set to `regular`, `camo`, or `failover`. It defaults to `regular`. + +* `regular` — Only a single node is needed to run `pgd_bench`. +* `camo` — A second node must be specified to act as the CAMO partner (CAMO must be set up). +* `failover` — A second node must be specified to act as the failover. + +When using `-m failover`, an additional option `--retry` is available. This +instructs `pgd_bench` to retry transactions when there's a failover. The `--retry` +option is automatically enabled with `-m camo`. + +#### Setting GUC variables + + `-o` or `--set-option` + +This option is followed by `NAME=VALUE` entries, which are applied using the +PostgreSQL [`SET`](https://www.postgresql.org/docs/current/sql-set.html) command on each server that `pgd_bench` connects to, and only on those servers. + +The other options are identical to those of community PostgreSQL `pgbench`. For more +details, consult the official documentation on +[`pgbench`](https://www.postgresql.org/docs/current/pgbench.html). + +We list all the options (`pgd_bench` and `pgbench`) below for completeness. + +#### Initialization options: +- `-i, --initialize` — invokes initialization mode +- `-I, --init-steps=[dtgGvpf]+` (default `"dtgvp"`) — run selected initialization steps + - `d` — drop any existing `pgbench` tables + - `t` — create the tables used by the standard `pgbench` scenario + - `g` — generate data client-side and load it into the standard tables, replacing any data already present + - `G` — generate data server-side and load it into the standard tables, replacing any data already present + - `v` — invoke `VACUUM` on the standard tables + - `p` — create primary key indexes on the standard tables + - `f` — create foreign key constraints between the standard tables +- `-F, --fillfactor=NUM` — set fill factor +- `-n, --no-vacuum` — do not run `VACUUM` during initialization +- `-q, --quiet` — quiet logging (one message every 5 seconds) +- `-s, --scale=NUM` — scaling factor +- `--foreign-keys` — create foreign key constraints between tables +- `--index-tablespace=TABLESPACE` — create indexes in the specified tablespace +- `--partition-method=(range|hash)` — partition `pgbench_accounts` with this method (default: range) +- `--partitions=NUM` — partition `pgbench_accounts` into `NUM` parts (default: 0) +- `--tablespace=TABLESPACE` — create tables in the specified tablespace +- `--unlogged-tables` — create tables as unlogged tables (Note: unlogged tables are not replicated) + +#### Options to select what to run: +- `-b, --builtin=NAME[@W]` — add builtin script NAME weighted at W (default: 1). Use `-b list` to list available scripts. +- `-f, --file=FILENAME[@W]` — add script `FILENAME` weighted at W (default: 1) +- `-N, --skip-some-updates` — skip updates of pgbench_tellers and pgbench_branches. Same as `-b simple-update` +- `-S, --select-only` — perform SELECT-only transactions.
Same as `-b select-only` + +#### Benchmarking options: +- `-c, --client=NUM` — number of concurrent database clients (default: 1) +- `-C, --connect` — establish new connection for each transaction +- `-D, --define=VARNAME=VALUE` — define variable for use by custom script +- `-j, --jobs=NUM` — number of threads (default: 1) +- `-l, --log` — write transaction times to log file +- `-L, --latency-limit=NUM` — count transactions lasting more than NUM ms as late +- `-m, --mode=regular|camo|failover` — mode in which pgd_bench should run (default: `regular`) +- `-M, --protocol=simple|extended|prepared` — protocol for submitting queries (default: `simple`) +- `-n, --no-vacuum` — do not run `VACUUM` before tests +- `-o, --set-option=NAME=VALUE` — specify runtime SET option +- `-P, --progress=NUM` — show thread progress report every NUM seconds +- `-r, --report-per-command` — report latencies, failures, and retries per command +- `-R, --rate=NUM` — target rate in transactions per second +- `-s, --scale=NUM` — report this scale factor in output +- `-t, --transactions=NUM` — number of transactions each client runs (default: 10) +- `-T, --time=NUM` — duration of benchmark test in seconds +- `-v, --vacuum-all` — vacuum all four standard tables before tests +- `--aggregate-interval=NUM` — aggregate data over NUM seconds +- `--failures-detailed` — report the failures grouped by basic types +- `--log-prefix=PREFIX` — prefix for transaction time log file (default: `pgbench_log`) +- `--max-tries=NUM` — max number of tries to run transaction (default: 1) +- `--progress-timestamp` — use Unix epoch timestamps for progress +- `--random-seed=SEED` — set random seed ("time", "rand", integer) +- `--retry` — retry transactions on failover, used with `-m` +- `--sampling-rate=NUM` — fraction of transactions to log (e.g., 0.01 for 1%) +- `--show-script=NAME` — show builtin script code, then exit +- `--verbose-errors` — print messages of all errors + +#### Common options: +- `-d, --debug` — print debugging output +- `-h, --host=HOSTNAME` — database server host or socket directory +- `-p, --port=PORT` — database server port number +- `-U, --username=USERNAME` — connect as specified database user +- `-V, --version` — output version information, then exit +- `-?, --help` — show help, then exit + diff --git a/product_docs/docs/pgd/5/testingandtuning.mdx new file mode 100644 index 00000000000..06931bee105 --- /dev/null +++ b/product_docs/docs/pgd/5/testingandtuning.mdx @@ -0,0 +1,153 @@ +--- +title: Testing and tuning PGD clusters +navTitle: Testing and tuning +--- + +You can test PGD applications using the following approaches: + +- [Trusted Postgres Architect](#trusted-postgres-architect) +- [pgd_bench with CAMO/failover options](#pgd_bench) + + +### Trusted Postgres Architect + +[Trusted Postgres Architect](/tpa/latest) is the system used by EDB to +deploy reference architectures, including those based on EDB Postgres Distributed. + +Trusted Postgres Architect includes test suites for each reference architecture. +It also simplifies creating and managing a local collection of tests to run +against a TPA cluster, using a syntax like the following: + +``` +tpaexec test mycluster mytest +``` + +We strongly recommend that developers write their own multi-node suite +of Trusted Postgres Architect tests that verify the main expected properties +of the application.
+ +### pgd_bench + +The Postgres benchmarking application +[`pgbench`](https://www.postgresql.org/docs/current/pgbench.html) has been +extended in PGD 5.0 in the form of a new application: `pgd_bench`. + +[`pgd_bench`](/pgd/latest/reference/testingandtuning#pgd_bench) is a regular command-line utility that's added to PostgreSQL's bin +directory. The utility is based on the community PostgreSQL `pgbench` tool but +supports benchmarking CAMO transactions and PGD-specific workloads. + +The functionality of `pgd_bench` is a superset of that of `pgbench`, but it +requires the BDR extension to be installed in order to work properly. + +Key differences include: + +- Adjustments to the initialization (`-i` flag) with the standard + `pgbench` scenario to prevent global lock timeouts in certain cases +- The `VACUUM` command in the standard scenario is executed on all nodes +- `pgd_bench` releases are tied to the releases of the BDR extension + and are built against the corresponding PostgreSQL flavour (this is + reflected in the output of the `--version` flag) + +The current version allows users to run failover tests while using CAMO or +regular PGD deployments. + +The following options were added: + +``` +-m, --mode=regular|camo|failover +mode in which pgd_bench should run (default: regular) +``` + +- Use `-m camo` or `-m failover` to specify the mode for pgd_bench. + You can use the `-m failover` option to test failover in + regular PGD deployments. + +``` +--retry +retry transactions on failover +``` + +- Use `--retry` to specify whether to retry transactions when + failover happens with `-m failover` mode. This option is enabled by default + for `-m camo` mode. + +In addition to these options, you must specify the connection information about +the peer node for failover in [DSN +form](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). + +Here's an example in a CAMO environment: + +```sh + pgd_bench -m camo -p $node1_port -h $node1_host bdrdemo \ + "host=$node2_host user=postgres port=$node2_port dbname=bdrdemo" +``` + +This command runs in CAMO mode. It connects to node1 and runs the tests. If the +connection to node1 is lost, then pgd_bench connects to node2. It queries node2 +to get the status of in-flight transactions. Aborted and in-flight transactions +are retried in CAMO mode. + +In failover mode, if you specify `--retry`, then in-flight transactions are +retried. In this scenario there's no way to find the status of in-flight +transactions. + +### Notes on pgd_bench usage + +- When using custom init scripts, it's important to understand the implications of the DDL commands. +It's generally recommended to wait for the secondary nodes to catch up on the data load steps +before proceeding with DDL operations such as `CREATE INDEX`. Such operations acquire global locks that +can't be acquired until the data load is complete and thus may time out. + +- No extra steps are taken to suppress client messages, such as `NOTICE`s and `WARNING`s emitted +by PostgreSQL or any extensions, including the BDR extension. It's the user's +responsibility to suppress them by setting appropriate variables (for example, `client_min_messages`, +`bdr.camo_enable_client_warnings`). + + + +## Performance testing and tuning + +PGD allows you to issue write transactions onto multiple master nodes. Bringing +those writes back together onto each node has a cost in performance. + +First, replaying changes from another node has a CPU cost, an I/O cost, +and it generates WAL records.
The resource use is usually less +than in the original transaction since CPU overheads are lower as a result +of not needing to reexecute SQL. In the case of UPDATE and DELETE +transactions, there might be I/O costs on replay if data isn't cached. + +Second, replaying changes holds table-level and row-level locks that can produce +contention against local workloads. The conflict-free replicated data types +(CRDT) and column-level conflict detection (CLCD) features ensure you get the +correct answers even for concurrent updates, but they don't remove the normal +locking overheads. If you get locking contention, try to avoid conflicting +updates, or keep transactions as short as possible. A heavily updated row in a +larger transaction causes a bottleneck on performance for that transaction. +Complex applications require some thought to maintain scalability. + +If you think you're having performance problems, develop performance tests using +the benchmarking tools. pgd_bench allows you to write custom test scripts specific +to your use case so you can understand the overheads of your SQL and measure the +impact of concurrent execution. + +If PGD is running slowly, then we suggest the following: + +1. Write a custom test script for pgd_bench, as close as you can make it + to the production system's problem case. (A sketch follows below.) +2. Run the script on one node to give you a baseline figure. +3. Run the script on as many nodes as occur in production, using the + same number of sessions in total as you did on one node. This technique + shows you the effect of moving to multiple nodes. +4. Increase the number of sessions for these two tests so you can + plot the effect of increased contention on your application. +5. Make sure your tests are long enough to account for replication delays. +6. Ensure that replication delay isn't growing during your tests. + +Use all of the normal Postgres tuning features to improve the speed +of critical parts of your application.
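+
+As a sketch of step 1, the following hypothetical custom script uses the
+standard pgbench scripting syntax. It assumes the standard tables were
+created with `pgd_bench -i`; the file name and the deliberately small key
+range are illustrative only:
+
+```
+-- mytest.sql: repeatedly update rows drawn from a small range so that
+-- concurrent clients (and replay on peer nodes) contend for the same rows.
+\set aid random(1, 1000)
+BEGIN;
+UPDATE pgbench_accounts SET abalance = abalance + 1 WHERE aid = :aid;
+SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
+END;
+```
+
+Run it on one node for a baseline, then across more nodes with the same total
+session count, using the flags listed in the
+[pgd_bench reference](/pgd/latest/reference/testingandtuning#pgd_bench):
+
+```sh
+pgd_bench -f mytest.sql -c 16 -j 4 -T 600 -P 10 bdrdemo
+```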
+ + + + + + diff --git a/scripts/source/pglogical2.js b/scripts/source/pglogical2.js index f3596fdc620..6d522df7d1d 100644 --- a/scripts/source/pglogical2.js +++ b/scripts/source/pglogical2.js @@ -2,23 +2,22 @@ // purpose: // Import and convert the pglogical2 docs from https://raw.githubusercontent.com/2ndQuadrant/pglogical/REL2_x_STABLE/docs/README.md, rendering them in /advocacy_docs/supported-open-source/pglogical2/ // -const path = require("path"); -const fs = require("fs/promises"); -const https = require("https"); -const { read, write } = require("to-vfile"); -const remarkParse = require("remark-parse"); -const mdx = require("remark-mdx"); -const unified = require("unified"); -const remarkFrontmatter = require("remark-frontmatter"); -const remarkStringify = require("remark-stringify"); -const admonitions = require("remark-admonitions"); -const yaml = require("js-yaml"); -const visit = require("unist-util-visit"); -const visitAncestors = require("unist-util-visit-parents"); -const mdast2string = require("mdast-util-to-string"); -const { exec } = require("child_process"); -const isAbsoluteUrl = require("is-absolute-url"); -const slugger = require("github-slugger"); +import path from "path"; +import fs from "fs/promises"; +import https from "https"; +import pkg from 'to-vfile'; +const {write, read} = pkg; +import remarkParse from "remark-parse"; +import mdx from "remark-mdx"; +import unified from "unified"; +import remarkFrontmatter from "remark-frontmatter"; +import remarkStringify from "remark-stringify"; +import admonitions from "remark-admonitions"; +import yaml from "js-yaml"; +import visit from "unist-util-visit"; +import visitAncestors from "unist-util-visit-parents"; +import mdast2string from "mdast-util-to-string"; +import slugger from "github-slugger"; const outputFiles = []; const source = new URL( @@ -26,9 +25,13 @@ const source = new URL( ); const originalSource = "https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1"; -const destination = path.resolve( +const docsRoot = path.resolve( process.argv[1], - "../../../advocacy_docs/supported-open-source/pglogical2/", + "../../../", +); +const destination = path.resolve( + docsRoot, + "advocacy_docs/supported-open-source/pglogical2/", ); (async () => { @@ -104,7 +107,7 @@ function pglogicalTransformer() { metadata: { title: title, product: "pglogical 2", - generatedBy: `${process.argv[1]} - re-run to regenerate from originalFilePath`, + generatedBy: `${path.relative(docsRoot, process.argv[1])} - re-run to regenerate from originalFilePath`, }, data: { type: "root",