diff --git a/.gitignore b/.gitignore index e4a4cab8744..f42118e051b 100644 --- a/.gitignore +++ b/.gitignore @@ -82,6 +82,7 @@ product_docs/content_build/ static/nginx_redirects.generated temp_kubernetes/ temp_bdr/ +temp_pglogical3/ # Track base direnv file !.envrc diff --git a/build-sources.json b/build-sources.json index 7a936d61600..ee5579299da 100644 --- a/build-sources.json +++ b/build-sources.json @@ -17,6 +17,7 @@ "odbc_connector": true, "pem": true, "pgbouncer": true, + "pglogical": true, "pgpool": true, "postgis": true, "repmgr": true, diff --git a/gatsby-config.js b/gatsby-config.js index 0a6a931417b..2decdfde872 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -62,6 +62,7 @@ const sourceToPluginConfig = { }, pem: { name: "pem", path: "product_docs/docs/pem" }, pgbouncer: { name: "pgbouncer", path: "product_docs/docs/pgbouncer" }, + pglogical: { name: "pglogical", path: "product_docs/docs/pglogical" }, pgpool: { name: "pgpool", path: "product_docs/docs/pgpool" }, postgis: { name: "postgis", path: "product_docs/docs/postgis" }, repmgr: { name: "repmgr", path: "product_docs/docs/repmgr" }, diff --git a/product_docs/docs/bdr/3.7/appusage.mdx b/product_docs/docs/bdr/3.7/appusage.mdx index 3cb32009428..2871ad84879 100644 --- a/product_docs/docs/bdr/3.7/appusage.mdx +++ b/product_docs/docs/bdr/3.7/appusage.mdx @@ -329,7 +329,8 @@ between them to read stale data. A [queue wait function](functions#bdrwait_for_apply_queue) is provided for clients or proxies to prevent such stale reads. -In addition, BDR provides multiple variants for more synchronous +The synchronous replication features of Postgres are available to BDR +as well. In addition, BDR provides multiple variants for more synchronous replication. Please refer to the [Durability & Performance Options](durability) chapter for an overview and comparison of all variants available and @@ -626,7 +627,7 @@ BDR cannot currently perform conflict resolution where the PRIMARY KEY is change by an UPDATE operation. It is permissible to update the primary key, but you must ensure that no conflict with existing values is possible. -BDR-EE provides the following configuration +When running on EDB Postgres Extended, BDR provides the following configuration parameter to assess how frequently the primary key/replica identity of any table is being subjected to update operations. @@ -661,8 +662,8 @@ Because BDR writer processes operate much like normal user sessions, they are su the usual rules around row and table locking. This can sometimes lead to BDR writer processes waiting on locks held by user transactions, or even by each other. -BDR-EE provides the following configuration parameter -to assess if the application is taking explicit locks. +When running on EDB Postgres Extended, BDR provides the following +configuration parameter to assess if the application is taking explicit locks. ```sql bdr.assess_lock_statement = IGNORE (default) | LOG | WARNING | ERROR diff --git a/product_docs/docs/bdr/3.7/column-level-conflicts.mdx b/product_docs/docs/bdr/3.7/column-level-conflicts.mdx index 72adcea63cf..22c5ed5c7a4 100644 --- a/product_docs/docs/bdr/3.7/column-level-conflicts.mdx +++ b/product_docs/docs/bdr/3.7/column-level-conflicts.mdx @@ -60,6 +60,10 @@ Column-level conflict resolution requires the table to have `REPLICA IDENTITY FULL`. The `bdr.alter_table_conflict_detection` function does check that, and will fail with an error otherwise. +!!! 
Note + This feature is currently only available on EDB Postgres Extended and + EDB Postgres Advanced. + ## Enabling and Disabling Column-Level Conflict Resolution The Column-Level Conflict Resolution is managed by the diff --git a/product_docs/docs/bdr/3.7/configuration.mdx b/product_docs/docs/bdr/3.7/configuration.mdx index f6027812aff..80fa0a3e973 100644 --- a/product_docs/docs/bdr/3.7/configuration.mdx +++ b/product_docs/docs/bdr/3.7/configuration.mdx @@ -25,7 +25,6 @@ which vary according to the size and scale of the cluster. - `logical_decoding_work_mem` - memory buffer size used by logical decoding. Transactions larger than this will overflow the buffer and be stored temporarily on local disk. Default 64MB, but can be set much higher. - - `max_worker_processes` - BDR uses background workers for replication and maintenance tasks, so there need to be enough worker slots for it to work correctly. The formula for the correct minimal number of workers is: @@ -34,19 +33,16 @@ which vary according to the size and scale of the cluster. writer enabled per peer node in the BDR group, for each database. Additional worker processes may be needed temporarily when node is being removed from a BDR group. - - `max_wal_senders` - Two needed per every peer node. - - `max_replication_slots` - Same as `max_wal_senders`. - - `wal_sender_timeout` and `wal_receiver_timeout` - Determine how quickly an origin considers its CAMO partner as disconnected or reconnected; see [CAMO Failure Scenarios](camo#failure-scenarios) for details. Note that in normal running for a group with N peer nodes, BDR will require -N slots/walsenders. During synchronization, BDR will temporarily use another -N - 1 slots/walsenders, so be careful to set the above parameters high enough +N slots and WAL senders. During synchronization, BDR will temporarily use another +N - 1 slots and WAL senders, so be careful to set the above parameters high enough to cater for this occasional peak demand. With parallel apply turned on, the number of slots needs to be increased to @@ -54,8 +50,8 @@ N slots from above formula \* writers. This is because the `max_replication_slot also sets maximum number of replication origins and some of the functionality of parallel apply uses extra origin per writer. -When WAL Decoder is enabled, the WAL decoder process will require one extra -replication slot per BDR group. +When the [Decoding Worker](nodes#decoding-worker) is enabled, this +process will require one extra replication slot per BDR group. The general safe recommended value on a 4 node BDR Group with a single database is just to set `max_replication_slots` and `max_worker_processes` to something @@ -71,7 +67,7 @@ Applications may also wish to set these parameters. Please see chapter on in a similar way to [physical replication](https://www.postgresql.org/docs/11/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT). - `synchronous_standby_names` - same as above -## 2ndQPostgres Settings for BDR +## 2ndQPostgres/EDB Postgres Extended Settings for BDR The following Postgres settings need to be considered for commit at most once (CAMO), a feature that is only available for BDR in @@ -304,6 +300,7 @@ Unless noted otherwise, values may be set by any user at any time. local two-phase commit or CAMO transaction, and will prevent all Eager transactions on the cluster. May only be set at Postgres server start. + (EDB Postgres Extended) ### Eager Replication @@ -315,6 +312,9 @@ Unless noted otherwise, values may be set by any user at any time. 
in their commit phase, as a limit for how long to wait for the CAMO partner. +!!! Note + This is only available on EDB Postgres Extended. + ### Commit at Most Once - `bdr.enable_camo` - Used to enable and control the CAMO feature. @@ -345,11 +345,15 @@ Unless noted otherwise, values may be set by any user at any time. guaranteed. This is enabled by default. Well-informed users can choose to disable this to reduce the amount of warnings going into their logs. +!!! Note + This is only available on EDB Postgres Extended. + ### Timestamp-based Snapshots - `bdr.timestamp_snapshot_keep` - For how long to keep valid snapshots for the timestamp-based snapshot usage (default 0, meaning do not keep past snapshots). Also see `snapshot_timestamp` above. + (EDB Postgres Extended) ### Monitoring and Logging @@ -374,23 +378,23 @@ Unless noted otherwise, values may be set by any user at any time. Raft log when doing log compaction (default 100). The value of 0 will disable log compaction. **WARNING: If log compaction is disabled, the log will grow in size forever.** May only be set at Postgres server start. - - `bdr.raft_response_timeout` - To account for network failures, the Raft consensus protocol implemented will time out requests after a certain amount of time. This timeout defaults to 30 seconds. - - `bdr.raft_log_min_apply_duration` - To move the state machine forward, Raft appends entries to its internal log. During normal operation, appending takes only a few milliseconds. This poses an upper threshold on the duration of that append action, above which an `INFO` message is logged. This may indicate an actual problem. Default value of this parameter is 3000 ms. - - `bdr.raft_log_min_message_duration` - When to log a consensus request. Measure round trip time of a bdr consensus request and log an `INFO` message if the time exceeds this parameter. Default value of this parameter is 5000 ms. - +- `bdr.raft_group_max_connections` - The maximum number of connections + across all BDR groups for a Postgres server. These connections carry + bdr consensus requests between the groups' nodes. Default value of this + parameter is 100 connections. May only be set at Postgres server start. - `bdr.backwards_compatibility` - Specifies the version to be backwards-compatible to, in the same numerical format as used by `bdr.bdr_version_num`, e.g. `30618`. Enables exact behavior of a @@ -398,14 +402,14 @@ Unless noted otherwise, values may be set by any user at any time. Defaults to the current BDR version. Since this changes from release to release, we advise against explicit use within the configuration file unless the value is different to the current version. - - `bdr.track_replication_estimates` - Track replication estimates in terms of apply rates and catchup intervals for peer nodes. This information can be used by protocols like CAMO to estimate the readiness of a peer node. This parameter is enabled by default. - + (EDB Postgres Extended) - `bdr.lag_tracker_apply_rate_weight` - We monitor how far behind peer nodes are in terms of applying WAL from the local node, and calculate a moving average of the apply rates for the lag tracking. This parameter specifies how much contribution newer calculated values have in this moving average calculation. Default value is 0.1. 
+ (EDB Postgres Extended)
diff --git a/product_docs/docs/bdr/3.7/conflicts.mdx b/product_docs/docs/bdr/3.7/conflicts.mdx
index 3b0c1c281e0..e367a0330dd 100644
--- a/product_docs/docs/bdr/3.7/conflicts.mdx
+++ b/product_docs/docs/bdr/3.7/conflicts.mdx
@@ -23,14 +23,12 @@ This chapter covers row-level conflicts with standard data types in detail.
Conflict handling is configurable, as described later in this chapter.
Conflicts can be detected and handled differently for each table using
-conflict triggers, available with BDR-EE,
-described in the [Stream Triggers](striggers) chapter.
+conflict triggers, described in the [Stream Triggers](striggers) chapter.
-
-Column-level conflict detection and resolution is available with BDR-EE,
+Column-level conflict detection and resolution is available with BDR,
described in the [CLCD](column-level-conflicts) chapter.

-If you wish to avoid conflicts, you can use these features in BDR-EE
+If you wish to avoid conflicts, you can use these features in BDR.

- Conflict-free data types (CRDTs) - described in the [CRDT](crdt) chapter.
- Eager replication - described in the [Eager Replication](eager) chapter.
@@ -56,8 +54,6 @@ some types of conflict to occur and resolve them when they arise.
   groups that contain at least one 3.7+ node. Please use the ones in
   `bdr` schema that are already present in all BDR versions.

-.
-
## How conflicts happen

Inter-node conflicts arise as a result of sequences of events that could not
@@ -120,8 +116,8 @@ default resolved by choosing the newer (based on commit time) row and keeping
only that one (`update_if_newer` resolver). Other resolvers can be configured -
see [Conflict Resolution] for details.

-To resolve this conflict, type in the Enterprise Edition; you can also use
-column-level conflict resolution and user-defined conflict triggers.
+To resolve this conflict type, you can also use column-level conflict
+resolution and user-defined conflict triggers.

This type of conflict can be effectively eliminated by use of
[Global Sequences](sequences).
@@ -145,6 +141,8 @@ preserve the row with the correct `PRIMARY KEY` and delete the others.
   resolution is to proceed with the insert operation, some of the data will
   always be deleted!

+It's also possible to define a different behavior using a conflict trigger.
+
#### UPDATE/UPDATE Conflicts

Where two concurrent `UPDATE`s on different nodes change the same tuple
@@ -410,7 +408,6 @@ these conflicts. Note however that enabling this option opens the door for
If these are problems, it's recommended to tune freezing settings for a table
or database so that they are correctly detected as `update_recently_deleted`.

-This is done automatically in BDR Enterprise Edition.
Another alternative is to use [Eager Replication] to prevent these conflicts.

@@ -709,6 +706,16 @@ as is normally the case with BDR AlwaysOn architecture.
   such a statement, the protection against these concurrency issues will not
   be present.

+!!! Warning
+    The additional WAL logging of TOAST is done using the `BEFORE UPDATE`
+    trigger. This trigger must be sorted alphabetically
+    last (based on trigger name) among all `BEFORE UPDATE` triggers on the
+    table. It's prefixed with `zzzz_bdr_` to make this easier, but make sure
+    you don't create any trigger with a name that would sort after it,
+    otherwise the protection against the concurrency issues will not be
+    present. This trigger is not created or used when using BDR with EDB
+    Postgres Extended.
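+
+    As a quick sanity check, you can list the `BEFORE UPDATE` row triggers
+    on a table in name order and verify that the `zzzz_bdr_` trigger sorts
+    last. This is only a sketch; `mytable` is a placeholder for your table:
+
+    ```sql
+    SELECT tgname
+      FROM pg_catalog.pg_trigger
+     WHERE tgrelid = 'mytable'::regclass
+       AND NOT tgisinternal
+       AND (tgtype & 2) > 0   -- fires BEFORE ...
+       AND (tgtype & 16) > 0  -- ... on UPDATE
+     ORDER BY tgname;
+    ```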
+
For the `insert_or_error` conflict resolution, the use of
`REPLICA IDENTITY FULL` is however still required.

@@ -770,13 +777,8 @@ mechanisms to cope with the conflict.
BDR provides these mechanisms for conflict detection:

- [Origin Conflict Detection] \(default)
-
- [Row Version Conflict Detection]
-
- [Column-Level Conflict Detection](column-level-conflicts)
-
-  .
-
-as well as other mechanisms when using BDR-EE.

### Origin Conflict Detection

@@ -811,11 +813,51 @@ frozen rows that they update. Inserts and Deletes are not affected by this situa
Users are advised to not leave down nodes for extended outages, as discussed
in [Node Restart and Down Node Recovery](nodes).

-To handle this situation gracefully, BDR-EE will automatically hold
-back the freezing of rows while a node is down.
+On EDB Postgres Extended, BDR will automatically hold back the freezing of
+rows while a node is down to handle this situation gracefully without the need
+for changing parameter settings.
+
+On other variants of Postgres, users may need to manage this situation with
+some care:
+
+Freezing normally occurs when a row being vacuumed is older than
+`vacuum_freeze_min_age` xids from the current xid, which means that you
+need to configure suitably high values for these parameters:
+
+- `vacuum_freeze_min_age`
+- `vacuum_freeze_table_age`
+- `autovacuum_freeze_max_age`

-No changes to parameter settings are required.
-.
+Values should be chosen based upon the transaction rate, giving
+a grace period of downtime before any conflict data is removed
+from the database server. For example, a node performing
+1000 TPS could be down for just over 5.5 days before conflict
+data is removed, when `vacuum_freeze_min_age` is set to 500 million.
+The CommitTS data structure will take 5 GB of on-disk space with
+that setting, so lower transaction rate systems may benefit from
+lower settings.
+
+Recommended initial settings would be:
+
+```ini
+# 1 billion = 10GB
+autovacuum_freeze_max_age = 1000000000
+
+vacuum_freeze_min_age = 500000000
+
+# 90% of autovacuum_freeze_max_age
+vacuum_freeze_table_age = 900000000
+```
+
+Note that:
+
+- `autovacuum_freeze_max_age` can only be set at server start.
+- `vacuum_freeze_min_age` is user-settable, so using a
+  low value will freeze rows early and could result in conflicts being
+  ignored. `autovacuum_freeze_min_age` and `toast.autovacuum_freeze_min_age`
+  can also be set for individual tables.
+- running the `CLUSTER` or `VACUUM FREEZE` commands will also
+  freeze rows early and could result in conflicts being ignored.

### Row Version Conflict Detection

@@ -877,13 +919,10 @@ The recognized methods for conflict detection are:

- `row_origin` - origin of the previous change made on the tuple (see
  [Origin Conflict Detection] above). This is the only method supported which
  does not require an extra column in the table.
-
- `row_version` - row version column (see [Row Version Conflict Detection]
  above).
-
- `column_commit_timestamp` - per-column commit timestamps (described in the
  [CLCD](column-level-conflicts) chapter).
-
- `column_modify_timestamp` - per-column modification timestamp (described in
  the [CLCD](column-level-conflicts) chapter).
diff --git a/product_docs/docs/bdr/3.7/crdt.mdx b/product_docs/docs/bdr/3.7/crdt.mdx
index 36d3ddd314b..df0053c2012 100644
--- a/product_docs/docs/bdr/3.7/crdt.mdx
+++ b/product_docs/docs/bdr/3.7/crdt.mdx
@@ -163,6 +163,10 @@ new data types. 
CRDT types are handled transparently - both `ANALYZE` and the optimizer work,
so estimation and query planning works fine, without having to do anything else.

+!!! Note
+    This feature is currently only available on EDB Postgres Extended and
+    EDB Postgres Advanced.
+
## State-based and operation-based CRDTs

Following the notation from [1], we do implement both operation-based
diff --git a/product_docs/docs/bdr/3.7/ddl.mdx b/product_docs/docs/bdr/3.7/ddl.mdx
index b24aa1f7e88..249a93d040b 100644
--- a/product_docs/docs/bdr/3.7/ddl.mdx
+++ b/product_docs/docs/bdr/3.7/ddl.mdx
@@ -642,31 +642,19 @@ The following variants of `ALTER TABLE` will only take DDL lock and **not** a
DML lock:

- `ALTER TABLE ... ADD COLUMN ... (immutable) DEFAULT`
-
- `ALTER TABLE ... ALTER COLUMN ... SET DEFAULT expression`
-
- `ALTER TABLE ... ALTER COLUMN ... DROP DEFAULT`
-
- `ALTER TABLE ... ALTER COLUMN ... TYPE` if it does not require rewrite
-
+  (currently only available on EDB Postgres Extended and EDB Postgres Advanced)
- `ALTER TABLE ... ALTER COLUMN ... SET STATISTICS`
-
- `ALTER TABLE ... VALIDATE CONSTRAINT`
-
- `ALTER TABLE ... ATTACH PARTITION`
-
- `ALTER TABLE ... DETACH PARTITION`
-
- `ALTER TABLE ... ENABLE TRIGGER` (`ENABLE REPLICA TRIGGER` will still take a DML lock)
-
- `ALTER TABLE ... CLUSTER ON`
-
- `ALTER TABLE ... SET WITHOUT CLUSTER`
-
- `ALTER TABLE ... SET ( storage_parameter = value [, ... ] )`
-
- `ALTER TABLE ... RESET ( storage_parameter = [, ... ] )`
-
- `ALTER TABLE ... OWNER TO`

All other variants of `ALTER TABLE` take a DML lock on the table being modified.
@@ -829,6 +817,10 @@ could lead to table rewrites lasting long durations.
Also note that the above implicit castable ALTER activity cannot be
performed in transaction blocks.

+!!! Note
+    This currently only works on EDB Postgres Extended and EDB Postgres
+    Advanced.
+
### ALTER TYPE

Users should note that `ALTER TYPE` is replicated but a Global DML lock is *not*
@@ -860,8 +852,16 @@ allowed on a BDR node.

### CREATE TABLE AS and SELECT INTO

-`CREATE TABLE AS` and `SELECT INTO` are only allowed on Enteprise Edition of
-BDR and only if any sub-commands are also allowed.
+`CREATE TABLE AS` and `SELECT INTO` are only allowed on EDB Postgres Extended
+and EDB Postgres Advanced and only if any sub-commands are also allowed.
+
+In case `CREATE TABLE AS` is not supported on your variant of Postgres, you
+can instead achieve the same effect using:
+
+```sql
+CREATE TABLE mytable (...);
+INSERT INTO mytable SELECT ... ;
+```

### EXPLAIN

@@ -1000,6 +1000,10 @@ Note that the new facility requires the cluster to run with RAFT protocol
version 24 and beyond. If the RAFT protocol is not yet upgraded, the old
mechanism will be used, resulting in a DML lock request.

+!!! Note
+    This currently only works on EDB Postgres Extended and EDB Postgres
+    Advanced.
+
#### Adding a Column

To add a column with a volatile default, run these commands in
diff --git a/product_docs/docs/bdr/3.7/functions.mdx b/product_docs/docs/bdr/3.7/functions.mdx
index 2af35595096..5052a92d8fc 100644
--- a/product_docs/docs/bdr/3.7/functions.mdx
+++ b/product_docs/docs/bdr/3.7/functions.mdx
@@ -23,7 +23,7 @@ only using the following supplied functions.

This function returns a textual representation of the BDR edition. BDR3 is
distributed in either Standard Edition (`SE`) or Enterprise Edition (`EE`);
this function can be used to check which of those is currently
-installed.
+installed. This function is deprecated.

### bdr.bdr_version

This function retrieves the textual representation of the BDR version that is
currently in use.

### bdr.bdr_version_num

This function retrieves a numerical representation of the BDR version that is
currently in use. Version numbers are monotonically increasing, allowing this
value to be used for less-than and greater-than comparisons.

The following formula is used to turn the version number consisting of
MAJOR version, MINOR version and PATCH release into a single numerical
value:

```
MAJOR_VERSION * 10000 + MINOR_VERSION * 100 + PATCH_RELEASE
```
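+
+For example, BDR 3.7.12 maps to `3 * 10000 + 7 * 100 + 12 = 30712`. A quick
+check from psql might look like this (the output shown is illustrative):
+
+```sql
+bdrdb=# SELECT bdr.bdr_version_num();
+ bdr_version_num
+-----------------
+           30712
+```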

+### bdr.wal_sender_stats
+
+If the [Decoding Worker](nodes#decoding-worker) is enabled, this
+function shows information about the decoder slot and current LCR
+(`Logical Change Record`) segment file being read by each WAL sender.
+
+#### Synopsis
+
+```sql
+bdr.wal_sender_stats() → setof record (pid integer, is_using_lcr boolean, decoder_slot_name TEXT, lcr_file_name TEXT)
+```
+
+#### Output columns
+
+- `pid` - PID of the WAL sender (corresponds to `pg_stat_replication`'s `pid` column)
+
+- `is_using_lcr` - Whether the WAL sender is sending LCR files. The next columns will be `NULL` if `is_using_lcr` is `FALSE`.
+
+- `decoder_slot_name` - The name of the decoder replication slot.
+
+- `lcr_file_name` - The name of the current LCR file.
+
## System and Progress Information Parameters

BDR exposes some parameters that can be queried via `SHOW` in `psql`
@@ -73,6 +95,9 @@ becomes remotely visible. As soon as Postgres assigns a transaction id, this
parameter is updated to show the transaction id just assigned, if CAMO is
enabled.

+!!! Note
+    This is only available on EDB Postgres Extended.
+
## Utility Functions

### bdr.wait_slot_confirm_lsn

@@ -347,6 +372,22 @@ bdrdb=# SELECT bdr.local_group_slot_name();
 bdr_bdrdb_bdrgroup
```

+### bdr.node_group_type
+
+Returns the type of the given node group. The returned value is the same as
+what was passed to `bdr.create_node_group()` when the node group was created,
+except `normal` is returned if the `node_group_type` was passed as NULL
+when the group was created.
+
+#### Example
+
+```sql
+bdrdb=# SELECT bdr.node_group_type('bdrgroup');
+ node_group_type
+-----------------
+ normal
+```
+
## Global Advisory Locks

BDR supports global advisory locks. These locks are very similar to
diff --git a/product_docs/docs/bdr/3.7/index.mdx b/product_docs/docs/bdr/3.7/index.mdx
index 845b2614f2a..ae715be0eb9 100644
--- a/product_docs/docs/bdr/3.7/index.mdx
+++ b/product_docs/docs/bdr/3.7/index.mdx
@@ -56,61 +56,61 @@ to as BDR1 and BDR2. There are significant and important differences in BDR3
and you should not refer to earlier docs or rely on anything stated within them.

-BDR3 comes in two variants, the Standard Edition (BDR-SE) and the Enterprise
-Edition (BDR-EE), these variants are compatible with specific versions of
-database server, as shown below.
+BDR3 supports different versions and variants of PostgreSQL, as shown below.

**BDR version support compatibility matrix:**

-| BDR | Variant    | Server                | Supported Versions |
-| --- | ---------- | --------------------- | ------------------ |
-| 3.6 | Standard   | PostgreSQL            | 10, 11             |
-| 3.7 | Standard   | PostgreSQL            | 11, 12, 13         |
-| 3.7 | Standard   | EDB Postgres Advanced | 11, 12, 13         |
-| 3.6 | Enterprise | EDB Postgres Extended | 11                 |
-| 3.7 | Enterprise | EDB Postgres Extended | 11, 12, 13         |
-| 3.7 | Enterprise | EDB Postgres Advanced | 11, 12, 13         |
+| BDR | Server                | Supported Versions |
+| --- | --------------------- | ------------------ |
+| 3.6 | PostgreSQL            | 10, 11             |
+| 3.6 | EDB Postgres Extended | 11                 |
+| 3.7 | PostgreSQL            | 11, 12, 13         |
+| 3.7 | EDB Postgres Advanced | 11, 12, 13         |
+| 3.7 | EDB Postgres Extended | 11, 12, 13         |

EDB Postgres Extended was formerly known as 2ndQ Postgres.

-The Enterprise Edition provides these extensive additional features to provide
-very high availability, avoid data conflicts and to cope with more advanced
-usage scenarios.
+Some features are only available on particular versions of the Postgres server.

- Conflict-free Replicated Data Types - additional data types which provide
  mathematically proven consistency in asynchronous multi-master update
  scenarios
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Column Level Conflict Resolution - ability to use per column last-update
  wins resolution so that UPDATEs on different fields can be "merged" without
  losing either of them
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Transform Triggers - triggers that are executed on the incoming stream of
  data providing ability to modify it or to do advanced programmatic filtering
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Conflict triggers - triggers which are called when conflict is detected,
  providing a way to use custom conflict resolution techniques
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Additional DDL support (CREATE TABLE AS)
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Advanced DDL Handling for NOT VALID constraints and ALTER TABLE
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Additional synchronization for Logical/Physical Standby servers for faster
  build of failoverable standbys
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Parallel Apply - allow multiple writers to apply the incoming changes
+  (EDB Postgres Advanced, EDB Postgres Extended)
- Eager Replication - synchronizes between the nodes of the cluster before
-  committing a transaction to provide conflict free replication (currently
-  only with EDB Postgres Extended)
+  committing a transaction to provide conflict free replication
+  (EDB Postgres Extended)
- Commit At Most Once - a consistency feature helping
  an application to commit each transaction only once, even in the
-  presence of node failures (currently only with EDB Postgres Extended)
+  presence of node failures
+  (EDB Postgres Extended)
- Timestamp-based Snapshots - providing consistent reads across multiple
  nodes for retrieving data as they appeared or will appear at a given time
-  (currently only with EDB Postgres Extended)
+  (EDB Postgres Extended)
- Estimates for Replication Catch-up times
-  (currently only with EDB Postgres Extended)
-- Selective Backup of a Single Database (currently only with EDB Postgres
-  Extended)
+  (EDB Postgres Extended)
- Hold back freezing to assist resolution of UPDATE/DELETE conflicts
-  (currently only with EDB Postgres Extended)
-- Decoding Worker (currently only with EDB Postgres Extended version 13 and
-  above)
+  (EDB Postgres Extended)
+- Decoding Worker
+  (EDB Postgres Extended 13 and above)

Features that are currently available only with EDB Postgres Extended are
expected to be available with EDB Postgres Advanced 14.
-
-This documentation is for the Enterprise Edition of BDR3.
diff --git a/product_docs/docs/bdr/3.7/monitoring.mdx b/product_docs/docs/bdr/3.7/monitoring.mdx
index b7d697882b7..c6708d88eba 100644
--- a/product_docs/docs/bdr/3.7/monitoring.mdx
+++ b/product_docs/docs/bdr/3.7/monitoring.mdx
@@ -158,6 +158,9 @@ column that refers to the time required for the peer node to catch up
to the local node data. The other fields are also available via the
`bdr.node_slots` view, as explained below.

+!!! Note
+    This catalog is only present when the bdr-enterprise extension is installed.
+
Administrators may query `bdr.node_slots` for outgoing replication from the
local node.
It shows information about replication status of all other nodes
in the group that are known to the current node, as well as any additional
replication slots created by BDR on the current node.
@@ -269,6 +272,27 @@ sub_slot_name       | bdr_postgres_bdrgroup_node1
subscription_status | replicating
```

+### Monitoring WAL senders using LCR
+
+If the [Decoding Worker](nodes#decoding-worker) is enabled, information about the
+current LCR (`Logical Change Record`) file for each WAL sender can be monitored
+via the function [bdr.wal_sender_stats](functions#bdrwal_sender_stats),
+e.g.:
+
+```
+postgres=# SELECT * FROM bdr.wal_sender_stats();
+   pid   | is_using_lcr |       decoder_slot_name       |              lcr_file_name
+---------+--------------+-------------------------------+------------------------------------------
+ 2059904 | f            |                               |
+ 2059909 | t            | bdr_postgres_bdrgroup_decoder | 0000000000000000000000140000000000000000
+ 2059916 | t            | bdr_postgres_bdrgroup_decoder | 0000000000000000000000140000000000000000
+(3 rows)
+```
+
+If `is_using_lcr` is `FALSE`, `decoder_slot_name`/`lcr_file_name` will be `NULL`.
+This will be the case if the Decoding Worker is not enabled, or the WAL sender is
+serving a [logical standby](nodes#logical-standby-nodes).
+
## Monitoring BDR Replication Workers

All BDR workers show up in the system view `bdr.stat_activity`,
@@ -522,8 +546,8 @@ bdrdb=# SELECT node_name, postgres_version, pglogical_version,
bdr_version FROM bdr.group_versions_details;
 node_name | postgres_version | pglogical_version | bdr_version
-----------+------------------+-------------------+-------------
- node1     | 11.7             | 3.6.17            | 3.6.17
- node2     | 11.7             | 3.6.17            | 3.6.17
+ node1     | 13.3             | 3.7.10            | 3.7.10
+ node2     | 13.3             | 3.7.10            | 3.7.10
```

The recommended setup is to try to have all nodes running the same
@@ -559,19 +583,13 @@ Raft Consensus should be working cluster-wide at all times. The impact
of running a BDR cluster without Raft Consensus working might be as
follows:

-- BDR replication might still be working correctly
-
+- Replication of BDR data changes may still be working correctly
- Global DDL/DML locks will not work
-
- Galloc sequences will eventually run out of chunks
-
-- Eager Replication (EE only) will not work
-
+- Eager Replication will not work
- Cluster maintenance operations (join node, part node, promote standby)
  are still allowed but they might not finish (simply hang)
-
- Node statuses might not be correctly synced among the BDR nodes
-
- BDR group replication slot does not advance LSN, thus keeps WAL files on
  disk

@@ -715,7 +733,7 @@ bdrdb=# SELECT * FROM bdr.monitor_local_replslots();
 OK     | All BDR replication slots are working correctly
```

-## Tracing Transaction COMMITs
+## Monitoring Transaction COMMITs

By default, BDR transactions commit only on the local node. In that case,
transaction `COMMIT` will be processed quickly.

BDR also provides two new transaction commit modes: CAMO and Eager
replication. Each of these modes provides additional robustness
features, though at the expense of additional latency at `COMMIT`.
The additional time at `COMMIT` can be monitored dynamically using the
-`pg_stat_activity` catalog, where processes report different `wait_event`
+`bdr.stat_activity` catalog, where processes report different `wait_event`
states. A transaction in `COMMIT` waiting for confirmations from one or
more synchronous standbys reports a `SyncRep` wait event, whereas the
two new modes report `EagerRep`.
-
-Also, transaction `COMMIT` can be traced in more detail using the
-`tracelog_timeout` parameter.
If the `COMMIT` takes longer than this
-time, additional log messages will show what steps occurred at
-which point. All such messages are prefixed with `TRACELOG: ` and are
-disabled until the timeout is reached. Thus the first line per
-transaction describes the activity during which the timeout triggered.
-Timestamps on the following log lines allow us to determine the duration
-of all following actions during the `COMMIT`. As an example, the
-tracelog lines for a transaction that took too long to write to disk
-look similar to:
-
-```
-LOG: 00000: TRACELOG: Flushed WAL to disk
-LOG: 00000: TRACELOG: End of critical section for commit
-LOG: 00000: TRACELOG: Finished waiting for SyncRep
-LOG: 00000: TRACELOG: XIDs marked as committed in pg_xact
-LOG: 00000: TRACELOG: Marked transaction as no longer running
-LOG: 00000: TRACELOG: Commit XactCallbacks fired
-LOG: 00000: TRACELOG: Released before locks resources
-LOG: 00000: TRACELOG: Released all buffer pins
-LOG: 00000: TRACELOG: Cleaned up relation cache
-LOG: 00000: TRACELOG: Catalog changes visible to all backends
-LOG: 00000: TRACELOG: Transaction end for multiXact
-```
-
-Note that this can cause substantial additional logs, so it should be
-enabled with care. If used in production, it should be set to catch
-outliers. It is not intended for regular performance monitoring.
-Please start with high timeouts and decrease, until a useful amount of
-log is available for analysis, to minimize its impact on performance.
diff --git a/product_docs/docs/bdr/3.7/nodes.mdx b/product_docs/docs/bdr/3.7/nodes.mdx
index 5c1c50862c4..21a3a24f72c 100644
--- a/product_docs/docs/bdr/3.7/nodes.mdx
+++ b/product_docs/docs/bdr/3.7/nodes.mdx
@@ -86,6 +86,12 @@ The logical join procedure (which uses `bdr.join_node_group()` function)
performs data sync doing `COPY` operations and will use multiple writers
(parallel apply) if those are enabled.

+Node join can execute concurrently with other node joins for the majority of
+the time taken to join. Only one regular node at a time can be in either of
+the states PROMOTE or PROMOTING, which are typically fairly short.
+The subscriber-only nodes are an exception to this rule, and they can be
+concurrently in PROMOTE and PROMOTING states as well.
+
Note that the join process uses only one node as the source, so can be
executed when nodes are down, if a majority of nodes are available.
This can cause a complexity when running logical join:
@@ -143,7 +149,7 @@ connection string from the client connecting to the node in which it is
specified. An example of such a set of parameters using a client certificate is
shown here:

-```
+```ini
sslmode=verify-full sslcert=bdr_client.crt sslkey=bdr_client.key
sslrootcert=root.crt
```

@@ -166,7 +172,7 @@ BDR connection. This is usually the user `postgres`. Each node will require
matching lines permitting the connection in the `pg_hba.conf` file; for
example:

-```
+```ini
hostssl all         postgres 10.1.2.3/24 cert
hostssl replication postgres 10.1.2.3/24 cert
```

@@ -175,13 +181,13 @@ Another setup could be to use `SCRAM-SHA-256` passwords instead of client
certificates, and not bother about verifying the server identity as long as
the certificate is properly signed. Here the DSN parameters might be just:

-```
+```ini
sslmode=verify-ca sslrootcert=root.crt
```

...and the corresponding `pg_hba.conf` lines would be like this:

-```
+```ini
hostssl all         postgres 10.1.2.3/24 scram-sha-256
hostssl replication postgres 10.1.2.3/24 scram-sha-256
```

@@ -238,17 +244,43 @@ slot was last advanced.
In extreme cases, this may require a full
16 MB before slots are synced/created on the streaming replica. If a failover
or switchover occurs during this interval, the streaming standby cannot be
promoted to replace its BDR node, as the
-group slot and other dependent slots do not exist yet. This is resolved
-automatically by BDR-EE, but not by BDR-SE.
+group slot and other dependent slots do not exist yet.

-The slot sync-up process on the standby solves this by invoking a
-function on the upstream. This function moves the group slot in the
+On EDB Postgres Extended and EDB Postgres Advanced, this is resolved
+automatically. The slot sync-up process on the standby solves this by
+invoking a function on the upstream. This function moves the group slot in the
entire BDR cluster by performing WAL switches and requesting all BDR
peer nodes to replay their progress updates. The above causes the
group slot to move ahead in a short timespan. This reduces the time
required by the standby for the initial slot's sync-up, allowing for
faster failover to it, if required.

+On PostgreSQL, it is important to ensure that the slots' sync-up has completed
+on the standby before promoting it. The following query can be run on the
+standby in the target database to monitor and ensure that the slots have
+synced up with the upstream. The promotion can go ahead when this query
+returns `true`.
+
+```sql
+SELECT true FROM pg_catalog.pg_replication_slots WHERE
+    slot_type = 'logical' AND confirmed_flush_lsn IS NOT NULL;
+```
+
+It is also possible to nudge the slot sync-up process in the entire BDR
+cluster by manually performing WAL switches and by requesting all BDR
+peer nodes to replay their progress updates. This activity will cause
+the group slot to move ahead in a short timespan, and also hasten the
+slot sync-up activity on the standby. The following queries can be run
+on any BDR peer node in the target database for this:
+
+```sql
+SELECT bdr.run_on_all_nodes('SELECT pg_catalog.pg_switch_wal()');
+SELECT bdr.run_on_all_nodes('SELECT bdr.request_replay_progress_update()');
+```
+
+Use the monitoring query from above on the standby to check that these
+queries indeed help in faster slot sync-up on that standby.
+
Logical standby nodes can themselves be protected using physical standby
nodes, if desired, so Master->LogicalStandby->PhysicalStandby. Note that
you cannot cascade from LogicalStandby to LogicalStandby.
@@ -326,17 +358,15 @@ For these reasons it's generally recommended to use either logical standby nodes
or subscribe-only group instead of physical stanby nodes because they both
have better operational characteristics in comparison.

-BDR3 Enterprise installations can manually trigger creation of
-BDR-related replication slots on a physical standby using the following
-SQL syntax:
+When the bdr-enterprise extension is installed, you can manually ensure the
+group slot is advanced on all nodes (as much as possible), which helps hasten
+the creation of BDR-related replication slots on a physical standby using the
+following SQL syntax:

```sql
SELECT bdr.move_group_slot_all_nodes();
```

-This will also advance the BDR group slot used to ensure that all nodes have
-reached a minimum LSN across the cluster.
-
Upon failover, the Standby must perform one of two actions to replace
the Primary:
@@ -472,9 +502,9 @@ since the WAL Sender process now spends more time on communication.

`enable_wal_decoder` is an option for each BDR group, which is currently
disabled by default. `bdr.alter_node_group_config()` can be used to enable or
-disable WAL decoder for a BDR group.
+disable the Decoding Worker for a BDR group.
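+
+For example, a sketch (assuming a BDR group named `bdrgroup`):
+
+```sql
+SELECT bdr.alter_node_group_config('bdrgroup',
+                                   enable_wal_decoder := true);
+```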

-When WAL decoder is enabled, BDR stores `Logical Change Request` (LCR, in
+When the Decoding Worker is enabled, BDR stores `Logical Change Record` (LCR, in
short) files to allow buffering of changes between decoding and when all
subscribing nodes have received data. LCR files are stored under the
`pg_logical` directory within each local node's data directory. The number and
@@ -487,21 +517,22 @@ disabled when `bdr.lcr_cleanup_interval` is zero.

When disabled, logical decoding is performed by the WAL Sender process for each
node subscribing to each node. In this case, no LCR files are written.

-Even though WAL decoder is enabled for a BDR group, following GUCs control the
-production and usage of LCR per node. By default these are `false`. For
-production and usage of LCRs we need WAL decoder to be enabled for the BDR
-group and these GUCs to be set to `true` on each of the nodes in BDR group.
+Even though the Decoding Worker is enabled for a BDR group, the following
+GUCs control the production and usage of LCR per node. By default
+these are `false`. For production and usage of LCRs we need the
+Decoding Worker to be enabled for the BDR group and these GUCs to be
+set to `true` on each of the nodes in the BDR group.

-- `pglogical.enable_wal_decoder` - when turned `false` it stops WAL decoder, if
-  running, and any WAL Senders using LCRs are restarted to use WAL. When `true`
-  along with the BDR group config, a WAL decoder is run to produce LCR and WAL
-  Senders use LCR.
+- `pglogical.enable_wal_decoder` - when turned `false`, all WAL
+  Senders using LCRs are restarted to use WAL directly. When `true`
+  along with the BDR group config, a Decoding Worker process is
+  started to produce LCR and WAL Senders use LCR.
- `bdr.receive_lcr` - when `true` on the subscribing node, it requests WAL
  Sender on the publisher node to use LCRs if available.

### Notes

-As of now, a WAL decoder decodes changes corresponding to the node where it is
+As of now, a Decoding Worker decodes changes corresponding to the node where it is
running. A Logical standby is sent changes from all the nodes in BDR group
through a single source. Hence a WAL sender serving a Logical standby can not
use LCRs right now.

@@ -527,6 +558,8 @@ sub-segment. LCR files are binary and variable sized. The maximum size of an
LCR file can be controlled by pglogical.max_lcr_segment_file_size, which
defaults to 1GB.

+EDB Postgres Extended 13 and above is required for this feature to work.
+
## Node Restart and Down Node Recovery

BDR is designed to recover from node restart or node disconnection.
@@ -564,9 +597,9 @@ PANIC: could not write to file "pg_wal/xlogtemp.559": No space left on device
In addition, slots for offline nodes also hold back the catalog xmin, preventing
vacuuming of catalog tables.

-In BDR-EE, offline nodes also hold back freezing of data to prevent losing
-conflict resolution data (see: [Origin Conflict Detection](conflicts)).
-BDR-SE users may need to alter their configuration settings as specified.
+On EDB Postgres Extended, offline nodes also hold back freezing of data to
+prevent losing conflict resolution data
+(see: [Origin Conflict Detection](conflicts)).

Administrators should monitor for node outages (see: [monitoring](monitoring))
and make sure nodes have sufficient free disk space.
If the workload is @@ -632,11 +665,11 @@ The group slot can: - join new nodes to the BDR group without having all existing nodes up and running (although the majority of nodes should be up), without incurring data loss in case the node which was down during join starts - replicating again. + replicating again - part nodes from cluster consistently, even if some nodes have not - caught up fully with the parted node. -- hold back the freeze point to avoid missing some conflicts. -- keep the historical snapshot for timestamp based snapshots. + caught up fully with the parted node +- hold back the freeze point to avoid missing some conflicts (EDB Postgres Extended) +- keep the historical snapshot for timestamp based snapshots (EDB Postgres Extended) The group slot is usually inactive, and is only fast-forwarded periodically in response to Raft progress messages from other nodes. @@ -1037,7 +1070,7 @@ bdr.alter_node_group_config(node_group_name text, this node group, -1 means the default (as specified by the pglogical GUC pglogical.writers_per_subscription) will be used. Valid values are either -1 or a positive integer. -- `enable_wal_decoder` - Enables/disables the WAL decoder process. +- `enable_wal_decoder` - Enables/disables the Decoding Worker process. Note that all of the options parameters are simply used to control the pglogical writer. @@ -1336,7 +1369,8 @@ using a fast block-level copy operation. When starting from an empty data directory, if the selective backup option is chosen, then only that database will be copied from the source node. The -excluded databases will be dropped and cleaned up on the new node. +excluded databases will be dropped and cleaned up on the new node +(EDB Postgres Extended). If the specified data directory is non-empty, this will be used as the base for the new node. If the data directory is already active as a @@ -1367,27 +1401,18 @@ bdr_init_physical [OPTION] ... - `-D, --pgdata=DIRECTORY` - The data directory to be used for the new node; it can be either empty/non-existing directory, or a directory populated using the `pg_basebackup -X stream` command (required). - - `-l, --log-file=FILE` - Use FILE for logging; default is bdr_init_physical_postgres.log . - - `-n, --node-name=NAME` - The name of the newly created node (required). - - `--replication-sets=SETS` - The name of a comma-separated list of replication set names to use; all replication sets will be used if not specified. - - `--standby` - Create a logical standby (receive only node) rather than full send/receive node. - - `--node-group-name` - Group to join, defaults to the same group as source node. - - `-s, --stop` - Stop the server once the initialization is done. - - `-v` - Increase logging verbosity. - - `-L` - Perform selective pg_basebackup when used in conjunction with an - empty/non-existing data directory (-D option). - + empty/non-existing data directory (-D option). (EDB Postgres Extended) - `-S` - Instead of dropping logical replication subscriptions, just disable them. diff --git a/product_docs/docs/bdr/3.7/overview.mdx b/product_docs/docs/bdr/3.7/overview.mdx index 296956e134c..e072f76a2ec 100644 --- a/product_docs/docs/bdr/3.7/overview.mdx +++ b/product_docs/docs/bdr/3.7/overview.mdx @@ -13,10 +13,9 @@ other servers that are part of the same BDR group. ![node diagram](img/nodes.png) By default BDR uses asynchronous replication, applying changes on -the peer nodes only after the local commit. 
An optional
-[eager all node replication](eager) - is available in the
-Enterprise Edition.
+the peer nodes only after the local commit. An optional
+[eager all node replication](eager) feature allows for committing
+on all nodes using consensus.

## Basic Architecture

@@ -77,6 +76,8 @@ ensuring transactional consistency is guaranteed for the changes from
any single node. Changes from different nodes are applied independently of
other nodes to ensure the rapid replication of changes.

+Replicated data is sent in binary form when it is safe to do so.
+
### High Availability

Each master node can be protected by one or more standby nodes, so any node
diff --git a/product_docs/docs/bdr/3.7/release-notes.mdx b/product_docs/docs/bdr/3.7/release-notes.mdx
index eabbf3400e0..6f1fcb3af2b 100644
--- a/product_docs/docs/bdr/3.7/release-notes.mdx
+++ b/product_docs/docs/bdr/3.7/release-notes.mdx
@@ -5,6 +5,88 @@ originalFilePath: release-notes.md

---

+## BDR 3.7.12
+
+This is a maintenance release for BDR 3.7 which includes minor improvements
+as well as fixes for issues identified in previous versions.
+
+Also check the release notes for pglogical 3.7.12 for resolved issues which
+affect BDR as well.
+
+### Improvements
+
+- Tweak Single Decoding performance by caching and better locking (BDR-1311, BDR-1312)
+  Add caching for BDR-internal catalog information about the Decoding
+  Worker. Split a single global lock into multiple locks (one per WAL
+  sender) for access to internal status information of the WAL sender.
+  This improves performance especially with many concurrent WAL sender
+  processes.
+
+- Add a new view bdr.replication_status (BDR-1412)
+  This is similar to the view `pglogical.replication_status` and shows
+  information about the replication status of the local node with
+  respect to all other BDR nodes in the cluster.
+
+- Add decoder monitoring parameters for WAL sender
+  Extend the WAL sender statistics to report whether it uses LCRs
+  emitted by a Decoding Worker as well as the exact LCR file being
+  read from at the moment, if applicable.
+
+- Prevent CAMO from being used in combination with Decoding Worker (BDR-792)
+  These features cannot currently work in combination. This release
+  prevents enabling them both in many cases. This is just a
+  best-effort strategy to prevent misconfiguration.
+
+- Allow specifying a postgresql.auto.conf file for `bdr_init_physical` (RT72989, BDR-1400)
+  Add a command line argument to `bdr_init_physical` allowing a
+  custom file to be provided for `postgresql.auto.conf`.
+
+### Resolved Issues
+
+- Fix a potential data loss issue with bdr_init_physical (RT71888)
+  When reusing a slot name, previous state was not properly cleaned up
+  in all cases. This has caused potential data loss during physical
+  join as the slot is created ahead of time by `bdr_init_physical`
+  with the same name. The transition from physical to logical
+  replication could miss part of the replication stream, as this drops
+  and recreates the slot. This release properly cleans slot
+  information when dropped and thereby prevents data loss.
+
+- Fix `bdr.camo_local_mode_delay` to really kick in (BDR-1352)
+  This artificial delay allows throttling a CAMO node that is not
+  currently connected to its CAMO partner to prevent it from producing
+  transactions faster than the CAMO partner can possibly apply.
In
+  previous versions, it did not properly kick in after
+  `bdr.global_commit_timeout` amount of lag, but only 1000 times
+  later (due to erroneously comparing seconds to milliseconds).
+
+- Prevent segfault in combination with third-party output plugins (BDR-1424, RT72006)
+  Adjust handling of logical WAL messages specific to BDR's Eager All
+  Node Replication mode for output plugins unrelated to BDR. This
+  allows for example Debezium's decoderbufs output plugin to work
+  alongside BDR.
+
+- Improve compatibility with Postgres 13 (BDR-1396)
+  Adjust to an API change in ReplicationSlotAcquire that may have led
+  to unintended blocking when non-blocking was requested and vice
+  versa. This version of PGLogical eliminates this potential problem,
+  which has not been observed on production systems so far.
+
+- Fix serialization of Raft snapshots including commit decisions (CAMO, BDR-1454)
+  A possible mismatch in number of tuples could lead to serialization
+  or deserialization errors for a Raft snapshot taken after
+  transactions using CAMO or Eager All Node replication were used
+  recently and stored their commit decisions.
+
+- Fix `--recovery-conf` option in `bdr_init_physical`
+
+### Upgrades
+
+This release supports upgrading from the following versions of BDR:
+
+- 3.7.9 and higher
+- 3.6.27
+
## BDR 3.7.11

This is a maintenance release for BDR 3.7 which includes minor improvements
diff --git a/product_docs/docs/bdr/3.7/security.mdx b/product_docs/docs/bdr/3.7/security.mdx
index 3e866c61a17..b4d04120d7b 100644
--- a/product_docs/docs/bdr/3.7/security.mdx
+++ b/product_docs/docs/bdr/3.7/security.mdx
@@ -76,7 +76,7 @@ which users to copy across to the new node.

PostgreSQL allows you to dump all users with the command:

-```postgresql
+```shell
pg_dumpall --roles-only > roles.sql
```

@@ -290,51 +290,28 @@ EXECUTE privilege on

EXECUTE privilege on

- All functions for column_timestamps datatypes
-
- All functions for CRDT datatypes
-
- `bdr.alter_sequence_set_kind`
-
- `bdr.create_conflict_trigger`
-
- `bdr.create_transform_trigger`
-
- `bdr.drop_trigger`
-
- `bdr.get_configured_camo_partner_of`
-
- `bdr.get_configured_camo_origin_for`
-
- `bdr.global_lock_table`
-
- `bdr.is_camo_partner_connected`
-
- `bdr.is_camo_partner_ready`
-
- `bdr.logical_transaction_status`
-
- `bdr.ri_fkey_trigger`
-
- `bdr.seq_nextval`
-
- `bdr.seq_currval`
-
- `bdr.seq_lastval`
-
- `bdr.trigger_get_committs`
-
- `bdr.trigger_get_conflict_type`
-
- `bdr.trigger_get_origin_node_id`
-
- `bdr.trigger_get_row`
-
- `bdr.trigger_get_type`
-
- `bdr.trigger_get_xid`
-
- `bdr.wait_for_camo_partner_queue`
-
- `bdr.wait_slot_confirm_lsn`

Note that many of the above functions have additional privileges
diff --git a/product_docs/docs/bdr/3.7/sequences.mdx b/product_docs/docs/bdr/3.7/sequences.mdx
index bcf1ccec430..3fee64ffbac 100644
--- a/product_docs/docs/bdr/3.7/sequences.mdx
+++ b/product_docs/docs/bdr/3.7/sequences.mdx
@@ -212,7 +212,7 @@ to the next value.
```sql
-- determine highest sequence value across all nodes
SELECT max((x->'response'->'command_tuples'->0->>'nextval')::bigint)
-FROM json_array_elements(
+FROM jsonb_array_elements(
        bdr.run_on_all_nodes(
                E'SELECT nextval(\'public.sequence\')'
        )) AS x;
```
diff --git a/product_docs/docs/pglogical/3.7/configuration.mdx b/product_docs/docs/pglogical/3.7/configuration.mdx
new file mode 100644
index 00000000000..65873cb66ff
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/configuration.mdx
@@ -0,0 +1,300 @@
+---
+navTitle: PostgreSQL Configuration
+title: PostgreSQL settings which affect pglogical
+originalFilePath: configuration.md
+
+---
+
+Several PostgreSQL configuration options may need adjusting for pglogical
+to work.
+
+PostgreSQL must be configured for logical replication:
+
+```
+wal_level = 'logical'
+```
+
+The pglogical library needs to be loaded at server start, so the parameter
+`shared_preload_libraries` must contain pglogical, e.g.:
+
+```
+shared_preload_libraries = 'pglogical'
+```
+
+As pglogical uses additional worker processes to maintain state and apply
+the replicated changes, enough worker process slots need to be present:
+
+```
+max_worker_processes = 10
+```
+
+The formula for computing the correct value of `max_worker_processes` is:
+on the provider (upstream), one for the instance plus one per database; on
+the subscriber (downstream), one for the instance plus one per database plus
+two per subscription.
+
+pglogical uses replication slots and replication origins, so enough slots
+need to exist for those; both replication slots and origins are controlled
+by the same configuration option:
+
+```
+max_replication_slots = 10
+```
+
+One per subscription is needed on both the provider and the subscriber.
+
+The replication data is sent using walsender (just like physical replication):
+
+```
+max_wal_senders = 10
+```
+
+There is one walsender needed for every subscriber (on top of any standbys
+or backup streaming connections).
+
+If you are using PostgreSQL 9.5+ (this won't work on 9.4) and want to handle
+conflict resolution with last/first update wins (see [pglogical writer](pglogical-writer)),
+you can add this additional option to postgresql.conf:
+
+```
+track_commit_timestamp = on
+```
+
+Also `pg_hba.conf` has to allow replication connections from the subscribers.
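+
+Putting these together, a minimal sketch of the relevant postgresql.conf
+settings for a subscriber with one database and one subscription (the values
+are illustrative; size them for your own topology):
+
+```
+wal_level = 'logical'
+shared_preload_libraries = 'pglogical'
+max_worker_processes = 10   # formula gives 1 + 1 + 2 = 4; leave headroom
+max_replication_slots = 10  # at least one per subscription
+max_wal_senders = 10        # at least one per subscriber
+track_commit_timestamp = on # PostgreSQL 9.5+ only
+```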
+
+## pglogical specific settings
+
+There are additional pglogical specific configuration options. Some generic
+options are mentioned below, but most of the configuration options depend on
+which [writer](subscriptions) is used and are documented as part of the
+individual [writer](subscriptions) documentation.
+
+### `pglogical.synchronous_commit`
+
+This controls whether the pglogical apply worker should use synchronous
+commit. By default this is off. Turning it on has performance implications -
+the maximum replication throughput will be much lower. However, in low-TPS
+environments which use `synchronous_commit = remote_apply` on the provider,
+turning this option on can improve the transaction latency. This guidance
+may change in later releases.
+
+The pglogical.synchronous_commit setting for a subscription determines what
+happens to the data that the subscription's apply worker writes
+locally. The subscription's apply worker operates much like a normal
+client backend, and whatever it writes and commits is subject to its
+current pglogical.synchronous_commit setting.

In most cases, pglogical.synchronous_commit off is the best setting because it
avoids the flushing work at commit time, and it is safe because in case
of a crash the data can be re-obtained from the publishing server.

But if you use synchronous replication on the publishing server, then
the publishing server will wait for the subscribing server to send
feedback messages when the sent data has been flushed to disk on the
subscribing server (depending on the particular setting). If the
subscriber has pglogical.synchronous_commit off, then the flushing happens at some
random later time, and then the upstream publisher has to wait for that
to happen. In order to speed that up, you need to make the subscriber
flush data sooner, and the way to do that is to set pglogical.synchronous_commit
to a value other than off on the subscriber.

Also if you have standbys connected to this subscriber server then you can set the
value of pglogical.synchronous_commit to wait for confirmation from its standbys.

**NOTE** By design, when this setting is on, the apply worker will always wait
for the local flush confirmation, even if `synchronous_standby_names` points
to one or more physical standbys.

The default is off.

### `pglogical.track_subscription_apply`

This controls whether to track per subscription apply statistics. If this is on, the
`pglogical.stat_subscription` view will contain performance statistics for
each subscription which has received any data, otherwise the view is empty.

Collecting statistics requires additional CPU resources on the subscriber.

The default is on.

### `pglogical.track_relation_apply`

This controls whether to track per table apply statistics. If this is on, the
`pglogical.stat_relation` view will contain performance statistics for
each subscribed relation which has received any data, otherwise the view is
empty.

Collecting statistics requires additional CPU resources on the subscriber.

The default is off.

### `pglogical.temp_directory`

This defines the system path where temporary files needed for schema
synchronization are put. This path needs to exist and be writable by the
user running Postgres.

The default is empty, which tells pglogical to use the default temporary
directory based on environment and operating system settings.

### `pglogical.extra_connection_options`

This option may be set to assign connection options that apply to all
connections made by pglogical. This can be a useful
place to set up custom keepalive options, etc.

pglogical defaults to enabling TCP keepalives to ensure that it notices
when the upstream server disappears unexpectedly. To disable them, add
`keepalives = 0` to `pglogical.extra_connection_options`.

### `pglogical.synchronize_failover_slot_names`

This standby option allows setting which logical slots should be synchronized
to this physical standby. It's a comma-separated list of slot filters.

A slot filter is defined as a `key:value` pair (separated by colon) where `key`
can be one of:

- `name` - specifies to match exact slot name
- `name_like` - specifies to match slot name against SQL `LIKE` expression
- `plugin` - specifies to match slot plugin name against the value

The `key` can be omitted and will default to `name` in that case.

For example, `'my_slot_name,plugin:pglogical_output,plugin:pglogical'` will
synchronize the slot named "my_slot_name" and any pglogical slots.

If this is set to an empty string, no slots will be synchronized to this
physical standby.

The default value is `'plugin:pglogical,plugin:pglogical_output'`, meaning
pglogical slots will be synchronized.
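
For example, a sketch for a physical standby that should also synchronize any
slot whose name starts with `bdr` (the pattern is illustrative):

```
pglogical.synchronize_failover_slot_names = 'name_like:bdr%,plugin:pglogical,plugin:pglogical_output'
```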
+
+The default value is `'plugin:pglogical,plugin:pglogical_output'`, meaning
+pglogical slots will be synchronized.
+
+### `pglogical.synchronize_failover_slots_drop`
+
+This standby option controls what happens to extra slots on the standby that are
+not found on the primary using the `pglogical.synchronize_failover_slot_names`
+filter. If it's set to true, they will be dropped; otherwise they will be kept.
+
+The default value is `true`.
+
+### `pglogical.synchronize_failover_slots_dsn`
+
+A standby option for specifying which connection string to use to connect to
+the primary when fetching slot information.
+
+If empty (the default), the same connection string as `primary_conninfo` is used.
+
+Note that `primary_conninfo` cannot be used if there is a `password` field in
+the connection string, because it gets obfuscated by PostgreSQL and pglogical
+can't actually see the password. In this case
+`pglogical.synchronize_failover_slots_dsn` must be used.
+
+### `pglogical.standby_slot_names`
+
+This option is typically used in failover configurations to ensure that the
+failover-candidate streaming physical replica(s) for this pglogical provider
+have received and flushed all changes before they ever become visible to any
+subscribers. That guarantees that a commit cannot vanish on failover to a
+standby for the provider.
+
+Replication slots whose names are listed in the comma-separated
+`pglogical.standby_slot_names` list are treated specially by the walsender
+on a pglogical provider.
+
+pglogical's logical replication walsenders will ensure that all local changes
+are sent and flushed to the replication slots in `pglogical.standby_slot_names`
+before the provider sends those changes to any other pglogical replication
+clients. Effectively it provides a synchronous replication barrier between the
+named list of slots and all pglogical replication clients.
+
+Any replication slot may be listed in `pglogical.standby_slot_names`; both
+logical and physical slots work, but it's generally used for physical slots.
+
+Without this safeguard, two anomalies are possible where a commit can be
+received by a subscriber and then vanish from the provider on failover, because
+the failover candidate hadn't received it yet:
+
+- For 1+ subscribers, the subscriber may have applied the change but the new
+  provider may execute new transactions that conflict with the received change,
+  as it never happened as far as the provider is concerned;
+
+and/or
+
+- For 2+ subscribers, at the time of failover, not all subscribers have applied
+  the change. The subscribers now have inconsistent and irreconcilable states
+  because the subscribers that didn't receive the commit have no way to get it
+  now.
+
+Setting `pglogical.standby_slot_names` will (by design) cause subscribers to
+lag behind the provider if the provider's failover-candidate replica(s) are not
+keeping up. Monitoring is thus essential.
+
+Note that this setting is generally not required for BDR3 nodes (which are
+based on pglogical). Unlike base pglogical3, BDR3 is capable of reconciling
+lost changes from surviving peer nodes.
+
+### `pglogical.standby_slots_min_confirmed`
+
+Controls how many of the slots listed in `pglogical.standby_slot_names` have to
+confirm receipt before data is sent to pglogical subscribers.
+
+### `pglogical.writer_input_queue_size`
+
+This option is used to specify the size of the shared memory queue used
+by the receiver to send data to the writer process. If the writer process is
+stalled or making slow progress, then the queue might fill up, stalling
+the receiver process too. So it's important to provide enough shared memory for
+this queue. The default is 1MB and the maximum allowed size is 1GB. While any
+storage size specifier can be used to set the GUC, the default unit is kB.
+
+### `pglogical.writer_output_queue_size`
+
+This option is used to specify the size of the shared memory queue used
+by the receiver to receive data from the writer process. Since the writer is
+not expected to send a large amount of data, a relatively small queue
+should be enough. The default is 32kB and the maximum allowed size is 1MB.
+While any storage size specifier can be used to set the GUC, the default
+unit is kB.
+
+### `pglogical.min_worker_backoff_delay`
+
+Rate limit pglogical background worker launches by preventing a given worker
+from being relaunched more often than every
+`pglogical.min_worker_backoff_delay` milliseconds. Time-unit suffixes are
+supported.
+
+The default is 0, meaning no rate limit. The delay is a time limit applied from
+launch to launch, so a value of `'500ms'` would limit all types of workers to
+at most 2 (re)launches per second.
+
+If the backoff delay setting is changed and the PostgreSQL configuration is
+reloaded, then all current backoff waits will be reset. Additionally, the
+`pglogical.worker_task_reset_backoff_all()` function is provided to allow the
+administrator to force all backoff intervals to immediately expire.
+
+A tracking table in shared memory is maintained to remember the last launch
+time of each type of worker. This tracking table is not persistent; it is
+cleared by PostgreSQL restarts, including soft restarts during crash recovery
+after an unclean backend exit.
+
+The view [`pglogical.worker_tasks`](troubleshooting#pglogical.worker_tasks)
+may be used to inspect this state so the administrator can see any backoff
+rate-limiting currently in effect.
+
+For rate limiting purposes, workers are classified by "task". This key consists
+of the worker role, database oid, subscription id, subscription writer id,
+extension library name and function name, extension-supplied worker name, and
+the remote relation id for sync writers. `NULL` is used where a given
+classifier does not apply, e.g. manager workers don't have a subscription ID
+and receivers don't have a writer id.
+
+### `pglogical.max_writers_per_subscription`
+
+Specifies the maximum number of parallel writers that a subscription may use.
+Values between 1 and 64 are allowed, with the default being 8. When set to 1,
+parallel apply is effectively disabled.
+
+### `pglogical.writers_per_subscription`
+
+Sets the default number of parallel writers for subscriptions without an
+explicitly set value. Values between 1 and 64 are allowed, with the default
+being 4.
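+
+As a rough sketch of how several of the apply-related settings above might be
+adjusted together (all values here are illustrative, not recommendations):
+
+```postgresql
+-- Illustrative values only; tune for your workload.
+ALTER SYSTEM SET pglogical.writer_input_queue_size = '4MB';
+ALTER SYSTEM SET pglogical.writer_output_queue_size = '64kB';
+ALTER SYSTEM SET pglogical.min_worker_backoff_delay = '500ms';
+ALTER SYSTEM SET pglogical.writers_per_subscription = 4;
+-- Reload the configuration; some settings may only take effect for
+-- newly started workers or after a server restart.
+SELECT pg_reload_conf();
+```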
diff --git a/product_docs/docs/pglogical/3.7/credits.mdx b/product_docs/docs/pglogical/3.7/credits.mdx
new file mode 100644
index 00000000000..4fb8722351a
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/credits.mdx
@@ -0,0 +1,21 @@
+---
+navTitle: Credits and License
+title: Credits and License
+originalFilePath: credits.md
+
+---
+
+pglogical has been designed, developed and tested by this team:
+
+- Petr Jelinek
+- Craig Ringer
+- Simon Riggs
+- Peter Eisentraut
+- Tomas Vondra
+- Pallavi Sontakke
+- Nikhil Sontakke
+- Pavan Deolasee
+- Umair Shahid
+- Markus Wanner
+
+Copyright (c) 2021 EnterpriseDB UK Ltd
diff --git a/product_docs/docs/pglogical/3.7/ddl.mdx b/product_docs/docs/pglogical/3.7/ddl.mdx
new file mode 100644
index 00000000000..018066ef432
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/ddl.mdx
@@ -0,0 +1,263 @@
+---
+title: DDL Replication
+originalFilePath: ddl.md
+
+---
+
+pglogical3 supports *transparent DDL replication*, where schema change commands
+run on provider node(s) are automatically captured and replicated to subscriber
+node(s) without the need for wrapper functions like
+`pglogical.replicate_ddl_command` or external schema management tools.
+
+The main difference from normal replication of table rows ("table replication")
+is that DDL replication replicates statements themselves rather than the
+effects of those statements. Normal data replication replicates the changes
+made by a statement, e.g. it sends the rows that got `UPDATE`d by an `UPDATE`
+command rather than replicating and executing the `UPDATE` statement itself.
+pglogical's DDL replication captures, replicates and executes *the text of the
+DDL statement itself*.
+
+## Minimal example
+
+Enabling DDL replication on a pglogical provider can be as simple as:
+
+```postgresql
+SELECT * FROM pglogical.replication_set_add_ddl('ddl_sql', 'all', NULL, NULL);
+```
+
+to replicate any capturable DDL statements executed by any user on the
+provider database to any subscriber(s) that subscribe to the enabled-by-default
+`ddl_sql` replication set.
+
+However, it's generally *recommended* to enable replication of a targeted subset
+of DDL instead.
+
+There are also caveats relating to replication of changes to "global objects"
+like roles, the handling of some `ALTER TABLE` modes, etc., that are important
+to understand. See ["Restrictions"](#Restrictions) below.
+
+## How to use DDL replication
+
+Transparent DDL replication in pglogical builds on the same [Replication
+Sets](replication-sets) model that's used by replication of table contents.
+The same replication set(s) may be used for both replicating table contents
+and for DDL.
+
+To replicate future DDL commands on a provider, a DDL replication filter must
+be added to the replication set(s) used by subscribers that should receive
+and apply the DDL.
+
+The DDL filter can specify a `command_tag` and `role_name` to allow
+replication of only some DDL statements. The `command_tag` is the same as those
+used by [EVENT TRIGGERs](https://www.postgresql.org/docs/current/static/event-trigger-matrix.html).
+The `role_name` is used for matching against the current role which is executing the
+command. Both `command_tag` and `role_name` are evaluated as case-sensitive
+regular expressions.
+
+## Functions for managing DDL replication filters
+
+The following functions are provided for managing DDL replication filters using
+replication sets:
+
+### pglogical.replication_set_add_ddl
+
+Adds a DDL replication filter to a replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_add_ddl (
+    set_name name,
+    ddl_filter_name text,
+    command_tag text,
+    role_name text
+)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `ddl_filter_name` - name of the new DDL replication filter
+- `command_tag` - regular expression for matching command tags
+- `role_name` - regular expression for matching role name
+
+The `command_tag` and `role_name` parameters can be set to `NULL`, in which case
+they will match any command tag or role respectively. They are both regular
+expressions, so you can use patterns like `'CREATE.*'` or `'(CREATE|DROP).*'`.
+
+The target object identity (oid, name, etc.) is not exposed, so you cannot
+filter on it.
+
+### pglogical.replication_set_remove_ddl
+
+Removes a DDL replication filter from a replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_remove_ddl(set_name name, ddl_filter_name text)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `ddl_filter_name` - name of the DDL replication filter to be removed from the set
+
+## Additional functions and views
+
+## `pglogical.ddl_replication`
+
+This view lists the DDL replication configuration as set up by the current DDL
+filters.
+
+### `pglogical.ddl_replication` Columns
+
+| Name | Type | Description |
+| ------------ | ---- | ------------------------------------------------------------ |
+| set_ddl_name | name | Name of DDL filter |
+| set_ddl_tag | text | Which command tags it applies to (regular expression) |
+| set_ddl_role | text | Which roles it applies to (regular expression) |
+| set_name | name | Name of the replication set for which this filter is defined |
+
+### pglogical.replicate_ddl_command
+
+This function can be used to explicitly replicate a command as-is using the
+specified set of replication sets. The command will also be executed locally.
+
+In most cases `pglogical.replicate_ddl_command` is rendered obsolete by
+pglogical's support for DDL replication filters.
+
+#### Synopsis
+
+```postgresql
+pglogical.replicate_ddl_command(command text, replication_sets text[])
+```
+
+#### Parameters
+
+- `command` - DDL query to execute
+- `replication_sets` - array of replication sets which this command should be
+  associated with; default "{ddl_sql}"
+
+## Restrictions
+
+When the DDL replication filter matches a DDL command it will modify the
+`search_path` configuration parameter used to execute the DDL on both provider
+and subscriber(s) to include only `pg_catalog`, i.e. the system catalogs. This
+means that all the user objects referenced in the query must be fully
+schema-qualified. For example, `CREATE TABLE foo...` will raise an error when
+executed and has to be rewritten as `CREATE TABLE public.foo...`.
+
+DDL that matches the DDL replication filter and does not comply with this
+requirement will fail with an error like this:
+
+```
+ERROR: no schema has been selected to create in
+STATEMENT: CREATE TABLE foo (id integer);
+```
+
+or will raise an `ERROR` message complaining that data types, tables, etc.,
+referenced by the DDL statement do not exist, even though they can be queried
+normally, are shown by `psql`, and so on.
+
+For example, attempting to drop some table `public.a` will fail:
+
+```
+ERROR: table "a" does not exist
+STATEMENT: DROP TABLE a;
+```
+
+and must be rewritten as:
+
+```
+DROP TABLE public.a;
+```
+
+The same restriction applies to any command executed using the
+`pglogical.replicate_ddl_command` function. The function call has the
+additional restriction that it cannot execute special commands which need to be
+run outside of a transaction. Most notably, `CREATE INDEX CONCURRENTLY` will
+fail if run using `pglogical.replicate_ddl_command`, but will work via DDL
+replication sets.
+
+For testing purposes it can be useful to simulate the behaviour of
+DDL replication capture manually in psql. To do so, set the `search_path` to
+the empty string. For example:
+
+```
+BEGIN;
+SET LOCAL search_path = '';
+CREATE TABLE mytable(id integer);
+COMMIT;
+```
+
+This will fail with `ERROR: no schema has been selected to create in`.
+
+## Considerations with global objects
+
+Because PostgreSQL has objects that exist within one database, objects shared
+by all databases, and objects that exist outside the catalogs, some care is
+required when you may potentially replicate a subset of DDL or replicate DDL
+from more than one database:
+
+- pglogical can capture and replicate DDL that affects global objects like
+  roles, users, groups, etc., but only if the commands are run in a database
+  with pglogical DDL replication enabled. So it's easy to get into inconsistent
+  states if you do something like `CREATE ROLE` in the `postgres` database and
+  then `ALTER ROLE` in the `my_pglogical_enabled` database. The resulting
+  captured DDL may not apply on the downstream, requiring a transaction to be
+  skipped over or non-replicated DDL to be run on the downstream to create the
+  object that's targeted by the replicated DDL.
+
+- pglogical can also capture and replicate DDL that references global objects
+  that may not exist on the other node(s), such as tablespaces and users/roles.
+  So an `ALTER TABLE ... OWNER TO ...` can fail to apply if the role, a global
+  object, does not exist on the downstream. You may have to create a dummy
+  global object on the downstream or, if absolutely necessary, skip some changes
+  from the stream.
+
+- DDL that references local paths like tablespaces may fail to apply on the other
+  end if paths differ.
+
+In general, you should run all your DDL via your pglogical-enabled database, and
+ensure that all global objects *exist* on the provider and all subscribers. This
+may require the creation of dummy roles, dummy tablespaces, etc.
+
+## `pglogical.tables`
+
+This view lists information about table membership in replication sets.
+If a table exists in multiple replication sets it will appear multiple times
+in this table.
+
+### `pglogical.tables` Columns
+
+| Name | Type | Description |
+| ----------- | ------- | ---------------------------------------------------- |
+| relid | oid | The OID of the relation |
+| nspname | name | Name of the schema relation is in |
+| relname | name | Name of the relation |
+| set_name | name | Name of the replication set |
+| set_ops | text\[] | List of replicated operations |
+| rel_columns | text\[] | List of replicated columns (NULL = all columns) (\*) |
+| row_filter | text | Row filtering expression |
+
+## `pglogical.queue`
+
+Queued DDL commands, along with a message stating the replication information,
+can be seen in ascending order in this view.
+
+## `pglogical.queue_truncate`
+
+A function that erases all the logged information shown in the view.
diff --git a/product_docs/docs/pglogical/3.7/failover.mdx b/product_docs/docs/pglogical/3.7/failover.mdx
new file mode 100644
index 00000000000..d70b9898cbb
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/failover.mdx
@@ -0,0 +1,214 @@
+---
+navTitle: Failover
+title: Failover with pglogical3
+originalFilePath: failover.md
+
+---
+
+pglogical has support for following failover of both the provider (logical
+master) and the subscriber (logical replica) if the conditions described in the
+following sections are met.
+
+Only failover to streaming physical replicas is supported. pglogical
+subscribers cannot switch from replicating from the provider to replicating
+from another peer subscriber.
+
+## Provider failover setup
+
+*With appropriate configuration of the provider and the provider's physical
+standby(s)*, pglogical subscriber(s) can follow failover of the provider to a
+promoted physical streaming replica of the provider.
+
+Given a topology like this:
+
+```
+[Provider1] -------------> [Provider2]
+     |                          ^
+     |                          | physical
+     |                          |-streaming
+     |                          replication
+     |                          (specific configuration
+     |                          required)
+     |
+     |
+     | <- logical
+     |    replication
+     |
+     |
+[Subscriber1]
+```
+
+On failure of Provider1 and promotion of Provider2 to replace it, pglogical on
+Subscriber1 can consistently follow the failover and promotion if:
+
+- Provider1 and Provider2 run PostgreSQL 10 or newer
+- The connection between Provider1 and Provider2 uses streaming replication
+  with hot standby feedback and a physical replication slot. It's OK if WAL
+  archiving and a `restore_command` are configured as a fallback.
+- Provider2 has:
+  - `recovery.conf`:
+    - `primary_conninfo` pointing to Provider1
+    - `primary_slot_name` naming a physical replication slot on Provider1 to be
+      used only by Provider2
+  - `postgresql.conf`:
+    - `pglogical` in its `shared_preload_libraries`
+    - `hot_standby = on`
+    - `hot_standby_feedback = on`
+    - [`pglogical.synchronize_failover_slot_names`](configuration#pglogical-synchronize-failover-slot-names)
+      can be modified to specify which slots should be synchronized (default is all pglogical/bdr slots)
+- Provider1 has:
+  - `postgresql.conf`:
+    - [`pglogical.standby_slot_names`](configuration#pglogical-standby-slot-names)
+      lists the physical replication slot used for Provider2's `primary_slot_name`.
+      Promotion will still work if this is not set, but subscribers may be inconsistent
+      per the linked documentation on the setting.
+- Provider2 has had time to sync and has created a copy of Subscriber1's
+  logical replication slot. pglogical3 creates master slots on replicas
+  automatically once the replica's resource reservations can satisfy the master
+  slot's requirements, so just check that all pglogical slots on the master exist
+  on the standby and have `confirmed_flush_lsn` set.
+- Provider2 takes over Provider1's IP address or hostname, *or* Subscriber1's
+  existing subscription is reconfigured to connect to Provider2 using
+  `pglogical.alter_node_add_interface` and `pglogical.alter_subscription_interface`.
+
+It is not necessary for Subscriber1 to be aware of or able to connect to
+Provider2 until it is promoted.
+
+The post-failover topology is:
+
+```
+XXXXXXXXXXX
+xProvider1x                [Provider2]
+XXXXXXXXXXX                     ^
+                                |
+                                |
+                                |
+                                |
+     |---------------------------
+     |
+     | <- logical
+     |    replication
+     |
+     |
+[Subscriber1]
+```
+
+The reason pglogical must run on the provider's replica, and the provider's
+replica must use a physical replication slot, is due to limitations in
+PostgreSQL itself.
+
+Normally when a PostgreSQL instance is replaced by a promoted physical replica
+of the same instance, any replication slots on that node are lost. Replication
+slot status is not itself replicated along physical replication connections and
+does not appear in WAL. So if the failed-and-replaced node was the upstream
+provider of any logical subscribers, those subscribers stop being able to
+receive data and cannot recover. Physical failover breaks logical replication
+connections.
+
+To work around this, pglogical3 running on the failover-candidate replica syncs
+the state of the master provider's logical replication slot(s) to the replica.
+It also sends information back to the master to ensure that those slots'
+guarantees (like `catalog_xmin`) are respected by the master. That
+synchronization requires a physical replication slot to avoid creating
+excessive master bloat and to ensure the reservation is respected by the master
+even if the replication connection is broken.
+
+## Subscriber failover setup
+
+pglogical automatically follows failover of a subscriber to a streaming physical
+replica of the subscriber. No additional configuration is required.
+
+**WARNING**: At present it's possible for the promoted subscriber to lose some
+transactions that were committed on the failed subscriber and confirmed-flushed
+to the provider, but not yet replicated to the new subscriber at the time of
+promotion. That's because the provider will silently start replication at the
+greater of the position the subscriber sends from its replication origin and
+the position the master has recorded in its slot's `confirmed_flush_lsn`.
+
+Where possible, you should execute a planned failover by stopping the
+subscription on Subscriber1 and waiting until Subscriber2 is caught up to
+Subscriber1 before failing over.
+
+Given the server topology:
+
+```
+[Provider1]
+     |
+     |
+     | <- logical
+     |    replication
+     |
+     |
+     |
+[Subscriber1]------------> [Subscriber2]
+                  ^
+                  | physical
+                  |-streaming
+                    replication
+```
+
+Upon promotion of Subscriber2 to replace a failed Subscriber1, logical
+replication will resume normally. It doesn't matter whether Subscriber2 has the
+same IP address or not.
+
+For replication to resume promptly it may be necessary to explicitly terminate
+the walsender for Subscriber1 on Provider1 if the connection failure is not
+detected promptly by Provider1. pglogical enables TCP keepalives by default, so
+in the absence of manual action it should exit and release the slot
+automatically within a few minutes.
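+
+If the dead walsender must be terminated manually, a query along the following
+lines could be run on Provider1 to find and terminate the backend still holding
+Subscriber1's slot. This is only a sketch; the slot name used here is
+hypothetical:
+
+```postgresql
+-- On Provider1: terminate the walsender still holding the failed
+-- subscriber's slot ('pgl_subscriber1_slot' is a hypothetical name).
+SELECT pg_terminate_backend(active_pid)
+  FROM pg_replication_slots
+ WHERE slot_name = 'pgl_subscriber1_slot'
+   AND active;
+```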
+
+It is important that Subscriber1 be fenced or otherwise conclusively terminated
+before Subscriber2 is promoted. Otherwise Subscriber1 can interfere with
+Subscriber2's replication progress tracking on Provider1 and create gaps in the
+replication stream.
+
+After failover the topology is:
+
+```
+[Provider1]
+     |
+     |
+     | <- logical
+     |    replication
+     |
+     |-------------------------------
+                                    |
+                                    |
+XXXXXXXXXXXXXXX                     |
+X[Subscriber1]X            [Subscriber2]
+XXXXXXXXXXXXXXX
+```
+
+Note: at this time it is possible that there can be a small window of
+replicated data loss around the window of failover. pglogical on Subscriber1
+may send confirmation of receipt of data to Provider1 before ensuring that
+Subscriber2 has received and flushed that data.
+
+## Additional functions
+
+### pglogical.sync_failover_slots()
+
+Signals the supervisor to restart the mechanism to synchronize the failover
+slots specified in the
+[`pglogical.synchronize_failover_slot_names`](configuration#pglogical-synchronize-failover-slot-names)
+setting.
+
+#### Synopsis
+
+    pglogical.sync_failover_slots();
+
+This function should be run on the subscriber.
+
+## Legacy: Provider failover with pglogical2 using failover slots
+
+An earlier effort to support failover of logical replication used the "failover
+slots" patch to PostgreSQL 9.6. This patch is carried in 2ndQPostgres 9.6
+(only), but did not get merged into any community PostgreSQL version. pglogical2
+supports using 2ndQPostgres and failover slots to follow provider failover.
+
+The failover slots patch is neither required nor supported by pglogical3.
+pglogical3 only supports provider failover on PostgreSQL 10 or newer, since
+that is the first PostgreSQL version that contains support for sending
+`catalog_xmin` in hot standby feedback and for logical decoding to follow
+timeline switches.
+
+This section is retained to explain the change in failover models and reduce
+any confusion that may arise when updating from pglogical2 to pglogical3.
diff --git a/product_docs/docs/pglogical/3.7/index.mdx b/product_docs/docs/pglogical/3.7/index.mdx
new file mode 100644
index 00000000000..f8a762a18f7
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/index.mdx
@@ -0,0 +1,53 @@
+---
+navigation:
+  - index
+  - nodes
+  - replication-sets
+  - ddl
+  - subscriptions
+  - configuration
+  - failover
+  - restrictions
+  - troubleshooting
+  - credits
+  - release-notes
+navTitle: pglogical
+title: pglogical 3
+originalFilePath: index.md
+
+---
+
+The pglogical 3 extension provides logical streaming replication for PostgreSQL,
+using a publish/subscribe model. It is based on technology developed as part
+of the BDR3 project.
+
+We use the following terms to describe data streams between nodes:
+
+- Nodes - PostgreSQL database instances
+- Providers and Subscribers - roles taken by Nodes
+- Replication Set - a collection of tables
+
+These terms have been deliberately reused from the earlier Slony technology.
+
+pglogical is new technology utilizing the latest in-core features, so we
+require both provider and subscriber nodes to run PostgreSQL 10 or newer.
+
+Use cases supported are:
+
+- Upgrades between major versions (given the above restrictions)
+- Full database replication
+- Selective replication of sets of tables using replication sets
+- Selective replication of table rows at either provider or subscriber side (row_filter)
+- Selective replication of table columns at provider side
+- Data gather/merge from multiple upstream servers
+
+Architectural details:
+
+- pglogical works on a per-database level, not a whole-server level like
+  physical streaming replication
+- One Provider may feed multiple Subscribers without incurring additional disk
+  write overhead
+- One Subscriber can merge changes from several origins and detect conflicts
+  between changes with automatic and configurable conflict resolution (some,
+  but not all aspects required for multi-master).
+- Cascading replication is implemented in the form of changeset forwarding.
diff --git a/product_docs/docs/pglogical/3.7/nodes.mdx b/product_docs/docs/pglogical/3.7/nodes.mdx
new file mode 100644
index 00000000000..b422026deca
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/nodes.mdx
@@ -0,0 +1,192 @@
+---
+title: Nodes
+originalFilePath: nodes.md
+
+---
+
+Each database that participates in pglogical replication must be represented
+by its own node. Each node must have a name that is unique amongst all nodes
+that will interact and communicate with each other.
+
+A pglogical node associates an operator-supplied node name with a connection
+string that pglogical subscriber nodes may use to communicate with the node.
+It also serves to mark a database as pglogical-enabled and as something with
+which to associate other pglogical state like replication sets or
+subscriptions.
+
+Creating a pglogical node in an existing PostgreSQL database using the
+[`pglogical.create_node`](#pglogical_create_node) function causes pglogical to
+launch a manager process for the node but doesn't cause any sort of replication
+to begin.
+
+A node can be set up as a pglogical provider by having replication sets created
+and/or tables and DDL filters added to the default replication set. A node can
+be set up as a pglogical subscriber by having a subscription created on it
+after the node is created. Any given node can be a provider, a subscriber, or
+both; nothing in node creation marks the node as one or the other.
+
+## Setting up a node as a provider
+
+Newly created pglogical nodes have empty replication sets and do not have any
+DDL replication filters enabled.
+
+[Replication sets](replication-sets) specify what data from which tables and
+sequences should be sent to any subscriptions that use this node as a provider.
+
+[DDL replication filters](ddl) capture and replicate schema changes to
+subscribers so that subscribers' definitions of tables, types, etc remain in
+sync with the provider.
+
+The simplest way to set up a new node as a provider is to re-create the
+'default' replication set with all existing and future tables and sequences
+auto-added to it. Then add a DDL replication filter to it to enable automatic
+schema replication. For example:
+
+```postgresql
+SELECT pglogical.create_node('mynode');
+SELECT pglogical.drop_replication_set('default');
+SELECT pglogical.create_replication_set('default', autoadd_tables := true, autoadd_sequences := true, autoadd_existing := true);
+SELECT pglogical.replication_set_add_ddl('default', 'all_ddl', '.*', NULL);
+```
+
+A replication set drop and create is used here because
+`pglogical.alter_replication_set` cannot autoadd existing tables.
+
+## Setting up a node as a subscriber
+
+Newly created nodes do not have any subscriptions, so they won't receive
+changes from anywhere. Use
+[`pglogical.create_subscription(...)`](subscriptions#pglogical_create_subscription)
+to populate a node with initial data and start receiving streams of changes. Or use
+the separate [`pglogical_create_subscriber`](subscriptions#pglogical_create_subscriber)
+to create a node and subscription from a basebackup or physical streaming replica.
+
+Creating subscriber nodes is discussed in detail in ["Subscriptions"](subscriptions).
+
+## Node information
+
+### pglogical.local_node
+
+A view containing node information, but only for the
+local node.
+
+## `pglogical.node`
+
+This table lists all pglogical nodes.
+
+### `pglogical.node` Columns
+
+| Name | Type | Description |
+| --------- | ---- | ---------------- |
+| node_id | oid | Id of the node |
+| node_name | name | Name of the node |
+
+### `pglogical.node_interface`
+
+This is a view that elaborates the information in `pglogical.node`,
+showing the DSN and node interface information.
+
+### `pglogical.node_interface` Columns
+
+| Name | Type | Description |
+| --------- | ---- | ------------------------------------- |
+| if_id | oid | Node Interface ID |
+| if_name | name | Name of the node the interface is for |
+| if_nodeid | oid | ID of the node |
+| if_dsn | text | DSN of the node |
+
+## Node management
+
+Nodes can be added and removed dynamically using SQL calls to functions
+provided by pglogical.
+
+### pglogical.create_node
+
+Creates a pglogical node. Only one pglogical node may exist on a given
+PostgreSQL database. There's nothing special to mark a node as a provider
+or subscriber - one node may be either, or both.
+
+#### Synopsis
+
+```postgresql
+pglogical.create_node(node_name name, dsn text)
+```
+
+#### Parameters
+
+- `node_name` - name of the new node; only one node is allowed per database
+- `dsn` - connection string to the node. For nodes that are supposed to be
+  providers, this should be reachable from outside
+
+### pglogical.drop_node
+
+Removes the node.
+
+#### Synopsis
+
+```postgresql
+pglogical.drop_node(node_name name, ifexists bool)
+```
+
+#### Parameters
+
+- `node_name` - name of an existing node
+- `ifexists` - if true, an error is not thrown when the node does not exist;
+  default is false
+
+### pglogical.alter_node_add_interface
+
+Adds an interface to a node.
+
+When a node is created, the interface for it is also created with the `dsn`
+specified in `create_node` and with the same name as the node.
+
+If the address or connection options required for a subscriber to connect to a
+provider change, create a new interface *for* the provider *on* the subscriber,
+then associate it with the subscription so that the subscriber switches to the
+new connection string. For example:
+
+```postgresql
+SELECT pglogical.alter_node_add_interface('providername', 'providername_newconnname', 'connstr');
+SELECT pglogical.alter_subscription_interface('subscription_name', 'providername_newconnname');
+```
+
+At this time creating a new interface on the provider side has no effect on any
+subscribers. Subscribers use the connection string specified at subscription
+creation time, not the connection string declared for a node.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_node_add_interface (
+    node_name name,
+    interface_name name,
+    dsn text
+)
+```
+
+This function allows adding alternative interfaces with different connection
+strings to an existing node.
+
+#### Parameters
+
+- `node_name` - name of an existing node
+- `interface_name` - name of a new interface to be added
+- `dsn` - connection string to the node used for the new interface
+
+### pglogical.alter_node_drop_interface
+
+Removes an existing named interface from a node.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_node_drop_interface(node_name name, interface_name name)
+```
+
+#### Parameters
+
+- `node_name` - name of an existing node
+- `interface_name` - name of an existing interface
diff --git a/product_docs/docs/pglogical/3.7/release-notes.mdx b/product_docs/docs/pglogical/3.7/release-notes.mdx
new file mode 100644
index 00000000000..4a10fa09e49
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/release-notes.mdx
@@ -0,0 +1,1069 @@
+---
+navTitle: 'Appendix A: Release Notes'
+title: 'Appendix A: Release Notes for pglogical3'
+originalFilePath: release-notes.md
+
+---
+
+## pglogical 3.7.12
+
+This is a maintenance release for pglogical 3.7 which includes minor
+improvements as well as fixes for issues identified previously.
+
+### Improvements
+
+- Add replication status monitoring (BDR-865)
+  Track the connection establishment and drops, the number of
+  transactions committed, as well as the restart LSN, to detect repeated
+  restarts and diagnose stalled replication.
+
+- Improve performance when used in combination with synchronous replication (BDR-1398)
+  Override `synchronous_commit` to `local` for all PGLogical
+  processes performing only bookkeeping transactions which do not need
+  to be replicated. Only the PGLogical writer processes applying user
+  transactions need to follow the `synchronous_commit` mode configured
+  for synchronous replication.
+
+- Internal improvements and additional hooks to better support BDR
+
+### Resolved Issues
+
+- Performance improvements for Decoding Worker (BDR-1311, BDR-1357)
+  Speed up lookups of the WAL decoder worker, reduce delays after
+  reaching up to the LSN previously known to be decoded by the
+  Decoding Worker, and reduce the number of system calls when writing one
+  LCR chunk to an LCR segment file.
+
+- Improve compatibility with Postgres 13 (BDR-1396)
+  Adjust to an API change in ReplicationSlotAcquire that may have led
+  to unintended blocking when non-blocking was requested and vice
+  versa. This version of PGLogical eliminates this potential problem,
+  which has not been observed on production systems so far.
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+- 3.7.9 and higher
+- 3.6.27
+- 2.4.0
+
+## pglogical 3.7.11
+
+This is a maintenance release for pglogical 3.7 which includes minor
+improvements as well as fixes for issues identified previously.
+
+### Resolved Issues
+
+- Add protection against malformed parameter values in pgl output plugin
+  This fixes a potential crash when some parameters sent to the output plugin
+  were malformed.
+
+- Get copy of slot tuple when logging conflict (BDR-734)
+  Otherwise we could materialize the row early, causing a wrong update in the
+  presence of additional columns on the downstream.
+
+- Use a separate memory context for processing LCRs (BDR-1237, RT72165)
+  This fixes a memory leak when using the Decoding Worker feature of BDR.
+
+- Truncate LCR segment file after recovery (BDR-1236, BDR-1259)
+  This fixes memory errors reported by the Decoding Worker after a crash.
+
+- Ensure `pg_read_and_filter_lcr` will exit when the postmaster dies
+  (BDR-1226, BDR-1209, RT72083)
+  Solves issues with a hanging decoder worker on shutdown.
+
+- Fix memory leak in the pglogical COPY handler (BDR-1219, RT72091)
+  This fixes a memory leak when synchronizing large tables.
+
+- Allow binary and internal protocol on more hardware combinations. This
+  currently only affects internal testing.
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+- 3.7.9 and higher
+- 3.6.27
+- 2.4.0
+
+## pglogical 3.7.10
+
+This is a maintenance release for pglogical 3.7 which includes minor
+improvements as well as fixes for issues identified previously.
+
+### Improvements
+
+- Windows support improvements (BDR-976, BDR-1083)
+
+### Resolved Issues
+
+- Fix potential crash during cleanup of bulk copy replication (BDR-1168)
+
+- Fix issues in generic WAL message handling when the WAL message was produced
+  by something other than pglogical (BDR-670)
+
+- Redefine `werr_age` of `pglogical.worker_error_summary` to report the correct age
+
+- Only use key attributes of a covering unique index when used as replica identity
+  This only affects what is being sent over the network; no logic change.
+
+## pglogical 3.7.9
+
+This is a maintenance release for pglogical 3.7 which includes minor
+improvements as well as fixes for issues identified previously.
+
+### Improvements
+
+- Support two-phase commit transactions with the Decoding Worker (BDR-811)
+
+  A two-phase commit transaction is completely decoded and sent downstream when
+  processing its PREPARE WAL record. COMMIT/ROLLBACK PREPARED is replicated
+  separately when processing the corresponding WAL record.
+
+- Reduce writes to `pg_replication_origin` when using parallel apply (RT71077)
+  Previously, especially on EPAS, pglogical could produce thousands of dead rows
+  in the `pg_replication_origin` system catalog if it had connection problems to
+  the upstream.
+
+### Resolved Issues
+
+- Fix flush queue truncation (BDR-890)
+  During queue compaction, flush to the correct LSN instead of always truncating
+  the whole flush queue.
+
+- Fix the `pglogical.worker_error` columns `werr_worker_index` and `werr_time`
+  These used to report wrong values.
+
+- Fix snapshot handling in internal executor usage (BDR-904)
+  Row filtering didn't correctly push the snapshot in some situations.
+
+  Caught thanks to an upcoming change in PostgreSQL that double-checks for this.
+
+- Fix handling of domains over arrays and composite types (BDR-29)
+  The replication protocol previously could not always handle columns using
+  domains over arrays and domains over composite types.
+
+## pglogical 3.7.8
+
+This is the first stable release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+Upgrades are supported from pglogical 3.6.25 and 3.7.7 in this release.
+
+### Improvements
+
+- Allow parallel apply on EDB Advanced Server (EE)
+
+### Resolved Issues
+
+- Fix divergence after physical failover (BDR-365, RT68894 and RM19886)
+  Make sure that we don't report back an LSN on the subscriber that
+  hasn't been received by the named standbys (`pglogical.standby_slot_names`).
+
+  This will ensure the provider side keeps its slot position far enough
+  behind so that if the subscriber is replaced by one of said
+  named standbys, the standby will be able to fetch any missing
+  replication stream from the original provider.
+
+- Fix crash when an ERROR happened during fast shutdown of the pglogical writer
+
+- Don't re-enter the worker error handling loop recursively (BDR-667)
+  This should help make what happens clearer in any cases where we do
+  encounter errors during error processing.
+
+- Assign collation to the index scan key (BDR-561)
+  When doing lookups for INSERT/UPDATE/DELETE, either to find conflicts
+  or the key for the operation to be applied, we should use the correct collation.
+
+  This fixes index lookups for indexes on textual fields on Postgres 12+.
+
+- Use the `name` data type for known fixed-length fields (BDR-561)
+  This solves potential index collation issues with pglogical catalogs.
+
+- Progress the WAL sender's slot based on WAL decoder input (BDR-567)
+  Without slot progression the server will eventually stop working.
+
+- Fix begin-of-transaction write when the LCR file does not have enough space (BDR-606)
+
+- Restart decoding a transaction that was not completed in a single decoding worker (BDR-247)
+  If we crashed during a long transaction that spans more than one LCR file,
+  we start decoding again from the LSN of the beginning of the transaction and
+  find the last LCR file that we can write into.
+
+- Generate the temp slot with the correct length in subscription sync
+  Otherwise the name of the slot might be shortened by Postgres, leading to
+  confusion during slot cleanup.
+
+- Improve detection of mixing temporary and nontemporary objects in DDLs (BDR-485)
+  These can break replication, so it's important not to allow them.
+
+- Fix pre-commit message handling in Eager replication (BDR-492)
+
+- Override GUCs in all pglogical workers, not just in writers.
+
+## pglogical 3.7.7
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+Upgrades are supported from pglogical 3.6.25 and 3.7.6 in this release.
+
+### Improvements
+
+- Use parallel apply during initial sync during logical join
+
+- Add the worker process index to the worker_error catalog (BDR-229)
+  The column `werr_worker_index` in the worker_error table keeps track
+  of the writers for the same subscription.
+
+- Various improvements for WAL decoder/sender coordination (BDR-232, BDR-335)
+
+- Name LCR segments similar to XLOG segments (BDR-236, BDR-253, BDR-321, BDR-322)
+  An LCR segment file is identified by five 8-digit hex numbers: the timeline
+  in the first group, the XLOG position in the next two groups, and the file
+  number in the last two groups.
+
+### Resolved Issues
+
+- Restrict adding the queue table to a replication set. (EDBAS, EBD-45)
+
+- Fix deadlock between receiver and writer during queue flush (BDR-483)
+
+- Force and wait for all writers to exit when one writer dies (BDR-229)
+
+- Name the LCR directory after the replication slot (BDR-60)
+  A logical output plugin may be used by multiple replication slots. Store
+  the LCRs from a given replication slot in a directory named after that
+  replication slot to avoid mixing LCRs for different slots.
+
+- Fix EXPLAIN...INTO TABLE replication issue (EBC-46)
+
+## pglogical 3.7.6
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+Upgrades are supported from pglogical 3.6.25 in this release.
+
+### Improvements
+
+- Enable parallel apply for CAMO and Eager (RM17858)
+
+- Improve error handling in Eager/CAMO, especially with parallel apply
+  (BDR-106)
+
+- Reduce severity of Eager/CAMO feedback errors
+
+- Add infrastructure necessary for allowing separation of WAL decoding from
+  the WalSender process in BDR (RM18868, BDR-51, BDR-60)
+
+### Resolved Issues
+
+- Improve relcache invalidation handling in the heap writer
+  This should solve missed invalidations after opening a table for DML apply.
+
+- Wait for the writer that has an XID assigned rather than one that doesn't
+  (BDR-137)
+  This fixes a deadlock in parallel apply when there is a mix of empty and
+  non-empty transactions where the non-empty ones conflict.
+
+- Correct writer state tracking for Eager/CAMO (BDR-107)
+
+- Correct and improve CAMO misconfiguration handling (BDR-105)
+  Properly abort the transaction in case of sync CAMO, so the internal
+  state of the PGL writer is cleared.
+
+- Fix transaction state tracking for CAMO/Eager
+  Do not abort the transaction at pre-commit time for non-CAMO nodes, but
+  keep it open until the final commit. Adjust the transaction state
+  tracking accordingly.
+
+- Fix MERGE handling in 2ndQPostgres 12 and 13
+  Previously we allowed the MERGE command on replicated tables without an
+  appropriate REPLICA IDENTITY, which could result in broken replication.
+
+- Fix CAMO feedback sending (RM17858)
+  Fixes stalls in CAMO feedback, improving performance compared to previous
+  3.7 beta releases. This is especially visible with parallel apply enabled.
+
+## pglogical 3.7.5
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+### Improvements
+
+- Optimize utility command processing (RT69617)
+  For commands that won't affect any DB objects and don't affect
+  pglogical, we can skip the processing early without reading any
+  pglogical or system catalogs or calling the DDL replication plugin
+  interfaces. This is an optimization for systems with a large number of
+  such utility command calls (that is, primarily applications that
+  do explicit transaction management).
+
+- Add an upgrade path from pglogical 2.
+
+### Resolved Issues
+
+- Ensure that `pglogical.standby_slot_names` takes effect when
+  `pglogical.standby_slots_min_confirmed` is at the default value
+  of -1.
+
+  On 3.6.21 and older, `pglogical.standby_slot_names` was ignored
+  if `pglogical.standby_slots_min_confirmed` was set to zero (RM19042).
+
+  Clusters satisfying the following conditions may experience inter-node
+  data consistency issues after a provider failover:
+
+  - Running pglogical 3.0.0 through to 3.6.21 inclusive;
+  - Using pglogical subscriptions and/or providers directly (BDR3-managed
+    subscriptions between pairs of BDR3 nodes are unaffected);
+  - Have a physical standby (streaming replica) of a pglogical provider
+    intended as a failover candidate;
+  - Have `pglogical.standby_slot_names` on the provider configured to list that
+    physical standby;
+  - Have left `pglogical.standby_slots_min_confirmed` unconfigured or set it
+    explicitly to zero;
+
+  This issue can cause inconsistencies between pglogical provider and subscriber
+  and/or between multiple subscribers when a provider is replaced using
+  physical replication based failover. It's possible for the subscriber(s) to
+  receive transactions committed to the pre-promotion original provider
+  that will not exist on the post-promotion replacement provider. This
+  causes provider/subscriber divergence. If multiple subscribers are
+  connected to the provider, each subscriber could also receive a different
+  subset of transactions from the pre-promotion provider, leading to
+  inter-subscriber divergence as well.
+
+  `pglogical.standby_slots_min_confirmed` now defaults to the newly
+  permitted value `-1`, meaning "all slots listed in
+  `pglogical.standby_slot_names`". The default of 0 on previous releases
+  was intended to have that effect, but instead effectively disabled
+  physical-before-logical replication.
+
+  To work around the issue on older versions the operator is advised to
+  set `pglogical.standby_slots_min_confirmed = 100` in `postgresql.conf`.
+  This has no effect unless `pglogical.standby_slot_names` is also set.
+
+  No action is generally required for this issue on BDR3 clusters.
+  BDR3 has its own separate protections to ensure consistency during
+  promotion of replicas.
+
+- Fix pglogical_create_subscriber when "-v" is passed.
+  It will make pg_ctl emit meaningful information, making it easier to
+  debug issues where pg_ctl fails.
+
+## pglogical 3.7.4
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+### Improvements
+
+- Support PostgreSQL 13
+
+- Add the `pglogical.replication_origin_status` view (RM17074)
+  Same as `pg_replication_origin_status` but does not require superuser
+  permissions to access it.
+
+- Beta support of upgrades from 3.6 (currently from 3.6.22)
+
+- Improved SystemTAP support
+
+### Resolved Issues
+
+- Fix a race condition in replication table filtering which could cause a
+  crash (RM18839)
+  The cached info about a table might get invalidated while used, which would
+  crash the backend during one of the following operations:
+
+  - reading the pglogical.tables view
+  - new subscription creation
+  - table resynchronization
+
+- Don't do transparent DDL replication on commands that affect temporary objects (RM19491, RT69170)
+  These are likely to not exist on the subscription.
+
+- Only run pgl-specific code in the deadlock detector when inside the writer (RM18402)
+  It's not relevant in user backends and would cause ERRORs there.
+
+## pglogical 3.7.3
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+### Improvements
+
+- Parallel Apply (RM6503)
+  Allows configuring the number of writers per subscription. By default this is
+  still 1, which means parallel apply is off by default, but it can be enabled
+  both globally (`pglogical.writers_per_subscription`) and per subscription
+  (the `num_writers` option in `pglogical.create_subscription()` and
+  `pglogical.alter_subscription_num_writers()`).
+
+- Split the "replicating" subscription status into two statuses
+  One is still "replicating" and is reported only if something was actually
+  replicated by the subscription since the last worker start. The other is "started"
+  and just means that the worker for the subscription is running at the time
+  the status query was executed.
+  This should reduce confusion where a subscription would report "replicating"
+  status but the worker is in a restart loop due to an apply error.
+
+- Substantial test and testing framework improvements
+
+- Improve 2ndQPostgres and BDR version dependency handling (RM17024)
+
+- Add PostgreSQL 12 support to `pglogical_create_subscriber`
+
+- Rework table resynchronization to use a separate receiver process from the
+  main one
+  This improves performance of both the main apply (primarily latency) and the
+  resynchronization itself.
+  It also fixes a potential issue where a table could be considered synchronized
+  before the catchup finished completely.
+
+### Resolved Issues
+
+- Fix crash on resynchronization of large partitioned tables (RM18154, RM15733,
+  RT68455, RT68352)
+  The resync process would keep crashing due to a cache invalidation race
+  condition if the `COPY` took very long or if there was DDL activity on the
+  copied table during the `COPY`.
+
+- Prohibit MERGE and UPSERT on a table without replica identity (RM17323, RT68146)
+  These commands can end up doing an `UPDATE`, which will break replication if
+  the table has no replica identity, as the downstream has no way to find the
+  matching row for updating.
+
+- Resolve relcache reference leak reports (RM16956)
+  Close the relation correctly in `pglogical.show_repset_tables_info()`
+
+- Resolve rare crash in HeapWriter row cleanup code (RM16956)
+
+- Resolve rare crash on worker exit (RM11686)
+  If a pglogical worker exited before it finished initialization it could
+  crash instead of exiting cleanly.
+
+- Fix apply errors in parallel index rebuild after `TRUNCATE` (RM17602)
+
+## pglogical 3.7.2
+
+This is a beta release of pglogical 3.7.
+
+pglogical 3.7 is a major release of pglogical. This release includes
+major new features as well as smaller enhancements.
+
+pglogical 3.7 introduces several major new features as well as
+architectural changes, some of which affect backward compatibility with
+existing applications.
+
+### Important Notes
+
+- Beta software is not supported in production - for application test only
+
+- Upgrade from 3.6 is not yet supported in this release.
+
+### Improvements
+
+- Add support for Postgres 12, deprecate support for older versions
+  pglogical 3.7 now requires at least Postgres 10 and supports up to
+  Postgres 12.
+
+### Resolved Issues
+
+- Keep the connection open until pglogical_create_subscriber finishes (RM13649)
+  Set idle_in_transaction_session_timeout to 0 so we avoid any user setting
+  that could close the connection and invalidate the snapshot.
+
+## pglogical 3.6.19
+
+This is a security and maintenance release for pglogical 3.6 which includes
+minor features as well as fixes for issues identified previously.
+
+### Resolved Issues
+
+- SECURITY: Set search_path to empty for internal PGLogical SQL statements (RM15373)
+  Also, fully qualify all operators used internally. PGLogical is now
+  protected from attack risks identified in CVE-2018-1058, when the
+  user application avoids the insecure coding practices identified there.
+
+- Correct parsing of direct WAL messages (RT67762)
+  Custom WAL messages emitted by PGLogical (or plugins building on top
+  of it) can be broadcast or direct types. Decoding of the latter was
+  incorrect and could in rare cases (depending on the node name) lead
+  to "insufficient data left in message" or memory allocation errors.
+  Decoding of such direct WAL messages has been corrected.
+
+- Add the pglogical.sync_failover_slots() function (RM14318)
+  Signals the supervisor process to restart the mechanism to synchronize the
+  failover slots specified in `pglogical.synchronize_failover_slot_names`.
+
+- Fix the `--extra-basebackup-args` argument passed to pg_basebackup (RM14808)
+  Corrects how the `pglogical_create_subscriber` tool passes on such
+  extra arguments to pg_basebackup.
+
+### Improvements
+
+- Add more diagnostic information to the pglogical.queue message (RM15292)
+  A new key `info` has been added to `pglogical.queue`, providing
+  additional information about a queued DDL operation.
+
+## pglogical 3.6.18
+
+This is a maintenance release for pglogical 3.6 which includes minor features
+as well as fixes for issues identified previously.
+
+### Improvements
+
+- Warn about failover issues if standby_slot_names is not set (RT66767, RM12973)
+  If pglogical.standby_slot_names is not set and a physical standby is
+  configured, failover to this standby will have data consistency issues as
+  per our documentation. However, the replica could just be a simple read
+  replica. In any case, we now warn on the replica about the potential data
+  corruption/divergence that could result if failover is desired to such a
+  standby.
+
+- Check repsets in create_subscription for pgl2 upstreams also.
+
+- Various improvements to systemtap integration.
+
+### Resolved Issues
+
+- Prevent a hang in case of an early error in the PGL writer (RT67433, RM14678)
+
+- Allow postgres to start with the pglogical library loaded but activity suspended
+  Add a start_workers commandline-only GUC to facilitate this.
+
+## pglogical 3.6.17
+
+This is a maintenance release for pglogical 3.6 which includes minor features
+as well as fixes for issues identified previously.
+
+### Improvements
+
+- Make the slot synchronization to standby more configurable (RM13111)
+  Added several new configuration parameters which tune the behavior of the
+  synchronization of logical replication slots from a primary to a standby
+  PostgreSQL server. This allows for better filtering, inclusion of
+  non-pglogical replication sets, and also using a different connection string
+  than physical replication uses (useful when a different user or database
+  should be used to collect information about slots).
+
+### Resolved Issues
+
+- Fix an issue with UPDATEs on partitions with a different physical row
+  representation than the partition root (RM13539, RT67045)
+  The partitions must have the same logical row as the partition root, but they
+  can have a different physical representation (primarily due to dropped
+  columns). UPDATEs on such partitions need to do special handling to remap
+  everything correctly; otherwise constraints and not-updated TOAST columns
+  will refer to the wrong incoming data.
+ +- Fix truncation of `\_tmp` slot names in sync slots + Long slot names could previously cause the temporary slot to be suffixed + by `\_tm` rather than the expected `\_tmp` suffix. + +### Support, Diagnostic and Logging Changes + +These changes don't directly change existing behaviour or add new user-facing +features. They are primarily of interest to technical support operations and +for advanced diagnostic analysis. + +- Expand non-invasive tracing (SystemTap, linux-perf, DTrace, etc) + support to cover inspection of the pglogical receiver's input protocol + stream, walsender output plugin protocol stream, and other useful events. + (RM13517) + +- Add a test and debug utility that decodes captured pglogical protocol streams + into human-readable form (RM13538) + +- Improve error context logging in the pglogical writer to show more + information about the transaction being applied and its origin. + +- Fix incorrectly reported commit lsn in errcontext messages from the pglogical + heap writer (RM13796). This fix only affects logging output. The writer would + report the lsn of the original forwarded transaction not the lsn of the + immediate source transaction. + +- Add subscription, local node and peer node names to heap writer errcontext + log output. + +## pglogical 3.6.16 + +This is the sixteenth minor release of the Pglogical 3.6 series. This release +includes mostly just enables BDR 3.6.16 without any significant changes to +pglogical. + +## pglogical 3.6.15 + +This is the fifteenth minor release of the Pglogical 3.6 series. This release +includes fixes for issues identified previously. + +### Resolved Issues + +- Fix backwards-compatibility to PGLogical 2 (RM13333, RT66919) + Recent releases performed additional checks during + `create_subscription`, which are fine against other PGLogical 3 + installations, but not backwards-compatible. This release corrects + the check to account for backwards-compatibility. + +- Correct a warning about GUC nest level not being reset (EE) (RM13375) + The addition of the `lock_timeout` in 3.6.14 led to a warning being + issued for CAMO and Eager All Node transaction ("GUC nest level = 1 + at transaction start"). With this release, GUC nest levels are + properly managed and the warning no longer occurs. + +### Improvements + +- Add a new `pglogical.worker_tasks` view that tracks and records pglogical's + background worker use. The view exposes information about the number of times + a given type of worker has been restarted, how long it has been running, + whether it accomplished any useful work, and more. This offers administrators + more insight into pglogical's internal activity when diagnosing problems, + especially when joined against the `pglogical.worker_error` table. + +- Add support for rate-limiting pglogical background worker (re)launches. The + new `pglogical.min_worker_backoff_delay` configuration option sets a minimum + delay between launches of all types of pglogical background workers so that + rapid respawning of workers cannot fill the log files and or excessive load + on the system that affects other operations. + + For example, if configured with `pglogical.min_worker_backoff_delay = + '500ms'`, pglogical will not retry any given background worker startup more + often than twice per second (`1000/500 = 2`). + + A simple fixed-rate factor was deemed to be the most predictable and + production-safe initial approach. 
+  Future enhancements may add a heuristic
+  delay factor based on worker type, time from start to exit, number of recent
+  launches, etc.
+
+  The launch backoff delay defaults to 0 (off) to prevent surprises for
+  upgrading users.
+
+  A setting of `pglogical.min_worker_backoff_delay = '5s'` or similar is a
+  reasonable starting point, and may become the default in a future release.
+
+### Upgrades
+
+The PostgreSQL Global Development Group has phased out support for
+PostgreSQL 9.4 on all Debian-based distributions. Following that,
+this release covers only PostgreSQL 9.5 and newer. We advise upgrading
+to a newer version.
+
+For RedHat-based distributions, this release is still available for
+PostgreSQL 9.4.
+
+## pglogical 3.6.14
+
+This is the fourteenth minor release of the Pglogical 3.6 series. This release
+includes fixes for issues identified previously.
+
+### Resolved Issues
+
+- Resolve deadlocked CAMO or Eager transactions (RM12903, RM12910)
+  Adds a `lock_timeout` as well as abort feedback to the origin node
+  to resolve distributed deadlocking due to conflicting primary key
+  updates. This also prevents frequent restarts and retries of the
+  PGL writer process for Eager All Node and sync CAMO transactions.
+
+## pglogical 3.6.12
+
+This is the twelfth minor release of the Pglogical 3.6 series. This release
+includes fixes for issues identified previously.
+
+### Improvements
+
+- Add infrastructure for `check_full_row` in DELETE operations used by
+  BDR 3.6.12 (RT66493)
+
+- Validate requested replication sets at subscribe time (RM12020, RT66310)
+  `pglogical.create_subscription()` now checks that all requested replication
+  sets actually exist on the provider node before returning. If any are
+  missing, it will raise an ERROR like:
+  `ERROR: replication set(s) "nonexistent_repset" requested by subscription are missing on provider`
+  with a DETAIL message listing the full sets requested, etc.
+  On prior releases, subscriptions with missing repsets would fail after
+  `pglogical.create_subscription(...)` returned, during initial sync. The
+  failure would only be visible in the logs, where it is much less obvious to
+  the user. Or, if schema sync was not enabled, they could appear to succeed
+  but not populate the initial table contents.
+
+### Resolved Issues
+
+- Fix a potential deadlock at CAMO partner startup. (RM12187)
+  After a restart, the CAMO partner resends all confirmations for
+  recent CAMO-protected transactions. In case these fill the internal
+  queue between the receiver and writer processes, a deadlock was
+  possible. This release ensures the receiver consumes pending
+  feedback messages, allowing the writer to make progress.
+
+## pglogical 3.6.11
+
+This is the eleventh minor release of the Pglogical 3.6 series. This release
+includes fixes for issues identified previously.
+
+### Improvements
+
+- Implement remote_commit_flush for CAMO. (RM11564)
+  Adds an additional level of robustness for CAMO, only replying when the
+  transaction is known to be committed and flushed on the partner node.
+
+- Make receiver-writer shared queues of configurable size. (RM11779)
+  Two new GUCs are introduced:
+  `pglogical.writer_input_queue_size` (default 1MB)
+  `pglogical.writer_output_queue_size` (default 1MB)
+
+- Add a warning when a user tries to set update_origin_change to skip
+
+- Add a callback to request a replay progress update. (RM6747)
+
+### Resolved Issues
+
+- Send the TimeZone GUC when replicating DDL (RT66019)
+  To ensure that timezone-dependent expressions in DDL get evaluated to the
+  same value on all nodes.
+
+- Only use isvalid indexes when searching for conflicts (RT66036)
+  Indexes currently being created or failed index creations
+  will be ignored, to prevent concurrency issues with change apply
+  and `CREATE INDEX CONCURRENTLY`.
+
+- Fix crash when replication invalidations arrive outside a transaction (RM11159)
+
+- Make the receiver apply the queue before shutting down (RM11778)
+  Upon smart shutdown, the PGL writer no longer terminates
+  immediately, requiring queued transactions to be resent, but applies
+  already-received transactions prior to shutting down.
+
+## pglogical 3.6.10
+
+This is the tenth minor release of the Pglogical 3.6 series. This release
+includes fixes for issues identified previously.
+
+### Improvements
+
+- Add support for a CAMO remote_write mode (RM6749)
+
+### Resolved Issues
+
+- COMMIT after initial sync of a table. This avoids treating the first
+  catchup xact as if it was part of the initial COPY, which could lead to
+  strange errors or false conflicts. (RM11284).
+
+- Remove the 4 billion row limit during the initial subscription
+  synchronization (RT66050).
+
+- Clean up the table replication cache when the replication set configuration
+  changes. Previously, we could use a stale cache on multiple calls for table
+  replication info on the same connection if the user changed the configuration
+  in the meantime. This could result in the initial sync missing a replicated
+  table if the configuration was changed while the subscription was being
+  created.
+
+- Remember repsets when caching table replication info.
+  If the client calls the table replication info with different parameters,
+  we need to remember them; otherwise we might return a cached value for the
+  wrong replication sets. This could result in the initial sync copying data
+  from tables which were not supposed to be replicated.
+
+## pglogical 3.6.9
+
+This is the ninth minor release of the Pglogical 3.6 series. This release
+includes minor improvements.
+
+### Improvements
+
+- Add support for local, remote_apply and remote_write. (RM11069, RT65801)
+  We now accept all the values that PostgreSQL accepts when configuring
+  `pglogical.synchronous_commit`.
+
+- Immediately forward all messages from the PGL receiver back to origin (BDR CAMO)
+  Confirmations for CAMO-protected transactions flow
+  from the PGL writer applying the transaction back to the origin node via
+  the PGL receiver. This process used to consume only one
+  confirmation message per iteration. It now consumes all pending
+  confirmations from the PGL writer and immediately sends them back to
+  the origin. It also decreases latency for BDR CAMO transactions in case
+  confirmations queue up.
+
+## pglogical 3.6.8
+
+This is the eighth minor release of the Pglogical 3.6 series. This release
+includes fixes for issues identified previously.
+
+### Resolved Issues
+
+- Use RelationGetIndexAttrBitmap to get pkey columns. (RT65676, RT65797)
+  There is no need to fetch pkey columns from the index itself; we have a
+  relcache interface that does exactly what we need, and does so in a more
+  performant way.
+
+## pglogical 3.6.7.1
+
+This is a hot-fix release on top of 3.6.7.
+
+### Resolved Issues
+
+- Fix a protocol violation after removal of an origin. (RT65671, RM10605)
+  Removal of a replication subscription may lead to a walsender trying
+  to forward data for unknown origins. Prevent emission of an invalid
+  message in that case.
+
+## pglogical 3.6.7
+
+pglogical 3.6.7 is the seventh minor release of the pglogical 3.6 series.
+This release includes minor new features as well as fixes for issues
+identified earlier.
+
+### Improvements
+
+- Replicate TRUNCATE on a partition if only the parent table is published in
+  a replication set (RT65335)
+  Previously, we'd skip such a TRUNCATE unless the partition was also published.
+- Generate `target_table_missing` for TRUNCATE which is executed against
+  a non-existent table (RT10291)
+  Allows a user-configured decision on whether it should be a
+  replication-stopping issue or not.
+- Improve performance of repeated UPDATEs and DELETEs executed on the origin
+  node by caching the replication configuration of tables in a user session.
+- Reduce CPU usage of the receiver worker when the writer queue is full (RM10370).
+
+### Resolved Issues
+
+- Fix partition replication set membership detection for multi-level
+  partitioned tables
+  Replicate changes correctly for multi-level partitioned tables, where only
+  the intermediate partition is part of a replication set (not root or leaf
+  partitions).
+- Support replicating `TRUNCATE CASCADE` on tables referenced by
+  `FOREIGN KEY` (RT65518)
+  Previously this would throw an error on the subscriber. This will only work
+  if all tables on the subscriber which have a `FOREIGN KEY` on the table
+  being `TRUNCATEd` are replicated tables. Also, it's only supported on
+  PostgreSQL 11 and higher.
+- Flush the writer between data copy and constraint restore
+  Otherwise, in some rare cases, there could still be unapplied changes when
+  creating constraints during initial synchronization of a subscription,
+  potentially causing deadlocks.
+- Fix potential writer queue corruption on very wide (1000+ columns) tables
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+- 2.2.0
+- 2.2.1
+- 2.2.2
+- 3.2.0 and higher
+
+## pglogical 3.6.6
+
+pglogical 3.6.6 is the sixth minor release of the pglogical 3.6 series. This
+release includes minor new features as well as fixes for issues identified
+earlier.
+
+### Improvements
+
+- New conflict type `update_pkey_exists` (RM9976)
+  Allows resolving conflicts when a `PRIMARY KEY` was updated to one which
+  already exists on the node which is applying the change.
+
+- Add `pglogical.apply_log_summary` (RM6596)
+  A view over `pglogical.apply_log` which shows the human-readable conflict
+  type and resolver string instead of the internal id.
+
+- Improve logging during both the initial data synchronization of a
+  subscription and the individual table resynchronization.
+
+### Resolved Issues
+
+- Make sure the writer flushes changes after the initial data copy (RT65185)
+  Otherwise, depending on timing and I/O load, the subscription might not
+  update positioning info and could get data both via the initial copy and the
+  replication stream catchup that follows.
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+- 2.2.0
+- 2.2.1
+- 2.2.2
+- 3.2.0 and higher
+
+## pglogical 3.6.5
+
+pglogical 3.6.5 is the fifth minor release of the pglogical 3.6 series. This
+release includes minor new features as well as fixes for issues identified
+in 3.6.4.
+
+### Improvements
+
+- Improve tuple lock waits during apply for deletes (RM9569)
+  This should improve the performance of replicating deletes and updates in
+  contentious situations.
+
+### Resolved Issues
+
+- Use a consistent table list in the initial data copy (RM9651/RT64809)
+  This prevents issues during an initial data copy with a concurrent table drop.
+- Cleanup worker_dsm_handle on worker detach (internal)
+  Otherwise we could leave a dangling DSM segment handle for a worker after a
+  crash, which could confuse plugins using this API.
+- Fix handling of empty eager transactions (RM9550)
+  In case no relevant change remains to be applied on a replica node,
+  the prepare of such an empty transaction now works correctly.
+- Fix the replication sets output in `pglogical.pglogical_node_info()`
+  Previously it could be garbled.
+- Reduce log level for messages when resolving
+  ERRCODE_T_R_SERIALIZATION_FAILUREs (RM9439)
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+- 2.2.0
+- 2.2.1
+- 2.2.2
+- 3.2.0 and higher
+
+Note that upgrades from 2.2.x are only supported on systems with
+`pglogical.conflict_resolution` set to `last_update_wins`.
+
+## pglogical 3.6.4
+
+pglogical 3.6.4 is the fourth minor release of the pglogical 3.6 series. This
+release includes minor new features as well as fixes for issues identified
+in 3.6.3.
+
+### New Features
+
+- Apply statistics tracking (RM9063)
+  We now track statistics about replication and resource use for individual
+  subscriptions and relations and make them available in the
+  `pglogical.stat_subscription` and `pglogical.stat_relation` views.
+  The tracking can be configured via the `pglogical.stat_track_subscription`
+  and `pglogical.stat_track_relation` configuration parameters.
+- The `replicate_inserts` option now affects the initial COPY
+  We now do the initial copy of data only if the table replicates inserts.
+
+### Resolved Issues
+
+- Fix initial data copy of multi-level partitioned tables (RT64809)
+  The initial data copy used to support only single-level partitioning;
+  multiple levels of partitioning are now supported.
+- Don't try to copy initial data twice for partitions in some situations (RT64809)
+  The initial data copy used to try to copy data from all tables that are in
+  replication sets without proper regard to partitioning. This could result in
+  partition data being copied twice if both the root partition and individual
+  partitions were published via a replication set. This is now solved; we only
+  do the initial copy on the root partition if it's published.
+- Fix handling of indexes when replicating INSERT to a partition (RT64809)
+  Closes the indexes correctly in all situations.
+- Improve partitioning test coverage (RM9311)
+  In light of the partitioning-related issues, increase the amount of
+  automated testing done against partitioned tables.
+- Fix a leak in usage of the relation cache (RT64935)
+- Fix a potential queue deadlock between writer and receiver (RT64935, RT64714)
+
+## pglogical 3.6.3
+
+pglogical 3.6.3 is the third minor release of the pglogical 3.6 series. This
+release includes minor new features as well as fixes for issues identified
+in 3.6.2.
+
+### New Features
+
+- Support the `DoNotReplicateId` special origin
+  This allows correct handling of the "do not replicate" origin, which allows
+  skipping replication of some changes. It is primarily needed internally for
+  other features.
+- Persist the last_xact_replay_timestamp (RT63881)
+  So that it's visible even if the subscription connection is down.
+- Rework the documentation build procedure for better consistency between HTML
+  and PDF documentation
+  This mainly changes the way docs are structured into chapters so that there
+  is a single source of the chapter list and ordering for both PDF and HTML docs.
+
+### Resolved Issues
+
+- Invalidate the local cache when adding a new invalidation
+  Fixes visibility of changes in the catalog cache view of the transaction
+  which made those changes. Not triggered yet by any code, but it will be in
+  future releases.
+- Open indexes after partition routing
+  Otherwise we might be opening indexes of the root table rather than the
+  partition, causing issues with conflict handling for `INSERT` operation
+  replication.
+
+## pglogical 3.6.2
+
+pglogical 3.6.2 is the second minor release of the pglogical 3.6 series. This
+release includes minor new features as well as fixes for issues identified
+in 3.6.1.
+
+### New Features
+
+- Support DEFERRED UNIQUE indexes
+  They used to work only in limited cases before this release.
+- Support covering UNIQUE indexes (RT64650)
+  The use of covering UNIQUE indexes could previously result in ambiguous
+  error messages in some cases.
+- Add a `--log-file` option to `pglogical_create_subscriber` (RT64129)
+  So that the log can be saved somewhere other than the current working
+  directory.
+
+### Resolved Issues
+
+- Fix the error message when the database name is missing from the connection
+  string in `pglogical_create_subscriber` (RT64129)
+  The previous message was ambiguous.
+- Raise an error when an unknown parameter is specified for
+  `pglogical_create_subscriber` (RT64129)
+  Otherwise, mistakes in command-line arguments could be silently ignored.
+- Solve a timing issue with workers exiting while another one tries to start
+  using the same worker slot
+  Before, we could corrupt the worker information, causing the newly starting
+  worker to crash (and have to start again later); this will no longer happen.
+- Set statement time at the start of every transaction in pglogical workers (RT64572)
+  Fixes reporting of `xact_start` in `pg_stat_activity`.
+
+## pglogical 3.6.1
+
+pglogical 3.6.1 is the first minor release of the pglogical 3.6 series. This
+release includes minor new features and fixes, including all the fixes from
+3.6.0.1.
+
+### New Features
+
+- Add slot failover documentation
+- Add `pglogical.get_sub_progress_timestamp` for retrieving the origin
+  timestamp of the last change committed by the subscription
+
+### Resolved Issues
+
+- Stop retrying subscription synchronization after an unrecoverable error (RT64463)
+  If the schema synchronization failed (which is an unrecoverable error),
+  don't keep retrying forever. Instead, mark the subscription synchronization
+  as failed and disable the subscription.
+- Improve handling and messaging with missing replication sets in the output
+  plugin (RT64451)
+  Report all missing and found sets, and make sure the sets are looked up
+  using the current snapshot.
+
+## pglogical 3.6.0.1
+
+pglogical 3.6.0.1 is the first bug-fix release in the pglogical 3.6 series.
+
+### Resolved Issues
+
+- Improve synchronous `remote_write` replication performance (RT64397)
+- Re-add support for the binary protocol
+
+## pglogical 3.6.0
+
+Version 3.6 of pglogical is a major update which brings performance
+improvements, better conflict handling, bug fixes, and the infrastructure
+necessary for BDR 3.6.
+
+### New Features
+
+- Significant replication performance improvement
+  - Cache table synchronization state
+  - Only send keepalives when necessary
+  - Only do flush when necessary
+  - Serialize transactions in fewer cases in walsender (2ndQPostgres)
+- Improved replication position reporting which is more in line with how
+  physical streaming replication reports it
+- Conflict detection and resolution improvements
+  - Add new types of conflicts (like `target_table_missing`)
+  - Add new types of conflict resolvers
+  - Make conflict resolution configurable by subscription and conflict type
+  - Improve conflict detection for updates
+
+### Resolved Issues
+
+- Don't try to replicate REINDEX on temporary indexes
+
+### Other Improvements
+
+- Fix potential message parsing error for two-phase commits
+- Make initial COPY of data interruptible
diff --git a/product_docs/docs/pglogical/3.7/replication-sets.mdx b/product_docs/docs/pglogical/3.7/replication-sets.mdx
new file mode 100644
index 00000000000..a7930fd0255
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/replication-sets.mdx
@@ -0,0 +1,466 @@
+---
+navTitle: Replication Sets
+title: Replication sets
+originalFilePath: replication-sets.md
+
+---
+
+Replication sets provide a mechanism to control which tables in the database
+will be replicated and which actions on those tables will be replicated.
+
+Each replication set can specify individually whether `INSERTs`, `UPDATEs`,
+`DELETEs` and `TRUNCATEs` on the set are replicated. Every table can be in
+multiple replication sets, and every subscriber can subscribe to multiple
+replication sets as well. The resulting set of tables and actions replicated
+is the union of the sets the table is in. Tables are not replicated until
+they are added to a replication set.
+
+There are three preexisting replication sets, named "default",
+"default_insert_only" and "ddl_sql". The "default" replication set is defined
+to replicate all changes to tables in it. The "default_insert_only" replication
+set only replicates INSERTs and is meant for tables that don't have a primary
+key (see the [Restrictions](restrictions) section for details). The "ddl_sql"
+replication set is defined to replicate schema changes specified by
+`pglogical.replicate_ddl_command`.
+
+*Note: Tables are **not** added automatically to the "default" replication
+set; the name "default" just means it exists by default. This behavior can be
+changed using `pglogical.alter_replication_set`.*
+
+## Behavior of partitioned tables
+
+From PostgreSQL 11 onwards, pglogical supports partitioned tables
+transparently. This means that a partitioned table can be added to a
+replication set and changes to any of the partitions will be replicated
+downstream.
+
+The partitioning definition on the subscription side can be set up differently
+from the one on the provider. This means that one can also replicate a
+partitioned table to a single table, or a single table to a partitioned table,
+or a partitioned table to a differently-partitioned table (repartitioning).
+
+It's also possible to add individual partitions to the replication set, in
+which case they will be replicated like regular tables (to the table of the
+same name as the partition on the downstream). This has some performance
+advantages if the partitioning definition is the same on both provider and
+subscriber, as the partitioning logic does not have to be executed.
+
+**Note: If the root-partitioned table is part of any replication set,
+memberships of individual partitions are ignored and only the membership of
+said root table will be taken into account.**
+
+### Older versions of PostgreSQL
+
+In PostgreSQL 10 and older, pglogical only allows the replication of
+partitions directly to other partitions. This means the partitioned table
+itself cannot be added to a replication set and can't be the target of
+replication on the subscriber either (one can't replicate a normal table to a
+partitioned table).
+
+## Replication set manipulation interfaces
+
+The following functions are provided for managing the replication sets:
+
+### pglogical.create_replication_set
+
+This function creates a new replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.create_replication_set (
+    set_name name,
+    replicate_insert boolean,
+    replicate_update boolean,
+    replicate_delete boolean,
+    replicate_truncate boolean,
+    autoadd_tables boolean,
+    autoadd_sequences boolean,
+    autoadd_existing boolean
+)
+```
+
+#### Parameters
+
+- `set_name` - name of the set, must be unique
+- `replicate_insert` - specifies if `INSERT` is replicated; default true
+- `replicate_update` - specifies if `UPDATE` is replicated; default true
+- `replicate_delete` - specifies if `DELETE` is replicated; default true
+- `replicate_truncate` - specifies if `TRUNCATE` is replicated; default true
+- `autoadd_tables` - specifies if newly created tables should be automatically
+  added to the new replication set; default false
+- `autoadd_sequences` - specifies if newly created sequences should be automatically
+  added to the new replication set; default false
+- `autoadd_existing` - this, in combination with `autoadd_tables` or `autoadd_sequences`,
+  specifies if any existing tables and sequences should be added as well
+
+The autoadd options will ignore tables that are in the
+`information_schema` or `pg_catalog` schemas or are part of an extension.
+
+The autoadd options also allow automatic removal of tables from the
+replication set, so there is no dependency check on replication membership
+when a table which is part of an autoadd replication set is being dropped.
+
+If you want to replicate tables which are part of some extension, you still
+have to add them manually.
+
+### pglogical.alter_replication_set
+
+This function changes the parameters of an existing replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_replication_set (
+    set_name name,
+    replicate_insert boolean,
+    replicate_update boolean,
+    replicate_delete boolean,
+    replicate_truncate boolean,
+    autoadd_tables boolean,
+    autoadd_sequences boolean
+)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `replicate_insert` - specifies if `INSERT` is replicated
+- `replicate_update` - specifies if `UPDATE` is replicated
+- `replicate_delete` - specifies if `DELETE` is replicated
+- `replicate_truncate` - specifies if `TRUNCATE` is replicated
+- `autoadd_tables` - specifies if newly created tables should be automatically
+  added to the new replication set
+- `autoadd_sequences` - specifies if newly created sequences should be automatically
+  added to the new replication set
+
+If any of these replication set parameters is NULL (which is the default
+value if nothing else is specified), the current setting for that parameter
+will remain unchanged.
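+
+As an illustration, a minimal sketch using named-parameter notation (the set
+name `myset` is a hypothetical example); parameters left unspecified stay
+NULL and therefore unchanged:
+
+```
+SELECT pglogical.alter_replication_set(
+    set_name := 'myset',
+    replicate_truncate := false);
+```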
+
+### pglogical.drop_replication_set
+
+Removes the replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.drop_replication_set(set_name text)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+
+### pglogical.replication_set_add_table
+
+Adds a table to a specified existing replication set, optionally requesting
+resynchronization by subscribers.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_add_table (
+    set_name name,
+    relation regclass,
+    synchronize_data boolean,
+    columns text[],
+    row_filter text
+)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `relation` - name or OID of the table to be added to the set
+- `synchronize_data` - if true, the table data is synchronized on all
+  subscribers which are subscribed to the given replication set; default false
+- `columns` - list of columns to replicate. Normally, when all columns
+  should be replicated, this will be set to NULL, which is the default.
+- `row_filter` - row filtering expression; default NULL (no filtering).
+  See [Row Filtering on Provider](#row-filtering-on-provider) for more info.
+
+**WARNING: Use caution when synchronizing data with a valid row filter.**
+Using `synchronize_data=true` with a valid `row_filter` is like a one-time
+operation for a table. Executing it again with a modified `row_filter` won't
+synchronize data to the subscriber. Subscribers may need to call
+`pglogical.alter_subscription_resynchronize_table()` to fix it.
+
+Also, note that if `synchronize_data` is enabled, a synchronization request is
+scheduled on each subscriber and actioned asynchronously. Adding to the
+replication set *does not wait for synchronization to complete*.
+
+To wait until the resync has completed, first, on the provider, run:
+
+```
+  SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL);
+```
+
+To ensure each subscriber has received the request, then on each subscriber
+run:
+
+```
+  SELECT pglogical.wait_for_subscription_sync_complete('sub_name');
+```
+
+**NOTE**: There is currently no function to alter the row filter or columns of
+a table's replication set membership (RM#5960). However, you can use a *single
+transaction* to remove the table from the replication set and then re-add it
+with the desired row filter and column filter. Make sure to set
+`synchronize_data := false`. This provides a seamless transition from the old
+to the new membership and will not skip or lose any rows from concurrent
+transactions.
+
+### pglogical.replication_set_add_all_tables
+
+Adds all tables in the given schemas.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_add_all_tables (
+    set_name name,
+    schema_names text[],
+    synchronize_data boolean
+)
+```
+
+Only existing tables are added; any tables created later will not
+be added automatically. To see how to automatically add tables
+to the correct replication set at creation time, see
+[Automatic assignment of replication sets for new tables](#automatic-assignment-of-replication-sets-for-new-tables).
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `schema_names` - array of names of existing schemas from which tables
+  should be added
+- `synchronize_data` - if true, the table data is synchronized on all
+  subscribers which are subscribed to the given replication set; default false
+
+### pglogical.replication_set_remove_table
+
+Removes a table from a specified existing replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_remove_table(set_name name, relation regclass)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `relation` - name or OID of the table to be removed from the set
+
+### pglogical.replication_set_add_sequence
+
+Adds a sequence to a replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_add_sequence (
+    set_name name,
+    relation regclass,
+    synchronize_data boolean
+)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `relation` - name or OID of the sequence to be added to the set
+- `synchronize_data` - if true, the sequence value will be synchronized
+  immediately; default false
+
+### pglogical.replication_set_add_all_sequences
+
+Adds all sequences from the given schemas.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_add_all_sequences (
+    set_name name,
+    schema_names text[],
+    synchronize_data boolean
+)
+```
+
+Only existing sequences are added; any sequences created later will not
+be added automatically.
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `schema_names` - array of names of existing schemas from which sequences
+  should be added
+- `synchronize_data` - if true, the sequence value will be synchronized
+  immediately; default false
+
+### pglogical.replication_set_remove_sequence
+
+Removes a sequence from a replication set.
+
+#### Synopsis
+
+```postgresql
+pglogical.replication_set_remove_sequence(set_name name, relation regclass)
+```
+
+#### Parameters
+
+- `set_name` - name of the existing replication set
+- `relation` - name or OID of the sequence to be removed from the set
+
+You can view the information about which table is in which set by querying the
+`pglogical.tables` view.
+
+## Automatic assignment of replication sets for new tables
+
+The event trigger facility can be used for describing rules which define
+replication sets for newly created tables.
+
+Example:
+
+```postgresql
+CREATE OR REPLACE FUNCTION pglogical_assign_repset()
+RETURNS event_trigger AS $$
+DECLARE obj record;
+BEGIN
+    FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
+    LOOP
+        IF obj.object_type = 'table' THEN
+            IF obj.schema_name = 'config' THEN
+                PERFORM pglogical.replication_set_add_table('configuration', obj.objid);
+            ELSIF NOT obj.in_extension THEN
+                PERFORM pglogical.replication_set_add_table('default', obj.objid);
+            END IF;
+        END IF;
+    END LOOP;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE EVENT TRIGGER pglogical_assign_repset_trg
+    ON ddl_command_end
+    WHEN TAG IN ('CREATE TABLE', 'CREATE TABLE AS')
+    EXECUTE PROCEDURE pglogical_assign_repset();
+```
+
+The above example will put all new tables created in the schema `config` into
+the replication set `configuration`, and all other new tables which are not
+created by extensions will go to the `default` replication set.
+
+## Additional functions
+
+### pglogical.synchronize_sequence
+
+Pushes sequence state to all subscribers.
+
+#### Synopsis
+
+```
+pglogical.synchronize_sequence(relation regclass)
+```
+
+Unlike the subscription and table synchronization functions, this function
+should be run on the provider. It forces an update of the tracked sequence
+state which will be consumed by all subscribers (replication set filtering
+still applies) once they replicate the transaction in which this function has
+been executed.
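+
+For example, after a bulk data load on the provider, you might push the
+current state of a sequence to all subscribers (the sequence name here is a
+hypothetical example):
+
+```
+SELECT pglogical.synchronize_sequence('public.orders_id_seq'::regclass);
+```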
+
+#### Parameters
+
+- `relation` - name of an existing sequence, optionally qualified
+
+## Row Filtering on Provider
+
+On the provider side, row filtering can be done by specifying the `row_filter`
+parameter for the `pglogical.replication_set_add_table` function. The
+`row_filter` is a normal PostgreSQL expression with the same limitations as a
+[`CHECK` constraint](https://www.postgresql.org/docs/current/static/ddl-constraints.html#DDL-CONSTRAINTS-CHECK-CONSTRAINTS).
+
+You can see which row filters are active in the `pglogical.tables` view.
+
+The table's column(s) are exposed to the row filter as simple identifiers;
+there's no qualifier or namespace.
+
+Unlike a CHECK constraint's body, the row filter is passed as a string which
+is parsed and checked by pglogical. So to avoid quoting issues you should use
+PostgreSQL's dollar-quoting, like this:
+
+```
+SELECT pglogical.replication_set_add_table(
+    'setname', 'tblname'::regclass,
+    synchronize_data := false,
+    row_filter := $FILTER$ id > 0 $FILTER$
+);
+```
+
+A simple `row_filter` would look something like `row_filter := 'id > 0'`,
+which would replicate only those rows where the value of column `id` is
+greater than zero. This *will not affect any already-committed rows pending
+replication, or any already-replicated rows*.
+
+**Important**: Caveats apply when re-synchronizing tables with row filters
+using `replication_set_add_table`. See `pglogical.replication_set_add_table`.
+
+### Writing safer row filters
+
+Be very cautious when writing row filter expressions, and keep them as simple
+as possible. If a row filter expression raises an error during replication, it
+is generally necessary to drop and re-create the subscription, resynchronizing
+*all* tables, not just the table with the problem row filter. So row filters
+should be simple and defensively written. A non-exhaustive list of rules for
+writing filters is that they:
+
+- *Should* be simple expressions wherever possible. Try to use only
+  built-in PostgreSQL operators and `IMMUTABLE` functions if you can.
+
+- *Must* avoid using any expression that could raise an `ERROR` at runtime,
+  such as casting from `text` to a more strictly validated data type. They
+  must tolerate any value that the table's constraints permit to appear in
+  the table.
+
+- *May* use `VOLATILE` or `STABLE` functions, but any functions must obey the
+  same constraints as the filter expression itself.
+
+  E.g. you can call `random()` but not `txid_current()` or
+  `my_audit_log_function()`.
+
+- *May* call user-defined functions written in SQL, PL/pgSQL, or (with care) C.
+  Use of other languages is untested and not recommended. PL/pgSQL functions
+  *must not* use `EXCEPTION` blocks, and may have other as-yet-undiscovered
+  issues, so their use is not recommended. Stick to SQL where possible.
+
+- *Should not* attempt to access any tables. Only the column values should
+  be used.
+
+  Direct use of subqueries in the row filter expression is blocked.
+
+  It's possible to call a user-defined function within the filter, and that
+  *can* access table contents. This is *not recommended* and may be subject to
+  surprising behaviour. The function *must* only access tables in
+  `pg_catalog.*` or tables marked with the `user_catalog_table=true` attribute.
+  Accessing other tables will not raise an error, but may cause undefined
+  behaviour, errors, or crashes.
+
+- *Must never* attempt any write operation or anything that assigns a
+  transaction-id, similar to queries on a read replica. Attempting writes
+  will break replication.
+
+- *May* safely use columns of the filtered table that are not part of the
+  replication set's column list. Filtering happens on the provider side,
+  so non-replicated columns will have their values accessible. This lets
+  you do things like pre-compute complex filter criteria in triggers.
+
+- *Should not* rely on session state, since the `row_filter` is running inside
+  the replication session. Session-specific expressions such as
+  `CURRENT_USER` will have the values of the replication session, not of the
+  session which did the writes. The same is true for GUCs etc.
+
+### Changing row filters
+
+To change a row filter expression on a table, use a single transaction to
+remove the table from the replication set, then add it again with the new row
+filter expression. Do not specify data sync, and make sure to explicitly
+repeat the set of replicated columns. You can check the `pglogical.tables`
+view for the old column set and row filter.
+
+See `pglogical.replication_set_add_table`.
diff --git a/product_docs/docs/pglogical/3.7/restrictions.mdx b/product_docs/docs/pglogical/3.7/restrictions.mdx
new file mode 100644
index 00000000000..5369d3a50b1
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/restrictions.mdx
@@ -0,0 +1,114 @@
+---
+title: Restrictions
+originalFilePath: restrictions.md
+
+---
+
+pglogical currently has the following restrictions or missing functionality.
+These might be addressed in future releases.
+
+## Superuser is required
+
+Currently, pglogical replication and administration require superuser
+privileges. This may later be extended to more granular privileges.
+
+## UNLOGGED and TEMPORARY not replicated
+
+`UNLOGGED` and `TEMPORARY` tables will not and cannot be replicated, similar to
+physical streaming replication.
+
+## One database at a time
+
+To replicate multiple databases, you must set up individual provider/subscriber
+relationships for each. There is no way to configure replication for all
+databases in a PostgreSQL install at once.
+
+## PRIMARY KEY or REPLICA IDENTITY required
+
+When replicating `UPDATE`s and `DELETE`s for tables that lack a `PRIMARY
+KEY`, the `REPLICA IDENTITY` must be set to `FULL`. However, it's important
+to note that without a `PRIMARY KEY`, every `UPDATE` or `DELETE` will produce a
+sequential scan on the table, which will have a severe detrimental effect on
+replication performance and, consequently, on replication lag.
+
+Note: On regular PostgreSQL nodes it's only possible to set the
+`REPLICA IDENTITY` to `FULL` via `ALTER TABLE`; however, on pglogical nodes,
+tables can be created with `REPLICA IDENTITY FULL` directly using the following
+syntax:
+
+```postgresql
+CREATE TABLE name (column_a int) WITH (replica_identity = full);
+```
+
+See the PostgreSQL documentation on `REPLICA IDENTITY` for details.
+
+## DDL
+
+There are several limitations of DDL replication in pglogical; for details,
+check the [DDL Replication](ddl) chapter.
+
+## Sequences
+
+The state of sequences added to replication sets is replicated periodically
+and not in real-time. A dynamic buffer is used for the value being replicated,
+so that the subscribers actually receive the future state of the sequence.
+This minimizes the chance of the subscriber's notion of the sequence's
+last_value falling behind, but does not completely eliminate the possibility.
+
+It might be desirable to call `synchronize_sequence` to ensure all subscribers
+have up-to-date information about a given sequence after "big events" in the
+database, such as data loading or during an online upgrade.
+
+The types bigserial and bigint are recommended for sequences on multi-node
+systems, as smaller sequence types might reach the end of the sequence space
+quickly.
+
+Users who want to have independent sequences on the provider and subscriber
+can avoid adding sequences to replication sets and create sequences with a step
+interval equal to or greater than the number of nodes, and then set a different
+offset on each node. Use the `INCREMENT BY` option for `CREATE SEQUENCE` or
+`ALTER SEQUENCE`, and use `setval(...)` to set the start point.
+
+## PostgreSQL Version differences
+
+PGLogical can replicate across PostgreSQL major versions. Despite that,
+long-term cross-version replication is not considered a design target, though
+it may often work. Issues where changes are valid on the provider but not on
+the subscriber are more likely to arise when replicating across versions.
+
+It is safer to replicate from an old version to a newer version, since
+PostgreSQL maintains solid backward compatibility but only limited forward
+compatibility. Initial schema synchronization is only supported when
+replicating between the same version of PostgreSQL or from a lower version to
+a higher version.
+
+Replicating between different minor versions makes no difference at all.
+
+### pglogical.pglogical_version
+
+This function retrieves the textual representation of the PGL version that is
+currently in use.
+
+```
+SELECT pglogical.pglogical_version();
+```
+
+### pglogical.pglogical_version_num
+
+This function retrieves a numerical representation of the PGL version that is
+currently in use. Version numbers are monotonically increasing, allowing this
+value to be used for less-than and greater-than comparisons.
+
+## Database encoding differences
+
+PGLogical does not support replication between databases with different
+encodings. We recommend using `UTF-8` encoding in all replicated databases.
+
+## Large objects
+
+PostgreSQL's logical decoding facility does not support decoding changes
+to large objects, so pglogical cannot replicate large objects. This does
+not restrict the use of large values in normal columns.
+
+## Additional restrictions
+
+Please note that additional restrictions may apply depending on which
+[writer](writer) is being used and which version of PostgreSQL is being
+used. These additional restrictions are documented in their respective
+sections (i.e., every writer documents its own additional restrictions).
diff --git a/product_docs/docs/pglogical/3.7/subscriptions/index.mdx b/product_docs/docs/pglogical/3.7/subscriptions/index.mdx
new file mode 100644
index 00000000000..011f84d7054
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/subscriptions/index.mdx
@@ -0,0 +1,963 @@
+---
+navigation:
+  - subscriptions
+  - pglogical-writer
+redirects:
+  - ../subscriptions
+navTitle: Subscriptions
+title: Subscription Overview
+originalFilePath: subscriptions.md
+
+---
+
+A subscription is the receiving side (or downstream) of the pglogical
+replication setup. Just like on the upstream, the subscription first needs a
+local node to be created (see [Nodes](nodes)).
+
+## Subscription information
+
+## pglogical.stat_subscription
+
+Apply statistics for each subscription. Only contains data if the tracking
+is enabled.
+
+## `pglogical.stat_subscription` Columns
+
+| Column               | Type                     | Description                                                                                                          |
+| -------------------- | ------------------------ | -------------------------------------------------------------------------------------------------------------------- |
+| sub_name             | name                     | Name of the subscription                                                                                             |
+| subid                | oid                      | OID of the subscription                                                                                              |
+| nconnect             | bigint                   | Number of times this subscription has connected upstream                                                             |
+| ncommit              | bigint                   | Number of commits this subscription did                                                                              |
+| nabort               | bigint                   | Number of aborts the writer did for this subscription                                                                |
+| nerror               | bigint                   | Number of errors the writer has hit for this subscription                                                            |
+| nskippedtx           | bigint                   | Number of transactions skipped by the writer for this subscription (currently normally 0 for a pgl subscription)     |
+| ninsert              | bigint                   | Number of inserts this subscription did                                                                              |
+| nupdate              | bigint                   | Number of updates this subscription did                                                                              |
+| ndelete              | bigint                   | Number of deletes this subscription did                                                                              |
+| ntruncate            | bigint                   | Number of truncates this subscription did                                                                            |
+| nddl                 | bigint                   | Number of DDL operations this subscription has executed                                                              |
+| ndeadlocks           | bigint                   | Number of errors that were caused by deadlocks                                                                       |
+| nretries             | bigint                   | Number of retries the writer did (without going for a full restart/reconnect)                                        |
+| shared_blks_hit      | bigint                   | Total number of shared block cache hits by the subscription                                                          |
+| shared_blks_read     | bigint                   | Total number of shared blocks read by the subscription                                                               |
+| shared_blks_dirtied  | bigint                   | Total number of shared blocks dirtied by the subscription                                                            |
+| shared_blks_written  | bigint                   | Total number of shared blocks written by the subscription                                                            |
+| blk_read_time        | double precision         | Total time the subscription spent reading blocks, in milliseconds (if `track_io_timing` is enabled, otherwise zero)  |
+| blk_write_time       | double precision         | Total time the subscription spent writing blocks, in milliseconds (if `track_io_timing` is enabled, otherwise zero)  |
+| connect_time         | timestamp with time zone | Time when the current upstream connection was established, NULL if not connected                                     |
+| last_disconnect_time | timestamp with time zone | Time when the last upstream connection was dropped                                                                   |
+| start_lsn            | pg_lsn                   | LSN from which this subscription requested to start replication from the upstream                                    |
+| retries_at_same_lsn  | bigint                   | Number of times the subscription was restarted from the same LSN value                                               |
+| curr_ncommit         | bigint                   | Number of commits this subscription did after the current connection was established                                 |
+
+## `pglogical.stat_relation`
+
+Apply statistics for each relation. Only contains data if the tracking
+is enabled and something was replicated for a given relation.
+
+## `pglogical.stat_relation` Columns
+
+| Column              | Type             | Description                                                                                                           |
+| ------------------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------|
+| nspname             | name             | Name of the relation's schema                                                                                         |
+| relname             | name             | Name of the relation                                                                                                  |
+| relid               | oid              | OID of the relation                                                                                                   |
+| total_time          | double precision | Total time spent processing replication for the relation                                                              |
+| ninsert             | bigint           | Number of inserts replicated for the relation                                                                         |
+| nupdate             | bigint           | Number of updates replicated for the relation                                                                         |
+| ndelete             | bigint           | Number of deletes replicated for the relation                                                                         |
+| ntruncate           | bigint           | Number of truncates replicated for the relation                                                                       |
+| shared_blks_hit     | bigint           | Total number of shared block cache hits for the relation                                                              |
+| shared_blks_read    | bigint           | Total number of shared blocks read for the relation                                                                   |
+| shared_blks_dirtied | bigint           | Total number of shared blocks dirtied for the relation                                                                |
+| shared_blks_written | bigint           | Total number of shared blocks written for the relation                                                                |
+| blk_read_time       | double precision | Total time spent reading blocks for the relation, in milliseconds (if `track_io_timing` is enabled, otherwise zero)   |
+| blk_write_time      | double precision | Total time spent writing blocks for the relation, in milliseconds (if `track_io_timing` is enabled, otherwise zero)   |
+| lock_acquire_time   | double precision | Total time spent acquiring locks on the relation (if `pglogical.track_apply_lock_timing` is enabled, otherwise zero)  |
+
+## `pglogical.replication_status`
+
+Replication status view for each subscription. We consider replication
+to be blocked when the subscription has restarted from the same LSN at
+least twice and no transaction has yet been applied after the
+current upstream connection was established. If the very first
+transaction after restart is very big and is still being applied, the
+`replication_blocked` result may be wrong.
+
+## `pglogical.replication_status` Columns
+
+| Column              | Type                     | Description                                                |
+| ------------------- | ------------------------ | ---------------------------------------------------------- |
+| sub_name            | name                     | Name of the subscription                                   |
+| connected           | boolean                  | Is the subscription connected to the upstream?             |
+| replication_blocked | boolean                  | Is the replication currently blocked?                      |
+| connect_time        | timestamp with time zone | Time when the current connection was established           |
+| disconnect_time     | timestamp with time zone | Time when the last connection was dropped                  |
+| uptime              | interval                 | Duration for which the current connection has been active  |
+
+## pglogical.local_sync_status
+
+A view of the local synchronization status. Columns include the subscription
+ID, sync status, and kind.
+
+## pglogical.show_workers
+
+A function that returns information about running workers: the worker PID,
+role, and subscription ID.
+
+## SQL interfaces
+
+### pglogical.create_subscription
+
+Creates a subscription from the current node to the provider node. The command
+does not block; it just initiates the action.
+
+#### Synopsis
+
+```postgresql
+pglogical.create_subscription (
+    subscription_name name,
+    provider_dsn text,
+    replication_sets text[],
+    synchronize_structure boolean,
+    synchronize_data boolean,
+    create_slot boolean,
+    slot_name text,
+    forward_origins text[],
+    strip_origins boolean,
+    num_writers int,
+    apply_delay interval,
+    writer name,
+    writer_options text[]
+)
+```
+
+The `subscription_name` is used as `application_name` by the replication
+connection.
+This means that it's visible in the `pg_stat_replication`
+monitoring view. It can also be used in `synchronous_standby_names` when
+pglogical is used as part of a synchronous replication setup.
+
+Subscription setup is asynchronous. `pglogical.create_subscription` returns
+after creating the replication slot (unless `create_slot` is false) but
+before the subscription is synchronized and streaming. Use
+`pglogical.wait_for_subscription_sync_complete` to wait until the subscription
+is up and has completed any requested schema and/or data sync.
+
+`synchronize_structure` internally uses `pg_dump` and `pg_restore` to copy
+schema definitions. If more than one upstream is being subscribed to, only use
+`synchronize_structure` on the first one, because it cannot de-duplicate
+schema definitions.
+
+`synchronize_data` internally uses `COPY` to unload and load the data
+from the provider.
+
+If both `synchronize_structure` and `synchronize_data` are used, take care to
+create table definitions, then copy data, and only create indexes etc. at the
+end.
+
+**Note**: An alternative to `pglogical.create_subscription` is the
+`pglogical_create_subscriber` tool, which takes a `pg_basebackup` or uses a
+pre-existing streaming replica of the provider node and converts it into a new
+logical replica. It's often much faster where network bandwidth is sufficient,
+but cannot filter the initial dump to exclude some databases/tables/etc.
+
+**Note**: `pglogical.create_subscription` will appear to hang (it will wait
+indefinitely without returning) if the database pointed to by `provider_dsn` is
+on the same PostgreSQL instance as the subscriber and `create_slot` is true.
+This happens because the replication slot creation command on the provider
+waits for all transactions that were in progress at the time it started to
+commit, but the transaction running `pglogical.create_subscription` cannot
+commit until after the create replication slot command returns, so the two
+deadlock. The PostgreSQL deadlock detector does not identify this condition,
+as it is not a deadlock on heavyweight locks and is not visible in the
+`pg_locks` view. To make this fairly uncommon use case work, manually create a
+logical replication slot for the pglogical subscription on the provider using
+the `'pglogical_output'` output plugin, e.g.
+
+```
+SELECT pg_catalog.create_logical_replication_slot(
+    pglogical.gen_slot_name(
+        'SUBSCRIBER_DBNAME',
+        'PROVIDER_NODE_NAME',
+        'SUBSCRIPTION_NAME'
+    ), 'pglogical_output');
+```
+
+then specify `create_slot := false` to `pglogical.create_subscription()`. You
+may alternately choose your own replication slot name instead of using
+`gen_slot_name`, then pass it as the `slot_name` parameter to
+`create_subscription`.
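+
+Putting those two steps together, a minimal sketch of this same-instance
+workflow might look like the following (the node, subscription, and DSN
+values are hypothetical placeholders):
+
+```
+-- On the provider database: pre-create the slot.
+SELECT pg_catalog.create_logical_replication_slot(
+    pglogical.gen_slot_name('subdb', 'provider1', 'sub1'),
+    'pglogical_output');
+
+-- On the subscriber database: reuse the pre-created slot.
+SELECT pglogical.create_subscription(
+    subscription_name := 'sub1',
+    provider_dsn := 'host=localhost port=5432 dbname=providerdb',
+    create_slot := false);
+```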
+
+#### Parameters
+
+- `subscription_name` - name of the subscription; must be unique
+- `provider_dsn` - connection string to a provider
+- `replication_sets` - array of replication sets to subscribe to; these must
+  already exist; default is "{default,ddl_sql}"
+- `synchronize_structure` - specifies whether to synchronize structure from
+  provider to the subscriber; default false
+- `synchronize_data` - specifies whether to synchronize data from provider to
+  the subscriber; default true
+- `create_slot` - set to false to suppress automatic creation of a logical
+  replication slot on the provider in order to use a pre-created one; default true
+- `slot_name` - override the autogenerated replication slot name pglogical
+  generates in order to supply your own; default is the same as that generated
+  by `pglogical.pglogical_gen_slot_name()`
+- `forward_origins` - array of replication origin names to forward. Currently
+  the only supported values are: an empty array, meaning don't forward any
+  changes that didn't originate on the provider node (this is useful for
+  two-way replication between the nodes); or "{all}", which means replicate
+  all changes no matter what their origin is. The default is "{all}"
+- `apply_delay` - how much to delay replication; default is 0 seconds.
+  Mainly used for application testing, but also useful for delayed
+  standbys.
+- `strip_origins` - determines whether to remove origin names from
+  forwarded data, making it look like the data originated on the local node,
+  and allowing the data to be forwarded to a subscription in the same instance
+  (default is "false", which keeps origin info). The negative effect is that
+  it makes it impossible to redirect the subscription to the first node.
+- `num_writers` - number of parallel writers for this subscription; -1
+  means the subscription will use the default as specified by the GUC
+  pglogical.writers_per_subscription. Valid values are either -1 or a
+  positive integer.
+- `writer` - which writer to use for writing the data from the replication
+  stream. Available writers currently are `local`, `HeapWriter` and
+  `SPIWriter`; `local` is an alias that automatically selects either
+  `HeapWriter` or `SPIWriter` based on the version of PostgreSQL being used.
+- `writer_options` - writer-specific options as an array of keys and values
+
+### `pglogical_create_subscriber`
+
+`pglogical_create_subscriber` isn't a SQL function; it's a standalone command
+that provides an alternative way to create a subscriber. By default it will
+take a `pg_basebackup` of the provider node and convert that into a `pglogical`
+subscriber.
+
+This can be a lot faster than `pglogical.create_subscription` where network and
+disk bandwidth is sufficient. However, it cannot filter out individual tables
+or table subsets, and it copies all databases whether or not they are intended
+for use with pglogical. It does not respect replication sets for the initial
+data copy. Unlike `pglogical.create_subscription`, it copies indexes rather
+than rebuilding them on the subscriber side.
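+
+For illustration, a hypothetical invocation might look like the following
+(the data directory, names, and DSNs are placeholders; see the full option
+list below):
+
+```shell
+pglogical_create_subscriber \
+    --pgdata=/var/lib/pgsql/subscriber_data \
+    --subscriber-name=subscriber1 \
+    --subscriber-dsn='port=5433 dbname=appdb' \
+    --provider-dsn='host=provider1 dbname=appdb'
+```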
+
+It may be necessary to specify a customized `postgresql.conf` and/or `pg_hba.conf`
+for the copied node. In particular, you *must* copy the provider's `postgresql.conf`
+and edit it to change the `port` if you plan on creating a subscriber on the same
+host, where the port number would otherwise conflict.
+
+`pglogical_create_subscriber` may also be used to convert an existing, running
+streaming replica of the provider into a subscriber. This lets the user clone
+the provider using alternative methods like `pg_start_backup()`, `rsync`, and
+`pg_stop_backup()`, or from a SAN snapshot. This conversion is done
+automatically when the target data directory is non-empty and instead contains
+a suitable PostgreSQL streaming replica.
+
+#### Synopsis
+
+```shell
+pglogical_create_subscriber [OPTION]...
+```
+
+#### Options
+
+##### General Options
+
+- `-D, --pgdata=DIRECTORY` - data directory to be used for the new node;
+  can be either an empty/non-existent directory,
+  or a directory populated using the
+  `pg_basebackup -X stream` command
+- `--databases` - optional list of databases to replicate
+- `-n, --subscriber-name=NAME` - name of the newly created subscriber
+- `--subscriber-dsn=CONNSTR` - connection string to the newly created subscriber
+- `--provider-dsn=CONNSTR` - connection string to the provider
+- `--replication-sets=SETS` - comma-separated list of replication set names
+- `--apply-delay=DELAY` - apply delay in seconds (default 0)
+- `--drop-slot-if-exists` - drop a replication slot of conflicting name
+- `-s, --stop` - stop the server once the initialization is done
+- `-v` - increase logging verbosity
+- `--extra-basebackup-args` - additional arguments to pass to pg_basebackup.
+  Safe options: `-T, -c, --xlogdir/--waldir`
+
+##### Configuration Files Override
+
+- `--hba-conf` - path to the new pg_hba.conf
+- `--postgresql-conf` - path to the new postgresql.conf
+
+**WARNING: pglogical will always overwrite the `recovery.conf`; this behavior
+will be fixed in the next release.**
+
+### pglogical.drop_subscription
+
+Disconnects the subscription and removes it from the catalog.
+
+#### Synopsis
+
+```postgresql
+pglogical.drop_subscription (
+    subscription_name name,
+    ifexists bool
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `ifexists` - if true, an error is not thrown when the subscription does not
+  exist; default is false
+
+### pglogical.alter_subscription_disable
+
+Disables a subscription and disconnects it from the provider.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_disable (
+    subscription_name name,
+    immediate bool
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `immediate` - if true, the subscription is stopped immediately; otherwise
+  it will only be stopped at the end of the current transaction; default is false
+
+### pglogical.alter_subscription_enable
+
+Enables a disabled subscription.
+
+```postgresql
+pglogical.alter_subscription_enable(subscription_name name, immediate bool)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `immediate` - if true, the subscription is started immediately; otherwise
+  it will only be started at the end of the current transaction; default is false
+
+### pglogical.alter_subscription_num_writers
+
+Changes the number of writers for a subscription.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_num_writers(subscription_name name, num_writers int, immediate bool)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `num_writers` - number of writers for this subscription; -1 means the
+  subscription will use the value set by the pglogical.writers_per_subscription GUC
+- `immediate` - if true, the subscription is started immediately, otherwise
+  it will only be started at the end of the current transaction; default is false
+
+### pglogical.alter_subscription_interface
+
+Switches the subscription to use a different interface to connect to the provider node.
+This is how you change the address, port, etc., that a subscription uses when connecting
+to a provider.
+
+See [`pglogical.alter_node_add_interface()`](nodes#pglogical_alter_node_add_interface)
+for usage.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_interface (
+    subscription_name name,
+    interface_name name
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of an existing subscription
+- `interface_name` - name of an existing interface of the current provider
+  node
+
+### pglogical.alter_subscription_synchronize
+
+All unsynchronized tables in all sets are synchronized in a single operation.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_synchronize (
+    subscription_name name,
+    truncate bool
+)
+```
+
+Tables are copied and synchronized one by one. The command does not block;
+it just initiates the action.
+
+Use `pglogical.wait_for_subscription_sync_complete('sub_name')` to wait for
+the resynchronization to complete.
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `truncate` - if true, tables will be truncated before copy; default false
+
+### pglogical.alter_subscription_resynchronize_table
+
+Asynchronously resynchronizes one existing table.
+
+**WARNING: This function will truncate the table first.** The table will be
+visibly empty to transactions between when the resync is scheduled and
+when it completes.
+
+Use `pglogical.wait_for_subscription_sync_complete('sub_name')` to wait for all
+pending resynchronizations to complete, or
+`pglogical.wait_for_table_sync_complete` for just the named table.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_resynchronize_table (
+    subscription_name name,
+    relation regclass
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `relation` - name of an existing table, optionally qualified
+
+### pglogical.show_subscription_status
+
+Shows status and basic information about a subscription.
+
+#### Synopsis
+
+```postgresql
+pglogical.show_subscription_status (subscription_name name)
+```
+
+#### Parameters
+
+- `subscription_name` - optional name of the existing subscription; when no
+  name is provided, the function will show status for all subscriptions on
+  the local node
+
+### pglogical.show_subscription_table
+
+Shows the synchronization status of a table.
+
+#### Synopsis
+
+```postgresql
+pglogical.show_subscription_table (
+    subscription_name name,
+    relation regclass
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `relation` - name of an existing table, optionally qualified
+
+### pglogical.show_subscription_clock_drift
+
+Shows clock drift between provider and subscriber.
+
+On the subscriber at apply time, we track the commit timestamp
+received from the provider and the current local timestamp. When
+the above function is invoked, we generate a diff (interval) of
+these values.
+A negative value indicates clock drift.
+
+#### Synopsis
+
+```postgresql
+pglogical.show_subscription_clock_drift (subscription_name name)
+```
+
+#### Parameters
+
+- `subscription_name` - optional name of the existing subscription; when no
+  name is provided, the function will show clock drift information for all
+  subscriptions on the local node
+
+### pglogical.alter_subscription_add_replication_set
+
+Adds one replication set into a subscriber. Does not synchronize, only
+activates consumption of events.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_add_replication_set (
+    subscription_name name,
+    replication_set name
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `replication_set` - name of the replication set to add
+
+### pglogical.alter_subscription_remove_replication_set
+
+Removes one replication set from a subscriber.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_remove_replication_set (
+    subscription_name name,
+    replication_set name
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription
+- `replication_set` - name of the replication set to remove
+
+### pglogical.wait_for_subscription_sync_complete
+
+Waits on the subscriber side until the named subscription is fully synchronized.
+The function waits for both the initial schema and data syncs (if any) and any
+currently outstanding individual table resyncs.
+
+To ensure that this function sees and waits for pending resynchronizations
+triggered by provider-side replication set changes, make sure to call
+`pglogical.wait_slot_confirm_lsn(NULL, NULL)` on the provider after any
+replication set changes.
+
+#### Synopsis
+
+```postgresql
+pglogical.wait_for_subscription_sync_complete(
+    subscription_name name
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription to wait for
+
+### pglogical.wait_for_table_sync_complete
+
+Same as `pglogical.wait_for_subscription_sync_complete`, except that
+it waits for the subscription to be synced and for exactly one named table,
+which must exist on the downstream. You can use this variant to wait for
+a specific table resync to complete while ignoring other pending resyncs.
+
+#### Synopsis
+
+```postgresql
+pglogical.wait_for_table_sync_complete(
+    subscription_name name,
+    relid regclass
+)
+```
+
+#### Parameters
+
+- `subscription_name` - name of the existing subscription to wait for
+- `relid` - possibly schema-qualified name (cast to regclass if needed) of the
+  relation whose sync completion is to be waited for
+
+### `pglogical.wait_slot_confirm_lsn(name, pg_lsn)`
+
+On a pglogical provider, waits for the specified replication slot(s) to pass
+the requested WAL position.
+
+Note that to wait for a subscriber this function should be called on the
+*provider*, not the subscriber.
+
+Waits for one specified slot if named explicitly, or for all logical slots that use
+the pglogical output plugin if the slot name is null.
+
+If no position is supplied, the current WAL write position on the Postgres instance
+this function is called on is used.
+
+No timeout is offered; use a `statement_timeout`.
+
+This function can only wait for physical slots, and for logical slots with
+output plugins other than 'pglogical', if a single slot is named explicitly
+in the first argument.
+
+For physical slots the LSN waited for is the `restart_lsn`, because
+physical slots don't have the same two-phase advance as logical slots
+and they have a NULL `confirmed_flush_lsn`.
+Because physical standbys guarantee durability (flush) before visibility
+(replay), if you want to ensure transactions are actually visible you should
+call `pglogical.standby_wait_replay_upstream_lsn` on the standby instead.
+
+Waiting with the default (null) position can cause delays on idle systems,
+because the slot position may not advance until the next standby status
+update if there are no further txns to replay. If you can ensure there
+are no concurrent transactions, you can instead capture
+`pg_current_wal_insert_lsn()` after the writes you are interested in but
+before you commit the transaction, then wait for that. Ideally commit would
+report the commit lsn, and you could wait for that, but Pg doesn't do that
+yet. Doing this may lead to waits ending prematurely if there are concurrent
+txns, so only do it on test harness setups that do only one thing at a time.
+
+#### Synopsis
+
+```postgresql
+    SELECT pglogical.wait_slot_confirm_lsn(
+        slotname name,
+        target_lsn pg_lsn
+    );
+```
+
+Typically it's sufficient to use:
+
+```postgresql
+SELECT pglogical.wait_slot_confirm_lsn(NULL, NULL);
+```
+
+to wait until all pglogical (and bdr3) subscriber replication slots'
+`confirmed_flush_lsn`s have confirmed a successful flush to disk of all WAL
+that was written on the provider as of the start of the
+`pglogical.wait_slot_confirm_lsn` call.
+
+#### Parameters
+
+- `slotname` - name of the replication slot to wait for, or NULL for all
+  pglogical slots
+- `target_lsn` - xlog position to wait for slots to confirm, or NULL for the
+  current xlog insert location
+
+### `pglogical.standby_wait_replay_upstream_lsn(pg_lsn)`
+
+On a physical streaming replica (hot standby), waits for the
+standby to replay WAL from the upstream up to or past the
+specified lsn before returning.
+
+Does not support an explicit timeout. Use a `statement_timeout`.
+
+ERRORs if called on a non-standby, or when a standby is promoted
+while waiting.
+
+Use this where you need to guarantee that changes are replayed and
+visible on a replica, not just safe on disk. The sender-side function
+`pglogical.wait_slot_confirm_lsn()` only ensures durability, not
+visibility, when applied to physical replicas, because there's
+no guarantee the flushed WAL is replayed and commits become visible
+before the flush position is reported to the upstream.
+
+This is effectively a convenience function for a loop over
+`pg_last_wal_replay_lsn()` for use in testing.
+
+### pglogical.alter_subscription_skip_changes_upto
+
+Because logical replication can replicate across versions, doesn't replicate
+global changes like roles, and can replicate selectively, sometimes the logical
+replication apply process can encounter an error and stop applying changes.
+
+Wherever possible such problems should be fixed by making changes on the
+subscriber side: `CREATE` any missing table that's blocking replication,
+`CREATE` a needed role, `GRANT` a necessary permission, etc. But occasionally a
+problem can't be fixed that way and it may be necessary to skip entirely over a
+transaction.
+
+There's no support in pglogical for skipping over only parts of a transaction,
+i.e. subscriber-side filtering. Changes are skipped as entire transactions,
+all or nothing. To decide where to skip to, use log output to find the commit
+LSN, per the example below, or peek the change stream with the logical decoding
+functions.
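+
+For instance, the head of a slot's change stream can be inspected without
+consuming it (a sketch; the slot name is illustrative, and the options mirror
+the `\copy` example further below):
+
+```postgresql
+SELECT lsn, xid, data
+  FROM pg_catalog.pg_logical_slot_peek_changes('the_slot_name', NULL, 10,
+       'min_proto_version', '1', 'max_proto_version', '1',
+       'startup_params_format', '1', 'proto_format', 'json');
+```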
+
+Unless a transaction only made one change, it's often necessary to manually
+apply the transaction's effects on the downstream side, so it's important to
+save the problem transaction whenever possible. See the example below.
+
+It's possible to skip over changes without
+`pglogical.alter_subscription_skip_changes_upto` by using
+`pg_catalog.pg_logical_slot_get_binary_changes` to skip to the LSN of interest,
+so this is really a convenience function. It does do a faster skip; however, it
+may bypass some kinds of errors in logical decoding.
+
+This function only works on disabled subscriptions.
+
+The usual sequence of steps is:
+
+- identify the problem subscription and LSN of the problem commit
+- disable the subscription
+- save a copy of the transaction(s) using `pg_catalog.pg_logical_slot_peek_changes` *on the provider* (if possible)
+- `pglogical.alter_subscription_skip_changes_upto` on the subscriber
+- apply repaired or equivalent changes on the subscriber manually if necessary
+- re-enable the subscription
+
+**WARNING**: It's easy to make problems worse when using this function. Don't
+do anything unless you're really, really sure it's the only option.
+
+#### Synopsis
+
+```postgresql
+    pglogical.alter_subscription_skip_changes_upto(
+        subname text,
+        skip_upto_and_including pg_lsn
+    );
+```
+
+#### Example
+
+Apply of a transaction is failing with an ERROR, and you've determined that
+lower-impact fixes such as changes to the subscriber side will not resolve this
+issue. You determine that you must skip the transaction.
+
+In the error logs, find the commit record LSN to skip to, as in this
+artificial example:
+
+```
+ERROR: 55000: pglogical target relation "public.break_me" does not exist
+CONTEXT: during apply of INSERT in commit before 0/1B28848, xid 670 committed
+                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^
+                                  this LSN
+at 2018-07-03 14:28:48.58659+08 (action #2) from node replorigin 1
+```
+
+and if needed use the `pglogical.subscriptions` view to map the origin back to
+a subscription name, e.g.:
+
+```postgresql
+SELECT subscription_name, slot_name
+  FROM pglogical.subscriptions s
+ WHERE replication_origin_id = 1;
+```
+
+Next, disable the subscription so the apply worker doesn't try to connect to the replication slot:
+
+```postgresql
+SELECT pglogical.alter_subscription_disable('the_subscription');
+```
+
+Note that you cannot skip only parts of the transaction; it's all or nothing. So
+it's strongly recommended that you save a record of it by `COPY`ing it out on the
+provider side first, using the subscription's slot name (as obtained above):
+
+```postgresql
+\copy (SELECT * FROM pg_catalog.pg_logical_slot_peek_changes('the_slot_name',
+  'the_target_lsn', NULL, 'min_proto_version', '1', 'max_proto_version', '1',
+  'startup_params_format', '1', 'proto_format', 'json'))
+  TO 'transaction_to_drop.csv' WITH (FORMAT csv);
+```
+
+*(Note that the example is broken into multiple lines for readability,
+but it should be issued as a single line because `\copy` does not
+support multi-line commands.)*
+
+Now you can skip the change by changing "peek" to "get" above, but
+`pglogical.alter_subscription_skip_changes_upto` does a faster skip that
+avoids decoding and outputting all the data:
+
+```postgresql
+SELECT pglogical.alter_subscription_skip_changes_upto('subscription_name',
+  'the_target_lsn');
+```
+
+If necessary or desired, apply the same changes (or repaired versions of them)
+manually to the subscriber, using the dumped transaction contents as a guide.
+
+Finally, re-enable the subscription:
+
+```postgresql
+SELECT pglogical.alter_subscription_enable('the_subscription');
+```
+
+### pglogical.alter_subscription_writer_options
+
+Changes the writer options for a subscription; the writer and its options are
+first set by the `writer` and `writer_options` parameters of
+`pglogical.create_subscription`.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_writer_options(
+    subscription_name name,
+    writer_name name,
+    writer_options text[] = '{}'
+);
+```
+
+#### Example
+
+Find the subscription you want to alter and use its name as the
+`subscription_name`, and, where applicable, as the `writer_name` too
+(shown first). Then pass the `writer_options` text array:
+
+```postgresql
+SELECT pglogical.alter_subscription_writer_options(sub_name, sub_name, '{}') FROM pglogical.subscription;
+```
+
+Grant all writer options to the writer named `super`; the options array has to
+contain an even number of elements (key/value pairs):
+
+```postgresql
+SELECT pglogical.alter_subscription_writer_options(sub_name, 'super', '{UPDATE,INSERT,DELETE,''}') FROM pglogical.subscription;
+```
+
+### pglogical.alter_subscription_set_conflict_resolver
+
+Changes the conflict resolver of a given conflict type for the given subscription.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_set_conflict_resolver(
+    sub_name text,
+    conflict_type text,
+    conflict_resolver text
+)
+```
+
+#### Parameters
+
+- `sub_name` - name of the subscription to change
+- `conflict_type` - type of conflict to configure (see below)
+- `conflict_resolver` - which resolver to use for the given conflict type (see below)
+
+Conflict type can be one of:
+
+- `insert_exists` - the row being inserted exists locally
+- `update_differing` - the origin has updated a different version of the row
+  than the local node has
+- `update_missing` - the row being updated does not exist locally
+- `delete_missing` - the row being deleted does not exist locally
+- `update_origin_change` - the row being updated was updated on a different
+  origin
+- `target_table_missing` - the table corresponding to the change does not
+  exist locally
+- `target_column_missing` - the column being updated or inserted to does not
+  exist locally
+- `source_column_missing` - a column that exists locally is not available in
+  the updated or inserted row
+- `update_recently_deleted` - the row being updated was deleted locally recently
+- `delete_recently_updated` - the row being deleted was updated locally
+  recently
+- `update_pkey_exists` - the updated primary key exists locally
+- `apply_error` - an error occurred while applying the change locally
+- `apply_error_trigger` - an error occurred while firing a trigger locally
+  after applying the change
+- `apply_error_ddl` - an error occurred while applying a DDL that was
+  replicated
+- `apply_error_dml` - an error occurred while applying a DML that was
+  replicated
+
+  Note that `apply_error`, `apply_error_trigger`, `apply_error_ddl` and
+  `apply_error_dml` are never raised right now. They may be used in the future.
+
+Conflict resolver can be one of:
+
+- `error` - the replication will stop on error if a conflict is detected; manual
+  action is then required for resolution.
+- `skip` - keep the local version of the data and ignore the
+  conflicting change that is coming from the remote node. This is the same as
+  `keep_local`, which is now deprecated.
+- `update` - always apply the upstream change that's conflicting with local data.
+  This is the same as `apply_remote`, which is now deprecated.
+- `update_if_newer` - the version of data with the newest commit timestamp
+  will be kept (this can be either the local or the remote version). This is the same
+  as `last_update_wins`, which is now deprecated.
+- `update_if_older` - the version of the data with the oldest commit timestamp will
+  be kept (this can be either the local or the remote version). This is the same
+  as `first_update_wins`, which is now deprecated.
+- `insert_or_skip` - if the row being updated is missing and the downstream
+  can verify that the updated row was none of the ones that exist, the new row
+  will be inserted. Otherwise the change will be skipped.
+- `insert_or_error` - if the row being updated is missing and the downstream
+  can verify that the updated row was none of the ones that exist, the new row
+  will be inserted. Otherwise the replication will stop on error.
+- `ignore` - if the updated or inserted column is missing, it will be ignored
+  while applying the upstream change
+- `ignore_or_error` - if the updated or inserted column is missing, it will be
+  ignored if the new value is NULL. Otherwise replication will stop on error.
+- `use_default_value` - if a column is present locally but is not available on
+  the source, a default value will be used for that column.
+
+The available settings and defaults depend on the version of PostgreSQL and
+other settings.
+
+The `skip`, `update_if_newer` and `update_if_older` settings require the
+`track_commit_timestamp` PostgreSQL setting to be enabled. They cannot be
+used with PostgreSQL 9.4, as `track_commit_timestamp` is not available there.
+
+Some conflict resolvers cannot be used with some conflict types; e.g. the resolver
+`update_if_newer` cannot be used with the conflict type `target_table_missing`.
+`error` is the only resolver available to handle the conflict types `apply_error`,
+`apply_error_trigger`, `apply_error_ddl`, or `apply_error_dml`. The function
+throws an error when an incompatible resolver is used.
+
+#### Example
+
+Find the subscription you want to change the conflict resolver for and use its
+name as the `sub_name`:
+
+```postgresql
+SELECT pglogical.alter_subscription_set_conflict_resolver('sub_name', 'insert_exists', 'update_if_newer');
+```
+
+This changes the conflict resolver of conflict type `insert_exists` for subscription
+`sub_name` to `update_if_newer`. If the row specified by an INSERT change on
+subscription `sub_name` already exists locally, then of the two rows, the one
+with the newest commit timestamp will be kept.
+
+#### Listing Conflict Resolution Configurations
+
+The catalog `pglogical.sub_cfl_res` shows non-default conflict resolution
+configuration settings for each subscription.
+
+### pglogical.alter_subscription_add_log
+
+Adds conflict logging configuration for a subscription.
+
+This can primarily be used to log conflicts into the `pglogical.apply_log`
+table.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_add_log(
+    sub_name text,
+    log_name text,
+    log_to_file bool DEFAULT true,
+    log_to_table regclass DEFAULT NULL,
+    conflict_type text[] DEFAULT NULL,
+    conflict_resolution text[] DEFAULT NULL
+)
+```
+
+#### Listing Conflict Logging Configurations
+
+The catalog `pglogical.sub_log` shows all the logging configurations.
+It lists the name of the logging configuration, where it logs, and which
+conflict types and resolutions it logs.
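+
+For example, a sketch of a call that records every conflict for a subscription
+to the `pglogical.apply_log` table only (the subscription and configuration
+names are illustrative):
+
+```postgresql
+SELECT pglogical.alter_subscription_add_log(
+    sub_name := 'sub1',
+    log_name := 'log_conflicts_to_table',
+    log_to_file := false,
+    log_to_table := 'pglogical.apply_log'::regclass
+);
+```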
+
+#### Parameters
+
+- `sub_name` - name of the subscription that is being changed
+- `log_name` - name of the logging configuration
+- `log_to_file` - whether to log to the server log file
+- `log_to_table` - whether to log to a table, and which table should be the
+  target; NULL (the default) means do not log to a table
+- `conflict_type` - which conflict types to log; NULL (the default) means all
+- `conflict_resolution` - which conflict resolutions to log; NULL
+  (the default) means all
+
+### pglogical.alter_subscription_remove_log
+
+Removes an existing conflict logging configuration for a subscription.
+
+#### Synopsis
+
+```postgresql
+pglogical.alter_subscription_remove_log(
+    sub_name text,
+    log_name text
+)
+```
+
+#### Parameters
+
+- `sub_name` - name of the subscription that is being changed
+- `log_name` - name of the logging configuration to be removed
diff --git a/product_docs/docs/pglogical/3.7/subscriptions/pglogical-writer.mdx b/product_docs/docs/pglogical/3.7/subscriptions/pglogical-writer.mdx
new file mode 100644
index 00000000000..e74d9caac3c
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/subscriptions/pglogical-writer.mdx
@@ -0,0 +1,186 @@
+---
+redirects:
+  - ../pglogical-writer
+navTitle: pglogical Writer
+title: pglogical writer
+originalFilePath: pglogical-writer.md
+
+---
+
+The pglogical writer (or HeapWriter) is the standard way of writing into a local
+PostgreSQL instance when using a pglogical subscription. This is the default
+writer used when no writer is specified in `pglogical.create_subscription()`.
+
+The pglogical writer uses low-level APIs to write the data into local
+tables and sequences. It supports conflict detection and resolution,
+has full support for `REPLICA IDENTITY`, invokes constraints with the exception
+of foreign keys (see [Foreign Keys](#foreign-keys) for details) and row triggers
+marked as `REPLICA` (see [Triggers](#triggers)).
+
+Changes are applied as the table-owning user, thus security concerns are similar
+to the use of triggers by table owners.
+
+## Conflict handling
+
+In case the node is subscribed to multiple providers, or when local writes
+happen on a subscriber, conflicts can arise for the incoming changes. These
+are automatically detected and can be acted on depending on the configuration.
+
+The configuration of the conflict resolver is done using
+`pglogical.alter_subscription_set_conflict_resolver()`.
+
+### Row versioning
+
+To ease reasoning about different versions of a row, it can be helpful for it
+to carry a row version. pglogical provides the helper trigger
+`pglogical.inc_row_version` to simplify this task. It requires a user-provided
+integer column of any bitwidth (usually, `SMALLINT` is enough) and
+needs to be added to a table as follows (assuming a table `my_table` with
+an integer column `row_version`):
+
+```
+CREATE TRIGGER my_row_version_trigger
+    BEFORE UPDATE ON my_table
+    FOR EACH ROW
+    EXECUTE PROCEDURE pglogical.inc_row_version('row_version');
+```
+
+This approach resembles Lamport timestamps and - in combination with
+`REPLICA IDENTITY FULL` and `check_full_tuple` (see below) - fully prevents
+the ABA problem for conflict detection.
+
+## Configuration options
+
+Some aspects of pglogical can be configured using configuration options that
+can be either set in `postgresql.conf` or via `ALTER SYSTEM SET`.
+
+### pglogical.conflict_log_level
+
+Sets the log level for reporting detected conflicts.
+
+The main use for this setting is to suppress logging of conflicts.
+
+Possible values are the same as for the PostgreSQL `log_min_messages` parameter.
+
+The default is `LOG`.
+
+### pglogical.conflict_ignore_redundant_updates
+
+If this option is turned on and the subscriber receives an INSERT or UPDATE
+for a locally pre-existing and equivalent tuple, the change is simply ignored,
+without invoking any conflict handler or logging on the subscriber side.
+
+To be used in combination with `REPLICA IDENTITY FULL`.
+
+The default is `false`.
+
+### pglogical.conflict_check_full_tuple
+
+This option controls the detection of UPDATE-UPDATE conflicts. By default,
+the origin of the existing tuple is compared to the expected origin -
+every mismatch is considered a conflict and initiates conflict handling.
+This is a low-overhead conflict detection mechanism and is therefore the
+default. However, it can lead to false positives and invoke conflict
+handlers inadvertently.
+
+With this option turned on, the expected tuple, as it was before the update on
+the provider, is compared to the existing tuple on the subscriber. This
+allows for a better conflict detection mechanism and (in combination with
+a row version column) can mitigate all false positives.
+
+Due to the requirement to know the full old tuple, this option only ever
+affects relations that are set to `REPLICA IDENTITY FULL`.
+
+The default is `false`.
+
+### pglogical.batch_inserts
+
+This tells the pglogical writer to use the batch insert mechanism if possible.
+The batch mechanism uses the PostgreSQL internal batch insert mode, which is
+also used by the `COPY` command.
+
+Batch inserts improve the replication performance of transactions that
+perform many inserts into one table. pglogical will switch to batch mode when
+the transaction performed more than 5 INSERTs, or 5 rows within a COPY.
+
+It's only possible to switch to batch mode when there are no
+`INSTEAD OF INSERT` and `BEFORE INSERT` triggers on the table and when
+there are no defaults with volatile expressions for columns of the table.
+
+The default is `true`.
+
+### config.session_replication_role
+
+This tells the pglogical writer what `session_replication_role` to use. This can be
+useful mainly in cases where it's desirable to enforce `FOREIGN KEY` constraints.
+
+The default is `replica`, which ignores foreign keys when writing changes
+to the database.
+
+**WARNING: Use with caution.**
+This option changes trigger execution behavior as documented in the
+[PostgreSQL documentation](https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-SESSION-REPLICATION-ROLE).
+If set to `origin` or `local`, this will fire **normal** triggers in the
+database, which can lead to a trigger being executed both on the upstream and on
+the downstream!
+
+## Restrictions
+
+There are some additional restrictions imposed by the pglogical writer over the
+standard set of [restrictions](restrictions).
+
+### Only one unique index/constraint/PK
+
+If more than one upstream is configured, or the downstream accepts local writes,
+then only one `UNIQUE` index should be present on downstream replicated tables.
+Conflict resolution can only use one index at a time, so conflicting rows may
+`ERROR` if a row satisfies the `PRIMARY KEY` but violates a `UNIQUE` constraint
+on the downstream side. This will stop replication until the downstream table
+is modified to remove the violation.
+
+It's fine to have extra unique constraints on an upstream if the downstream only
+gets writes from that upstream and nowhere else.
+The rule is that the downstream
+constraints must *not be more restrictive* than those on the upstream(s).
+
+### Deferrable unique indexes
+
+Deferrable unique indexes are supported; however, initially deferred unique
+indexes might result in apply retries, as the conflicts might not be detected
+on the first try due to the deferred uniqueness check.
+
+Note that a deferred `PRIMARY KEY` cannot be used as `REPLICA IDENTITY` - PostgreSQL
+will throw an error if this is attempted. As a result, a table with a deferred
+`PRIMARY KEY` does not have `REPLICA IDENTITY` unless another `REPLICA IDENTITY`
+is explicitly set. Replicated tables without `REPLICA IDENTITY` cannot receive
+`UPDATE`s or `DELETE`s.
+
+### Foreign Keys
+
+By default foreign key constraints are not enforced for the replication
+process - what succeeds on the provider side gets applied to the subscriber
+even if the `FOREIGN KEY` would be violated.
+
+This behavior can be changed using the `config.session_replication_role` writer
+option.
+
+### TRUNCATE
+
+Using `TRUNCATE ... CASCADE` will only apply the `CASCADE` option on the
+provider side.
+
+(Properly handling this would probably require the addition of `ON TRUNCATE CASCADE`
+support for foreign keys in PostgreSQL.)
+
+`TRUNCATE ... RESTART IDENTITY` is not supported. The identity restart step is
+not replicated to the replica.
+
+### Triggers
+
+Trigger behavior depends on the `config.session_replication_role` setting of
+the writer. By default it's set to `replica`, which means that `ENABLE REPLICA`
+and `ENABLE ALWAYS` triggers will be fired. When it's set to `origin` or
+`local`, it will fire normal triggers.
+
+Only row triggers are fired. Statement triggers are ignored, as there are no
+statements executed by the writer. Per-column UPDATE triggers are ignored.
diff --git a/product_docs/docs/pglogical/3.7/troubleshooting.mdx b/product_docs/docs/pglogical/3.7/troubleshooting.mdx
new file mode 100644
index 00000000000..ce9c0f71a16
--- /dev/null
+++ b/product_docs/docs/pglogical/3.7/troubleshooting.mdx
@@ -0,0 +1,216 @@
+---
+navTitle: Troubleshooting
+title: Error handling in pglogical
+originalFilePath: troubleshooting.md
+
+---
+
+The main tool for troubleshooting is the PostgreSQL log file.
+
+On the upstream side, monitoring uses the views:
+
+```
+pg_catalog.pg_replication_slots
+pg_catalog.pg_stat_replication
+```
+
+On the subscriber side there are numerous helper functions and views that may
+be consulted to gain insight into pglogical's configuration and behaviour.
+ +Start with the configuration and status summary views: + +Configuration can be fetched from: + +``` +SELECT * FROM pglogical.subscriptions; +SELECT * FROM pglogical.TABLES; +SELECT * FROM pglogical.SEQUENCES; +SELECT * FROM pglogical.DDL_REPLICATION; +``` + +Observe worker activity with: + +``` +SELECT * FROM pglogical.workers; +SELECT * FROM pglogical.worker_error_summary; +SELECT * FROM pglogical.apply_log; +SELECT * FROM pglogical.apply_log_summary; +SELECT * FROM pglogical.worker_locks; +``` + +Statistics are reported by: + +``` +SELECT * FROM pglogical.stat_relation; +SELECT * FROM pglogical.stat_subscription; +``` + +Other views provide logs and details: + +``` +SELECT * FROM pglogical.local_sync_status; +SELECT * FROM pglogical.show_subscription_status(); +SELECT * FROM pglogical.sub_history; +SELECT * FROM pglogical.worker_error; +SELECT * FROM pglogical.show_workers(); +SELECT * FROM pglogical.worker_tasks; + +SELECT * FROM pg_catalog.pg_stat_activity; +SELECT * FROM pg_catalog.pg_locks; +SELECT * FROM pg_catalog.pg_replication_origin_status; +``` + +The relation `pglogical.worker_error_summary` is particularly important for +getting a quick overview of recent problems, though the logs should generally +be your main reference. + +## `pglogical.worker_error` and `pglogical.worker_error_summary` + +These relations show the last error reported by each kind of pglogical worker. +Only the most recent error is retained for each distinct worker task. Receiver +workers are tracked separately to their writer(s), as are any writer(s) used +for table (re)sync purposes. + +walsender workers cannot record errors in `pglogical.worker_error`. Their +errors are only available in the log files. + +`pglogical.worker_error_summary` is a convenience view over +`pglogical.worker_error` available in 3.7 and above. + +## `pglogical.worker_tasks` + +The `pglogical.worker_tasks` view shows pglogical's current worker launch rate +limiting state as well as some basic statistics on background worker launch +and registration activity. + +Unlike the other views listed here, it is not specific to the current database +and pglogical node; state for all pglogical nodes on the current PostgreSQL +instance is shown. Join on the current database to filter it. + +`pglogical.worker_tasks` does not track walsenders and output plugins. + +See the configuration option +[`pglogical.min_worker_backoff_delay`](configuration#pglogical.min_worker_backoff_delay) for +rate limit settings and overrides. + +## `pglogical.apply_log` and `pglogical.apply_log_summary` + +The `pglogical.apply_log_summary` view summarizes the record of apply worker +events kept in `pglogical.apply_log`. This records human-readable information +about conflicts and errors that arose during apply. + +## `pglogical.sub_log` + +The `pglogical.sub_log` table contains *conflict log filter definitions* that +are applied when recording entries in `pglogical.apply_log`, controlling +whether conflicts are recorded to a log table and/or postgres log, or silently +dropped. It's managed by `pglogical.alter_subscription_add_log(...)` and +`pglogical.alter_subscription_remove_log()`. + +If you aren't seeing expected conflict information when debugging an issue, check +to make sure you have not filtered it out. + +When pglogical workers encounter an error condition during operation they +report the error to the PostgreSQL log file, record the error to the +`pglogical.worker_error` table if possible, and exit. 
+
+Unlike normal PostgreSQL user backends, they do not attempt to recover from most
+errors and resume normal operation. Instead, the worker in question will be
+relaunched soon and will resume operations at the last recoverable point.
+In the case of apply workers and walsenders, that generally means restarting the
+last uncommitted transaction from the beginning.
+
+This is an intentional design choice to make error handling and recovery
+simpler and more robust.
+
+For example, if an apply worker tries to apply an `UPDATE` and the new row
+violates a secondary unique constraint on the target table, the apply worker
+will report the unique violation error and exit. The error information will be
+visible in `pglogical.worker_error_summary` (3.7+; `pglogical.worker_error` on 3.6).
+The walsender worker on the peer end will exit automatically as well. The apply
+worker will be relaunched by the manager worker for the database in a few
+seconds and will retry the failed transaction from the beginning. If the
+conflicting row has since been removed, the transaction will apply normally and
+replication will resume. If not, the worker will error again and the cycle will
+repeat until the cause of the error is fixed. In this case the fix would
+typically be for another subscription or a local application write to replicate
+a change that clears the unhandled conflict condition, or for the administrator
+to intervene and change the conflicting row.
+
+## Diagnosing and fixing errors
+
+It's important to first check that your schema and deployment don't violate any
+of the [restrictions](restrictions) imposed by pglogical. Also check the
+additional writer-specific restrictions from the pglogical writer you are using,
+most likely the [HeapWriter](pglogical-writer#Restrictions).
+
+### Common problems
+
+Some issues that arise when operating pglogical include:
+
+- Incorrect or changed provider address or hostname. Update the interface definition
+  for the subscription.
+
+  Use `pglogical.alter_node_add_interface(...)` and
+  `pglogical.alter_subscription_interface(...)` to change the subscriber's
+  recorded address for the provider.
+
+- Incorrect `pg_hba.conf` on the provider disallowing the subscriber from connecting.
+  The subscriber must be able to connect in both replication and ordinary
+  non-replication mode.
+
+  Correct the `pg_hba.conf` on the provider and run `SELECT pg_reload_conf();`
+  on the provider.
+
+- Incompatible schema definitions on provider and subscriber caused by schema changes
+  being made without [DDL replication](ddl) enabled and without use of
+  `pglogical.replicate_ddl_command`. For example, missing columns on the subscriber
+  that haven't been excluded by a column filter, differing data types for columns
+  between provider and subscriber, etc.
+
+  (Some data type differences are actually permitted, but care must be taken
+  that the text representations are compatible. Do not use differing data types
+  for PostgreSQL built-in data types. See [restrictions](restrictions).)
+
+- Incorrectly defined `ENABLE REPLICA` or `ENABLE ALWAYS` triggers firing on
+  apply on the subscriber and causing errors.
+
+- Heap writers configured to fire normal triggers and foreign key validation
+  triggers (using the writer option `config.session_replication_role`). Problems
+  arise when not all triggers have been checked to ensure they'll work
+  correctly with row-replication and without statement triggers being fired as
+  well, or when FK violations or check constraint violations are created by
+  replication set configuration such as row and column filters or by referenced
+  tables not being replicated along with the referencing tables.
+
+- Inconsistent versions of PostgreSQL or extensions between provider and subscriber
+  where the version difference affects the behaviour or limits of a data type being
+  replicated.
+
+  pglogical explicitly supports replicating between different versions of
+  PostgreSQL, so a version difference alone is not a problem. But the data
+  being replicated must be valid on the subscriber's PostgreSQL version.
+
+  For example, apply errors may occur when replicating data from PostGIS 3.0 to
+  PostGIS 2.5 where not all the 3.0 data is understood by 2.5. Similarly,
+  replicating from a PostgreSQL configured without integer datetimes to one
+  with integer datetimes may result in errors if there are non-integer
+  datetimes with values outside the somewhat narrower range permitted by
+  integer datetimes support.
+
+### Multiple data source issues
+
+Additional classes of error tend to arise with any sort of multiple-data-source
+configuration, i.e. multiple subscriptions to different providers for the same
+tables and/or local writes to tables that are also part of a subscription. Some of
+these affect BDR3 as well.
+
+These include:
+
+- Tables with multiple unique constraints may cause unique violation errors
+  during apply if the table receives writes from multiple sources.
+
+- Updating the PRIMARY KEY value for rows, or deleting a key then inserting the same
+  key again soon afterwards. This may cause unique violation errors during apply
+  if the table receives writes from more than one source, i.e. multiple providers
+  and/or local writes.
diff --git a/scripts/source/pglogical.js b/scripts/source/pglogical.js
new file mode 100644
index 00000000000..1cdef283e44
--- /dev/null
+++ b/scripts/source/pglogical.js
@@ -0,0 +1,221 @@
+// run: node scripts/source/pglogical.js
+// purpose:
+//  Import and convert the pglogical docs, rendering them in /product_docs/pglogical/
+//
+const path = require("path");
+const fs = require("fs/promises");
+const { read, write } = require("to-vfile");
+const remarkParse = require("@mdx-js/mdx/node_modules/remark-parse");
+const mdx = require("remark-mdx");
+const unified = require("@mdx-js/mdx/node_modules/unified");
+const remarkFrontmatter = require("remark-frontmatter");
+const remarkStringify = require("remark-stringify");
+const admonitions = require("remark-admonitions");
+const yaml = require("js-yaml");
+const visit = require("unist-util-visit");
+const visitAncestors = require("unist-util-visit-parents");
+const mdast2string = require("mdast-util-to-string");
+const { exec, execSync } = require("child_process");
+const isAbsoluteUrl = require("is-absolute-url");
+
+const fileToMetadata = {};
+const basePath = path.resolve("temp_pglogical3/docs/");
+
+(async () => {
+  const processor = unified()
+    .use(remarkParse)
+    .use(remarkStringify, { emphasis: "*", bullet: "-", fences: true })
+    .use(admonitions, { tag: "!!!", icons: "none", infima: true })
+    .use(remarkFrontmatter)
+    .use(mdx)
+    .use(pglogicalTransformer);
+
+  const processEntry = async (dirEntry, destPath, indexFilename) =>
+  {
+    for (const [navTitle, dest] of Object.entries(dirEntry)) {
+      if (!dest) continue;
+      if (dest instanceof Array)
+      {
+        let subIndexFilename = null;
+        for (const subEntry of dest) {
+          for (const [subNav, subDest] of Object.entries(subEntry))
+          {
+            if (subDest &&
+                !(subDest instanceof Array))
+            {
+              if (!subIndexFilename) subIndexFilename = subDest;
+              fileToMetadata[subDest] = {
+                ...fileToMetadata[subDest],
+                redirects: ["../" + path.basename(subDest, ".md")]
+              };
+            }
+          }
+        }
+        for (const subEntry of dest) {
+          await processEntry(subEntry, path.resolve(destPath, navTitle.toLowerCase()), subIndexFilename)
+        }
+        // write out index w/ navigation tree
+        fileToMetadata[subIndexFilename] = {
+          ...fileToMetadata[subIndexFilename],
+          navTitle
+        };
+        await process(path.resolve(basePath, subIndexFilename), subIndexFilename, path.resolve(destPath, navTitle.toLowerCase(), "index.mdx"));
+
+        // add subindex to parent
+        fileToMetadata[indexFilename] = {navigation: [], ...fileToMetadata[indexFilename]};
+        fileToMetadata[indexFilename].navigation.push(navTitle.toLowerCase());
+        continue;
+      }
+      const fileAbsolutePath = path.resolve(basePath, dest);
+      const filename = path.relative(basePath, fileAbsolutePath);
+      const destFilepath = path.resolve(destPath, filename.replace(/\//g, '_')+"x");
+
+      fileToMetadata[filename] = {...fileToMetadata[filename], navTitle};
+      fileToMetadata[indexFilename] = {navigation: [], ...fileToMetadata[indexFilename]};
+      fileToMetadata[indexFilename].navigation.push(path.basename(destFilepath, ".mdx"));
+
+      if (filename === indexFilename) continue;
+      await process(fileAbsolutePath, filename, destFilepath);
+    }
+  };
+
+  const process = async (fileAbsolutePath, filename, destFilepath) =>
+  {
+    let file = await read(fileAbsolutePath);
+    stripEmptyComments(file);
+    file = await processor.process(file);
+    file.path = destFilepath;
+    try
+    {
+      await fs.mkdir(path.dirname(file.path), {recursive: true});
+    } catch {}
+    await write(file);
+  }
+
+  const mdIndex = yaml.load(await fs.readFile(path.resolve(basePath, "pglogical.yml"), 'utf8'));
+
+  const markdownToProcess = mdIndex.nav;
+  const version = mdIndex.site_name.match(/pglogical (\d+\.\d+)/)[1];
+  const destPath = path.resolve("product_docs", "docs", "pglogical", version);
+  const indexFilename = "index.md";
+
+  fileToMetadata[indexFilename] = {};
+
+  for (const dirEntry of markdownToProcess) {
+    if (!dirEntry) continue;
+    await processEntry(dirEntry, destPath, indexFilename);
+  }
+
+  // write out index w/ navigation tree
+  await process(path.resolve(basePath, indexFilename), indexFilename, path.resolve(destPath, "index.mdx"));
+})();
+
+// GPP leaves the files littered with these; they alter parsing by flipping sections to HTML context
+// remove them BEFORE parsing to avoid issues
+// (pattern reconstructed - the original regex was mangled during extraction)
+function stripEmptyComments(file)
+{
+  file.contents = file.contents.toString().replace(/<!--\s*-->/g, '');
+}
+
+// Transforms:
+// - identify title
+// - identify navTitle
+// - identify description (if only page content is )
+// - Create frontmatter YAML from above
+//
+
+function pglogicalTransformer() {
+  return (tree, file) => {
+    const filename = path.relative(basePath, file.path);
+    const metadata = fileToMetadata[filename];
+    let title = "";
+    let description = "";
+    let stub = true;
+    // (loop body reconstructed - the original was mangled during extraction)
+    // pull the first level-1 heading out of the tree to use as the title;
+    // treat a lone remaining paragraph as the description of a stub page
+    for (let i=0; i<tree.children.length; ++i)
+    {
+      const node = tree.children[i];
+      if (node.type === "heading" && node.depth === 1 && !title)
+      {
+        title = mdast2string(node);
+        tree.children.splice(i--, 1);
+      }
+      else if (node.type === "paragraph" && !description)
+        description = mdast2string(node);
+      else
+        stub = false;
+    }
+
+    // There shouldn't be any JSX in these - so look for it and remove it.
+
+    // Warn about these, except for comments
+    visit(tree, "jsx", (node, index, parent) => {
+      // todo: use HAST parser here - this is not reliable
+
+      // strip (potentially NON-EMPTY) HTML comments - these are not valid in JSX
+      // (pattern reconstructed - the original regex was mangled during extraction)
+      const newValue = node.value.replace(/(?=<!--)[\s\S]*?-->/g, '');
+      if (newValue !== node.value)
+      {
+        node.value = newValue;
+        if (newValue.trim())
+          return;
+      }
+
+      // ignore placeholder
+      // (check reconstructed - the original pattern was lost during extraction)
+      if (!node.value.trim()) return;
+
+      console.warn(`${file.path}: unexpected JSX: ${node.value}`);
+    });
+
+    // links: rewrite relative paths to match the flattened, renamed output files
+    visit(tree, "link", (node) => {
+      if (isAbsoluteUrl(node.url) || node.url[0] === '/') return;
+      node.url = node.url.replace(/\//g, '_').replace(/\.md(?=$|\?|#)/, '');
+    });
+
+    // MDExtra anchors:
+    // - identify
+    // - remove
+    // - create explicit anchor preceding removal in container block
+    const anchorRE = /{#([^}]+)}/;
+    visitAncestors(tree, "text", (node, ancestors) => {
+      let anchor = node.value.match(anchorRE);
+      if (!anchor) return;
+      anchor = anchor[1];
+      node.value = node.value.replace(anchorRE, '');
+
+      const blockTypes = ['root', 'paragraph', 'listItem', 'blockquote'];
+      for (let i=ancestors.length-1, parent=ancestors[ancestors.length-1], child=node; i>=0; --i, child=parent, parent=ancestors[i])
+      {
+        if (!blockTypes.includes(parent.type)) continue;
+        anchor = {type: "jsx", value: `<a name="${anchor}"></a>`};
+        parent.children.splice(parent.children.indexOf(child), 0, anchor);
+        break;
+      }
+    });
+
+    // images: strip Markdown Extra attribute block
+    visit(tree, "image", (node, index, parent) => {
+      const attrRE = /{[^}]+}/;
+      if (/{[^}]+?}/.test(parent.children[index+1]?.value))
+        parent.children[index+1].value = parent.children[index+1].value.replace(attrRE, '');
+    });
+
+    if (!metadata.title)
+      metadata.title = title;
+    if (!metadata.description && stub && description)
+      metadata.description = description;
+    if (metadata.title.trim() === metadata.navTitle.trim())
+      delete metadata.navTitle;
+    metadata.originalFilePath = filename;
+    tree.children.unshift({type: "yaml", value: yaml.dump(metadata)});
+  };
+}
diff --git a/src/components/index-sub-nav.js b/src/components/index-sub-nav.js
index 5571623f530..6ebe4b469e7 100644
--- a/src/components/index-sub-nav.js
+++ b/src/components/index-sub-nav.js
@@ -14,7 +14,7 @@ const IndexSubNav = () => (
    EDB Home - Support + Knowledge Base Contact Us diff --git a/src/components/side-navigation.js b/src/components/side-navigation.js index 46ccfe476a5..c45d517771e 100644 --- a/src/components/side-navigation.js +++ b/src/components/side-navigation.js @@ -23,7 +23,7 @@ const SideNavigationFooter = () => (

      - Support + Knowledge Base Contact Us Have feedback? diff --git a/src/pages/index.js b/src/pages/index.js index 3ae7efc525d..be236a6d05d 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -158,6 +158,7 @@ const Page = () => ( BDR (Bi-Directional Replication) Replication Server + pglogical Slony Cluster Management @@ -234,7 +235,6 @@ const Page = () => ( iconName={iconNames.HANDSHAKE} headingText="Third Party Integrations" > - Liquibase Pro