diff --git a/advocacy_docs/pg_extensions/extensionrefs.json b/advocacy_docs/pg_extensions/extensionrefs.json
index 7ab90dae3c7..da3e2633aa8 100644
--- a/advocacy_docs/pg_extensions/extensionrefs.json
+++ b/advocacy_docs/pg_extensions/extensionrefs.json
@@ -117,5 +117,6 @@
"mysql_fdw": "https://github.com/EnterpriseDB/mysql_fdw",
"bdr_5.x":"https://www.enterprisedb.com/docs/pgd/5/",
"pg_cron":"https://github.com/citusdata/pg_cron",
- "sql_profiler":"https://www.enterprisedb.com/docs/pem/latest/profiling_workloads/using_sql_profiler/"
+ "sql_profiler":"https://www.enterprisedb.com/docs/pem/latest/profiling_workloads/using_sql_profiler/",
+ "pg_squeeze":"https://www.enterprisedb.com/docs/pg_extensions/pg_squeeze/"
}
\ No newline at end of file
diff --git a/advocacy_docs/pg_extensions/index.mdx b/advocacy_docs/pg_extensions/index.mdx
index 5d0ee44dbcc..c79d50c45ab 100644
--- a/advocacy_docs/pg_extensions/index.mdx
+++ b/advocacy_docs/pg_extensions/index.mdx
@@ -10,6 +10,7 @@ navigation:
- query_advisor
- wait_states
- pg_failover_slots
+ - pg_squeeze
- "#EDB Postgres Advanced Server only"
- spl_check
- edb_job_scheduler
@@ -91,6 +92,7 @@ Categories of extensions:
pg_cron | | ✓ | ✓ | ✓ | – | – | ✓ | ✓ | ✓ |
pg_failover_slots | | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
pg_permissions | | ✓ | – | – | – | – | – | – | – |
+pg_squeeze | | ✓ | ✓ | ✓ | ✓ | ✓ | – | – | – |
pg_store_plans | | ✓ | – | – | – | – | – | – | – |
pgaudit | Yes | ✓ | ✓ | – | ✓ | – | ✓ | – | – |
pglogical 2.x | | ✓ | ✓ | – | – | – | – | – | – |
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/configuring.mdx b/advocacy_docs/pg_extensions/pg_squeeze/configuring.mdx
new file mode 100644
index 00000000000..f8c7a0ea37a
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/configuring.mdx
@@ -0,0 +1,33 @@
+---
+title: Configuring pg_squeeze
+navTitle: Configuring
+---
+
+1. Add `pg_squeeze` to the `shared_preload_libraries` parameter:
+
+ ```ini
+ shared_preload_libraries = 'pg_squeeze'
+ ```
+
+ !!! Note
+ If `shared_preload_libraries` has other extensions, then you can add `pg_squeeze` to the list. The order doesn't matter.
+ !!!
+
+1. Add these changes to `postgresql.conf`:
+
+   ```ini
+ wal_level = logical
+ max_replication_slots = 1 # ... or add 1 to the current value.
+ ```
+
+1. Restart Postgres.
+
+1. Create the `pg_squeeze` extension in your database:
+
+   ```sql
+ CREATE EXTENSION pg_squeeze;
+ ```
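+
+   To confirm the setup, you can optionally query the running server. The exact output depends on your installation:
+
+   ```sql
+   -- Check that the library is loaded and logical decoding is enabled
+   SHOW shared_preload_libraries;
+   SHOW wal_level;
+
+   -- Check that the extension is installed in the current database
+   SELECT extversion FROM pg_extension WHERE extname = 'pg_squeeze';
+   ```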
+
+!!! Note
+    When upgrading a database cluster with `pg_squeeze` installed (whether using `pg_dumpall`/`psql` or `pg_upgrade`), make sure that the new cluster has `pg_squeeze` in `shared_preload_libraries` before you upgrade. Otherwise, the upgrade fails.
+
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/index.mdx b/advocacy_docs/pg_extensions/pg_squeeze/index.mdx
new file mode 100644
index 00000000000..6a999127582
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/index.mdx
@@ -0,0 +1,27 @@
+---
+title: 'PG Squeeze'
+indexCards: none
+navigation:
+ - rel_notes
+ - installing
+ - configuring
+ - using
+directoryDefaults:
+ product: PG Squeeze
+---
+
+PG Squeeze (`pg_squeeze`) is an extension released as open source software under the PostgreSQL License.
+
+`pg_squeeze` removes unused space from a table and optionally sorts tuples according to a particular index (as if a `CLUSTER` command were executed concurrently with regular reads and writes). `pg_squeeze` is intended as a replacement for the `pg_repack` extension. While providing very similar functionality, `pg_squeeze` takes a different approach:
+
+- Implementing functionality solely on the server side. This approach makes both configuration and use simpler than `pg_repack`, which uses both server- and client-side code. A server-side implementation also enables unattended processing using background workers.
+
+- Using recent improvements of the PostgreSQL database server. Most notably, besides the use of background workers, `pg_squeeze` uses logical decoding instead of triggers to capture concurrent changes.
+
+For more information about `pg_squeeze`, see:
+
+- [Installing pg_squeeze](installing.mdx)
+- [Configuring pg_squeeze](configuring.mdx)
+- [Using pg_squeeze](using.mdx)
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/installing.mdx b/advocacy_docs/pg_extensions/pg_squeeze/installing.mdx
new file mode 100644
index 00000000000..a7a57c4ed5d
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/installing.mdx
@@ -0,0 +1,71 @@
+---
+title: Installing pg_squeeze
+navTitle: Installing
+---
+
+`pg_squeeze` is supported on the same platforms as the Postgres distribution you're using. Support for `pg_squeeze` starts with Postgres 12. For details, see:
+
+- [EDB Postgres Advanced Server Product Compatibility](https://www.enterprisedb.com/platform-compatibility#epas)
+- [PostgreSQL Product Compatibility](https://www.enterprisedb.com/resources/platform-compatibility#pg)
+- [EDB Postgres Extended Server Product Compatibility](https://www.enterprisedb.com/resources/platform-compatibility#epas_extended)
+
+## Installation
+
+Before you begin the installation process:
+
+- Install Postgres. See:
+
+ - [Installing EDB Postgres Advanced Server](/epas/latest/installing/)
+
+ - [Installing PostgreSQL](https://www.postgresql.org/download/)
+
+ - [Installing EDB Postgres Extended Server](/pge/latest/installing/)
+
+- Set up the repository.
+
+ Setting up the repository is a one-time task. If you've already set up your repository, you don't need to perform this step.
+
+ To set up the repository, go to [EDB repositories](https://www.enterprisedb.com/repos-downloads) and follow the instructions provided there.
+
+## Install the package
+
+The syntax for the package install command is:
+
+```shell
+# For SLES, CentOS, RHEL and its derivatives
+sudo <package-manager> -y install edb-<postgres_distribution><version>-pg-squeeze1
+
+# For Debian and Ubuntu
+sudo <package-manager> -y install edb-<postgres_distribution><version>-pg-squeeze-1
+```
+
+Where:
+- `<package-manager>` is the package manager used with your operating system:
+
+ | Package manager | Operating system |
+ | --------------- | -------------------------------- |
+ | dnf | RHEL 8/9 and derivatives |
+ | yum | RHEL 7 and derivatives, CentOS 7 |
+ | zypper | SLES |
+ | apt-get | Debian and derivatives |
+
+- `<postgres_distribution>` is the distribution of Postgres you're using:
+
+ | Postgres distribution | Value |
+ | ---------------------------- | ---------- |
+ | PostgreSQL | pg |
+ | EDB Postgres Advanced Server | as |
+ | EDB Postgres Extended Server | postgresextended |
+
+- `<version>` is the version of Postgres you're using.
+
+For example, to install `pg_squeeze` for EDB Postgres Advanced Server 15 on a RHEL 8 platform:
+
+```shell
+sudo dnf -y install edb-as15-pg-squeeze1
+```
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/index.mdx b/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/index.mdx
new file mode 100644
index 00000000000..210f68de402
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/index.mdx
@@ -0,0 +1,10 @@
+---
+title: 'pg_squeeze release notes'
+navTitle: "Release notes"
+indexCards: none
+---
+The pg_squeeze documentation describes the latest version of pg_squeeze, including minor releases and patches. These release notes cover what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators in the content about the release that introduced the feature.
+
+| Version | Release Date |
+| ----------------------------------- | ------------ |
+| [1.6.1](pg_squeeze_1.6.1_rel_notes) | 15 Feb 2024 |
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/pg_squeeze_1.6.1_rel_notes.mdx b/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/pg_squeeze_1.6.1_rel_notes.mdx
new file mode 100644
index 00000000000..0114bf3ee06
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/rel_notes/pg_squeeze_1.6.1_rel_notes.mdx
@@ -0,0 +1,10 @@
+---
+title: Release notes for pg_squeeze version 1.6.1
+navTitle: "Version 1.6.1"
+---
+
+This release of pg_squeeze includes:
+
+| Type | Description |
+| ------- | -------------------------------------------------------------------------- |
+| Feature | This is the initial release. |
diff --git a/advocacy_docs/pg_extensions/pg_squeeze/using.mdx b/advocacy_docs/pg_extensions/pg_squeeze/using.mdx
new file mode 100644
index 00000000000..a2f0ecf1369
--- /dev/null
+++ b/advocacy_docs/pg_extensions/pg_squeeze/using.mdx
@@ -0,0 +1,190 @@
+---
+title: Using pg_squeeze
+navTitle: Using
+---
+
+## Registering a table for regular processing
+
+First, ensure that your table has either a primary key or a unique constraint. This is necessary so that `pg_squeeze` can apply changes made by other transactions while it's doing its work.
+
+To make the `pg_squeeze` extension aware of the table, you must insert a record into the `squeeze.tables` table. After it is added, table statistics are checked periodically. When the table meets the necessary criteria to be squeezed, a task is added to a queue. The tasks are processed sequentially, in the order they are created.
+
+Here is an example of a simple registration:
+
+```sql
+INSERT INTO squeeze.tables (tabschema, tabname, schedule)
+VALUES ('public', 'foo', ('{30}', '{22}', NULL, NULL, '{3, 5}'));
+```
+
+You can optionally specify additional columns, for example:
+
+```sql
+INSERT INTO squeeze.tables (
+ tabschema,
+ tabname,
+ schedule,
+ free_space_extra,
+ vacuum_max_age,
+ max_retry
+)
+VALUES (
+ 'public',
+ 'bar',
+ ('{30}', '{22}', NULL, NULL, '{3, 5}'),
+ 30,
+ '2 hours',
+ 2
+);
+```
+
+The following list describes the table metadata:
+
+- `tabschema` — Schema name.
+
+- `tabname` — Table name.
+
+- `schedule` — Specifies when the table should be checked and possibly squeezed. The schedule is described by a value of the following composite data type, which resembles a crontab entry:
+
+  ```sql
+ CREATE TYPE schedule AS (
+ minutes minute[],
+ hours hour[],
+ days_of_month dom[],
+ months month[],
+ days_of_week dow[]
+ );
+ ```
+
+  In this data type, `minutes` (0-59) and `hours` (0-23) determine the time when the check occurs within a day, while `days_of_month` (1-31), `months` (1-12), and `days_of_week` (0-7, where both 0 and 7 stand for Sunday) determine the day of the check.
+
+ The check is performed if the `minutes`, `hours`, and `months` all match the current timestamp. `NULL` means any minute, hour, and month, respectively. Either `days_of_month` or `days_of_week` must match the current timestamp or both must be `NULL` for the check to take place.
+
+ For example, the entries in the sample registration specify to check the table `public.bar` every Wednesday and Friday at 22:30.
+
+- `free_space_extra` — Minimum percentage of extra free space needed to trigger processing of the table. The `extra` adjective refers to the fact that free space derived from `fillfactor` isn't a reason to squeeze the table.
+
+  For example, if `fillfactor` equals 60, then at least 40 percent of each page stays free during normal operation. If you want `pg_squeeze` to become interested in the table only when at least 70 percent of each page is free, set `free_space_extra` to 30 (that is, the 70 percent required to be free minus the 40 percent free due to the `fillfactor`).
+
+ The default value of `free_space_extra` is 50.
+
+- `min_size` — Minimum disk space in megabytes that the table must occupy to be eligible for processing. The default value is 8.
+
+- `vacuum_max_age` — Maximum time since the completion of the last `VACUUM` to consider the free space map (FSM) fresh. After this interval has elapsed, the fraction of dead tuples might be significant, so it needs to be considered in addition to the FSM when evaluating the potential effect of `pg_squeeze`. The default value is 1 hour.
+
+- `max_retry` — Maximum number of extra attempts to squeeze a table if the first processing of the corresponding task fails. The typical reason to retry processing is that the table definition changed while the table was being squeezed. If the number of retries is reached, processing of the table is considered complete. The next task is created at the next scheduled time.
+ The default value of `max_retry` is 0 (that is, don't retry).
+
+!!! note
+    The `squeeze.tables` table is the only table to modify. If you want to change anything else, make sure you understand what you're doing.
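+
+For example, to raise or lower the `free_space_extra` threshold for a registered table (a hypothetical adjustment of the `bar` registration from the earlier example):
+
+```sql
+UPDATE squeeze.tables
+SET free_space_extra = 30
+WHERE tabschema = 'public' AND tabname = 'bar';
+```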
+
+## Ad hoc processing for any table
+
+It's possible to squeeze tables manually without registering (that is, without inserting the corresponding record into `squeeze.tables`) and without prior checking of the actual bloat.
+
+Function signature:
+
+```sql
+squeeze.squeeze_table(
+ tabschema name,
+ tabname name,
+ clustering_index name,
+ rel_tablespace name,
+ ind_tablespaces name[]
+)
+```
+
+In addition to `tabschema` and `tabname`, which identify the table, the function takes the following arguments:
+
+- `clustering_index` — Index of the processed table. After processing finishes, tuples of the table are physically sorted by the key of this index.
+
+- `rel_tablespace` — Existing tablespace into which to move the table. `NULL` means to leave the table where it is.
+
+- `ind_tablespaces` — Two-dimensional array in which each row specifies tablespace mapping of an index. The first and the second columns represent the index name and tablespace name, respectively. All indexes for which no mapping is specified remain in the original tablespace.
+ !!! note
+ If a tablespace is specified for the table but not for indexes, the table gets moved to that tablespace, but the indexes remain in the original tablespace. In other words, the tablespace of the table isn't the default for indexes.
+
+### Sample execution
+
+```sql
+SELECT squeeze.squeeze_table('public', 'pgbench_accounts');
+```
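+
+To also sort the table during the same run, pass the index name as the third argument. For example (a sketch that assumes `pgbench_accounts_pkey`, the primary-key index pgbench creates by default):
+
+```sql
+SELECT squeeze.squeeze_table('public', 'pgbench_accounts',
+                             'pgbench_accounts_pkey', NULL, NULL);
+```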
+
+## Enabling or disabling table processing
+
+To enable processing of bloated tables, run this statement as superuser:
+
+```sql
+SELECT squeeze.start_worker();
+```
+
+The function starts a background worker (the scheduler worker) that periodically checks which of the registered tables are due for a bloat check and creates a task for each. Another worker (a squeeze worker) is launched whenever a task exists for a particular database.
+
+If the scheduler worker is already running for the current database, the function doesn't report any error, and the new worker exits immediately.
+
+If the workers are running for the current database, you can use the following statement to stop them:
+
+```sql
+SELECT squeeze.stop_worker();
+```
+
+!!! note
+ Only the functions mentioned in this documentation are considered part of the user interface. If you want to call any other function, make sure you understand what you're doing.
+
+If you want the background workers to start during startup of the whole PostgreSQL cluster, add entries like the following to the `postgresql.conf` file:
+
+```ini
+squeeze.worker_autostart = 'my_database your_database'
+squeeze.worker_role = postgres
+```
+
+The next time you start the cluster, two or more workers (that is, one scheduler worker and one or more squeeze workers) are launched for `my_database`, and likewise for `your_database`. If you take this approach, a worker either doesn't start or stops without doing any work if either:
+
+- The `pg_squeeze` extension doesn't exist in the database.
+
+- The `squeeze.worker_role` parameter specifies a role that doesn't have superuser privileges.
+
+Although there are actually two kinds of workers, the functions and configuration variables described here use the singular form of the word `worker`. This is because previous versions of `pg_squeeze` had only one worker, which handled both scheduling and execution of the tasks. Keeping the singular form means this implementation change doesn't force users to adjust their configuration files during an upgrade.
+
+## Controlling impact on other backends
+
+Although the table being squeezed is available for both read and write operations by other transactions most of the time, an exclusive lock is needed to finalize processing. If `pg_squeeze` occasionally seems to block access to tables, consider setting the `squeeze.max_xlock_time` GUC parameter. For example:
+
+```sql
+SET squeeze.max_xlock_time TO 100;
+```
+
+This example specifies not to hold the exclusive lock for more than 0.1 second (100 milliseconds). If more time is needed for the final stage, `pg_squeeze` releases the exclusive lock, processes changes committed by other transactions in between, and tries the final stage again. An error is reported if the lock duration is exceeded a few more times. If that happens, either increase the setting or schedule processing of the problematic table for a different time when write activity is lower.
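+
+To make the limit persist across sessions, you can set it cluster-wide. This is a sketch; 100 milliseconds is an illustrative value:
+
+```sql
+ALTER SYSTEM SET squeeze.max_xlock_time = 100;
+SELECT pg_reload_conf();
+```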
+
+## Running multiple workers per database
+
+If you think that a single squeeze worker can't cope with the load, consider setting the `squeeze.workers_per_database` configuration variable to a value higher than 1. Then the `pg_squeeze` extension can process multiple tables simultaneously: one table per squeeze worker.
+
+However, be aware that this setting affects all databases in which you actively use the `pg_squeeze` extension. The total number of all the squeeze workers in the cluster (including the scheduler workers) can't exceed the in-core configuration variable `max_worker_processes`.
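+
+For example, to allow up to two squeeze workers per database, add this to `postgresql.conf` (an illustrative value; tune it to your workload):
+
+```ini
+squeeze.workers_per_database = 2
+```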
+
+## Monitoring
+
+The `squeeze.log` table contains one entry per successfully squeezed table.
+
+The columns `tabschema` and `tabname` identify the processed table. The columns `started` and `finished` report when the processing started and finished. `ins_initial` is the number of tuples inserted into the new table storage during the initial load stage, that is, the number of tuples present in the table before the processing started. On the other hand, `ins`, `upd`, and `del` are the numbers of tuples inserted, updated, and deleted by applications during the table processing. (These concurrent data changes must also be incorporated into the squeezed table. Otherwise they'd get lost.)
+
+The `squeeze.errors` table contains errors that happen during squeezing. A common problem is that someone changes the definition of the table whose processing is in progress (that is, someone adds or removes a column).
+
+The `squeeze.get_active_workers()` function returns a table of squeeze workers that are processing tables in the current database.
+
+The `pid` column contains the system PID of the worker process. The other columns have the same meaning as their counterparts in the `squeeze.log` table. While the `squeeze.log` table shows information only on the completed squeeze operations, the `squeeze.get_active_workers()` function lets you check the progress during processing.
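+
+For example, these queries (a monitoring sketch using the objects described above) show recent results and current progress:
+
+```sql
+-- Recently completed squeeze operations
+SELECT tabschema, tabname, started, finished, ins_initial, ins, upd, del
+FROM squeeze.log
+ORDER BY finished DESC;
+
+-- Workers currently processing tables in this database
+SELECT * FROM squeeze.get_active_workers();
+```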
+
+## Unregistering a table
+
+If a particular table is no longer subject to periodical squeezes, delete the corresponding row from the `squeeze.tables` table.
+
+It's also good practice to unregister a table that you're going to drop, although the background worker periodically unregisters tables that no longer exist.
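+
+For example, to unregister the table from the earlier registration example:
+
+```sql
+DELETE FROM squeeze.tables
+WHERE tabschema = 'public' AND tabname = 'foo';
+```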
+
+## Concurrency
+
+The `pg_squeeze` extension doesn't prevent other transactions from altering a table at certain stages of the processing. If a disruptive command (that is, `ALTER TABLE`, `VACUUM FULL`, `CLUSTER`, or `TRUNCATE`) manages to commit before the squeeze finishes, the `squeeze_table()` function aborts, and all changes made to the table are rolled back. The `max_retry` column of the `squeeze.tables` table determines how many times the squeeze worker retries. Changing your schedule might help to avoid this kind of disruption.
+
+Like `pg_repack`, `pg_squeeze` changes the visibility of rows and thus allows for the MVCC-unsafe behavior described in the first paragraph of the PostgreSQL documentation on [MVCC caveats](https://www.postgresql.org/docs/current/mvcc-caveats.html).
+
+## Disk space requirements
+
+Performing a full-table squeeze requires twice as much free disk space as the target table and its indexes. For example, if the total size of the tables and indexes to be squeezed is 1GB, you need an additional 2GB of disk space.
diff --git a/advocacy_docs/pg_extensions/wait_states/index.mdx b/advocacy_docs/pg_extensions/wait_states/index.mdx
index 938774ec354..21e78717259 100644
--- a/advocacy_docs/pg_extensions/wait_states/index.mdx
+++ b/advocacy_docs/pg_extensions/wait_states/index.mdx
@@ -4,6 +4,7 @@ indexCards: none
directoryDefaults:
product: EDB Wait States
navigation:
+ - rel_notes
- installing
- using
---
diff --git a/advocacy_docs/pg_extensions/wait_states/rel_notes/index.mdx b/advocacy_docs/pg_extensions/wait_states/rel_notes/index.mdx
new file mode 100644
index 00000000000..de0be9fde69
--- /dev/null
+++ b/advocacy_docs/pg_extensions/wait_states/rel_notes/index.mdx
@@ -0,0 +1,11 @@
+---
+title: 'EDB Wait States release notes'
+navTitle: "Release notes"
+indexCards: none
+---
+
+The EDB Wait States documentation describes the latest version of EDB Wait States, including minor releases and patches. These release notes cover what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators in the content about the release that introduced the feature.
+
+| Version | Release Date |
+| -------------------------------- | ------------ |
+| [1.2](wait_states_1.2_rel_notes) | 15 Feb 2024 |
diff --git a/advocacy_docs/pg_extensions/wait_states/rel_notes/wait_states_1.2_rel_notes.mdx b/advocacy_docs/pg_extensions/wait_states/rel_notes/wait_states_1.2_rel_notes.mdx
new file mode 100644
index 00000000000..b4966d0d159
--- /dev/null
+++ b/advocacy_docs/pg_extensions/wait_states/rel_notes/wait_states_1.2_rel_notes.mdx
@@ -0,0 +1,11 @@
+---
+title: Release notes for Wait States version 1.2
+navTitle: "Version 1.2"
+---
+
+This release of Wait States includes:
+
+| Type | Description |
+|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Enhancement | Added new parameters to the functions `edb_wait_states_samples()` and `edb_wait_states_sessions()`. |
+| Enhancement | Added new functions: `edb_wait_states_system_info`, `edb_wait_states_wait_events`, `edb_wait_states_sql_statements`, `edb_wait_states_cluster_stats`, and `edb_wait_states_cluster`. |
\ No newline at end of file
diff --git a/advocacy_docs/pg_extensions/wait_states/using.mdx b/advocacy_docs/pg_extensions/wait_states/using.mdx
index ba71842db68..57b3a624383 100644
--- a/advocacy_docs/pg_extensions/wait_states/using.mdx
+++ b/advocacy_docs/pg_extensions/wait_states/using.mdx
@@ -16,12 +16,15 @@ This information is saved in a set of files in a user-configurable path and dire
Each of the functions in the EDB Wait States interface has common input and output parameters. Those parameters are:
-| Parameter(s) | Input or output | Description |
-| ------------ | --------------- | ----------- |
-| `start_ts` and `end_ts` | Input | Together these specify the time interval and the data to read. If you specify only `start_ts`, the data starting from `start_ts` is output. If you specify only `end_ts`, data up to `end_ts` is output. If you don't specify either, all the data is output. |
-| `query_id` | Output | Identifies a normalized query. It's internal hash code computed from the query. |
-| `session_id` | Output | Identifies a session. |
-| `ref_start_ts` and `ref_end_ts` | Output | Provides the timestamps of a file containing a particular data point. A data point might be a wait event sample record, a query record, or a session record. |
+| Parameter(s) | Input or output | Description |
+|---------------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `start_ts` and `end_ts` | Input | Together these specify the time interval and the data to read. If you specify only `start_ts`, the data starting from `start_ts` is output. If you specify only `end_ts`, data up to `end_ts` is output. If you don't specify either, all the data is output. |
+| `query_id` | Output | Identifies a normalized query. It's an internal hash code computed from the query. |
+| `session_id` | Output | Identifies a session. |
+| `ref_start_ts` and `ref_end_ts` | Output | The timestamps of a file containing a particular data point. A data point might be a wait event sample record, a query record, or a session record. |
+| `wait_time` | Output | The amount of time in seconds spent waiting on wait events. |
+| `cpu_time` | Output | The amount of time in seconds spent working on the CPU. For this duration, the query wasn't waiting on any wait event. |
+| `db_time` | Output | The sum of `wait_time` and `cpu_time`. The `db_time`, `wait_time`, and `cpu_time` values aren't exact. They're approximations computed from the number of samples and the sampling interval. |
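+
+For example, to restrict any of these functions to a specific window, pass the common input parameters (a sketch with hypothetical timestamps):
+
+```sql
+SELECT * FROM edb_wait_states_data(
+    '2024-02-01 16:00:00+05:30',
+    '2024-02-01 17:00:00+05:30');
+```
+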
The following examples use a scenario where three queries are executed simultaneously on four different sessions connected to different databases using different users. Those three queries are:
@@ -34,7 +37,7 @@ SELECT tablename, schemaname FROM pg_tables, pg_sleep(10) WHERE schemaname
<> 'pg_catalog';
```
-### `edb_wait_states_data`
+## `edb_wait_states_data`
Use this function to read the data collected by the BGW:
@@ -77,41 +80,27 @@ You can use this function to find out the following:
FROM edb_wait_states_data();
```
-#### Parameters
+### Parameters
In addition to the common parameters described previously, each row of the output gives the following:
-`dbname`
+`dbname` — The session's database.
-The session's database.
+`username` — The session's logged-in user.
-`username`
+`query` — The query running in the session.
-The session's logged-in user.
+`query_start_time` — The time when the query started.
-`query`
+`sample_time` — The time when wait event data was collected.
-The query running in the session.
+`wait_event_type` — The type of wait event the session (backend) is waiting on.
-`query_start_time`
+`wait_event` — The wait event the session (backend) is waiting on.
-The time when the query started.
+### Example
-`sample_time`
-
-The time when wait event data was collected.
-
-`wait_event_type`
-
-The type of wait event the session (backend) is waiting on.
-
-`wait_event`
-
-The wait event the session (backend) is waiting on.
-
-#### Example
-
-The following is a sample output from the `edb_wait_states_data()` function:
+This example shows sample output from the `edb_wait_states_data()` function:
```sql
edb=# SELECT * FROM edb_wait_states_data();
@@ -148,7 +137,7 @@ wait_event | PgSleep
.
```
-### `edb_wait_states_queries`
+## `edb_wait_states_queries`
This function gives information about the queries sampled by the BGW. For example:
@@ -173,17 +162,15 @@ SELECT query FROM edb_wait_states_queries(start_ts, end_ts);
In other words, the function can output queries that didn't run in the given interval. To do that, use `edb_wait_states_data()`.
-#### Parameters
+### Parameters
In addition to the common parameters described previously, each row of the output gives the following:
-`query`
+`query` — Normalized query text.
-Normalized query text.
+### Example
-#### Example
-
-The following is a sample output from the `edb_wait_states_queries()` function:
+This example shows sample output from the `edb_wait_states_queries()` function:
```sql
edb=# SELECT * FROM edb_wait_states_queries();
@@ -205,7 +192,7 @@ ref_start_ts | 17-AUG-18 11:52:38.698793 -04:00
ref_end_ts | 18-AUG-18 11:52:38.698793 -04:00
```
-### `edb_wait_states_sessions`
+## `edb_wait_states_sessions`
This function gives information about the sessions sampled by the BGW:
@@ -216,8 +203,11 @@ edb_wait_states_sessions(
OUT session_id int4,
OUT text,
OUT text,
- OUT ref_start_ts timestamptz
- OUT ref_end_ts timestamptz
+ OUT ref_start_ts timestamptz,
+ OUT ref_end_ts timestamptz,
+ OUT application_name text,
+ OUT client_hostname text,
+ OUT session_start_ts timestamptz
)
```
@@ -230,52 +220,48 @@ SELECT dbname, username, session_id
Similar to `edb_wait_states_queries()`, this function outputs all the sessions logged in session files that contain sessions sampled in the given interval. It doesn't necessarily output only the sessions sampled in the given interval. To identify that, use `edb_wait_states_data()`.
-#### Parameters
+### Parameters
In addition to the common parameters described previously, each row of the output gives the following:
-`dbname`
+`dbname` — The database to which the session is connected.
+
+`username` — Login user of the session.
-The database to which the session is connected.
+`application_name` — Name of the application connection to the session.
-`username`
+`client_hostname` — Host name of the client machine.
-Login user of the session.
+`session_start_ts` — Actual timestamp when the session started.
-#### Example
+### Example
-The following is a sample output from the `edb_wait_states_sessions()` function:
+This example shows sample output from the `edb_wait_states_sessions()` function:
```sql
edb=# SELECT * FROM edb_wait_states_sessions();
__OUTPUT__
--[ RECORD 1 ]+---------------------------------
-session_id | 4340
-dbname | edb
-username | enterprisedb
-ref_start_ts | 17-AUG-18 11:52:38.698793 -04:00
-ref_end_ts | 18-AUG-18 11:52:38.698793 -04:00
--[ RECORD 2 ]+---------------------------------
-session_id | 4398
-dbname | edb
-username | enterprisedb
-ref_start_ts | 17-AUG-18 11:52:38.698793 -04:00
-ref_end_ts | 18-AUG-18 11:52:38.698793 -04:00
--[ RECORD 3 ]+---------------------------------
-session_id | 4410
-dbname | db1
-username | user1
-ref_start_ts | 17-AUG-18 11:52:38.698793 -04:00
-ref_end_ts | 18-AUG-18 11:52:38.698793 -04:00
--[ RECORD 4 ]+---------------------------------
-session_id | 4422
-dbname | db2
-username | user2
-ref_start_ts | 17-AUG-18 11:52:38.698793 -04:00
-ref_end_ts | 18-AUG-18 11:52:38.698793 -04:00
+-[ RECORD 1 ]---+---------------------------------
+session_id | 184365
+dbname | postgres
+username | dilip
+ref_start_ts | 01-FEB-24 15:59:56.283204 +05:30
+ref_end_ts | 02-FEB-24 15:59:56.283204 +05:30
+application_name | pgbench
+client_hostname |
+session_start_ts | 01-FEB-24 16:15:13.267287 +05:30
+-[ RECORD 2 ]---+---------------------------------
+session_id | 184344
+dbname | postgres
+username | dilip
+ref_start_ts | 01-FEB-24 15:59:56.283204 +05:30
+ref_end_ts | 02-FEB-24 15:59:56.283204 +05:30
+application_name | pgbench
+client_hostname |
+session_start_ts | 01-FEB-24 16:15:59.284207 +05:30
```
-### `edb_wait_states_samples`
+## `edb_wait_states_samples`
This function gives information about wait events sampled by the BGW:
@@ -288,73 +274,63 @@ edb_wait_states_samples(
OUT timestamptz,
OUT timestamptz,
OUT text,
- OUT text
+ OUT text,
+ OUT sampling_interval int4
)
```
Usually, you don't need to call this function directly.
-#### Parameters
+### Parameters
In addition to the common parameters described previously, each row of the output gives the following:
-`query_start_time`
-
-The time when the query started in this session.
-
-`sample_time`
-
-The time when wait event data was collected.
+`query_start_time` — The time when the query started in this session.
-`wait_event_type`
+`sample_time` — The time when wait event data was collected.
-The type of wait event on which the session is waiting.
+`wait_event_type` — The type of wait event on which the session is waiting.
-`wait_event`
+`wait_event` — The wait event on which the session (backend) is waiting.
-The wait event on which the session (backend) is waiting.
+`sampling_interval` — The time interval at which the sample is taken.
-#### Example
+### Example
-The following is a sample output from the `edb_wait_states_samples()` function:
+This example shows sample output from the `edb_wait_states_samples()` function:
```sql
edb=# SELECT * FROM edb_wait_states_samples();
__OUTPUT__
--[ RECORD 1 ]----+---------------------------------
-query_id | 4292540138852956818
-session_id | 4340
-query_start_time | 17-AUG-18 11:56:00.39421 -04:00
-sample_time | 17-AUG-18 11:56:00.699934 -04:00
-wait_event_type | Timeout
-wait_event | PgSleep
--[ RECORD 2 ]----+---------------------------------
-query_id | 4292540138852956818
-session_id | 4340
-query_start_time | 17-AUG-18 11:56:00.39421 -04:00
-sample_time | 17-AUG-18 11:56:01.699003 -04:00
-wait_event_type | Timeout
-wait_event | PgSleep
--[ RECORD 3 ]----+---------------------------------
-query_id | 4292540138852956818
-session_id | 4340
-query_start_time | 17-AUG-18 11:56:00.39421 -04:00
-sample_time | 17-AUG-18 11:56:02.70001 -04:00
-wait_event_type | Timeout
-wait_event | PgSleep
--[ RECORD 4 ]----+---------------------------------
-query_id | 4292540138852956818
-session_id | 4340
-query_start_time | 17-AUG-18 11:56:00.39421 -04:00
-sample_time | 17-AUG-18 11:56:03.700081 -04:00
-wait_event_type | Timeout
-wait_event | PgSleep
- .
+-[ RECORD 1 ]-----+---------------------------------
+query_id | -5489517304104177538
+session_id | 183864
+query_start_time | 01-FEB-24 16:04:40.292778 +05:30
+sample_time | 01-FEB-24 16:04:41.284071 +05:30
+wait_event_type | Timeout
+wait_event | PgSleep
+sampling_interval | 1
+-[ RECORD 2 ]-----+---------------------------------
+query_id | -5489517304104177538
+session_id | 183864
+query_start_time | 01-FEB-24 16:04:40.292778 +05:30
+sample_time | 01-FEB-24 16:04:42.284278 +05:30
+wait_event_type | Timeout
+wait_event | PgSleep
+sampling_interval | 1
+-[ RECORD 3 ]-----+---------------------------------
+query_id | -5489517304104177538
+session_id | 183864
+query_start_time | 01-FEB-24 16:04:40.292778 +05:30
+sample_time | 01-FEB-24 16:04:43.283385 +05:30
+wait_event_type | Timeout
+wait_event | PgSleep
+sampling_interval | 1
+ .
.
.
```
-### `edb_wait_states_purge`
+## `edb_wait_states_purge`
The function deletes all the sampled data files (queries, sessions, and wait-event samples) that were created after `start_ts` and aged (rotated) before `end_ts`:
@@ -369,7 +345,7 @@ Usually you don't need to run this function. The backend usually purges those ac
To find out how long the samples were retained, use `edb_wait_states_data()`.
-#### Example
+### Example
This code shows the `$PGDATA/edb_wait_states` directory before running `edb_wait_states_purge()`:
@@ -421,3 +397,369 @@ __OUTPUT__
on
(1 row)
```
+
+## `edb_wait_states_system_info`
+
+The function outputs the hostname, CPU information, and memory information for the server machine.
+
+```sql
+edb_wait_states_system_info(
+ OUT host_name text,
+ OUT cpu_info text,
+ OUT mem_info text
+)
+```
+
+### Parameters
+
+Each row of the output gives the following information:
+
+`host_name` — The hostname of the server machine.
+
+`cpu_info` — CPU information about the server machine.
+
+`mem_info` — Memory information about the server machine.
+
+### Example
+
+This example shows sample output from the `edb_wait_states_system_info()` function:
+
+```sql
+edb=# SELECT * FROM edb_wait_states_system_info();
+__OUTPUT__
+-[ RECORD 1 ]-------------------------------------------------------------------
+host_name | dilip_kumar
+ |
+cpu_info | processor : 0
+ | BogoMIPS : 48.00
+ | Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
+ | CPU implementer : 0x61
+ | CPU architecture: 8
+ | CPU variant : 0x0
+ | CPU part : 0x000
+ | CPU revision : 0
+ |
+ | processor : 1
+ | BogoMIPS : 48.00
+ | Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics
+ | CPU implementer : 0x61
+ | CPU architecture: 8
+ | CPU variant : 0x0
+ | CPU part : 0x000
+ | CPU revision : 0
+ ......
+ |
+mem_info | MemTotal: 7786664 kB
+ | MemFree: 422544 kB
+ | MemAvailable: 2044836 kB
+ | Buffers: 256 kB
+ | Cached: 1884380 kB
+ | SwapCached: 146316 kB
+ | Active: 3180572 kB
+ | Inactive: 2843108 kB
+ | Active(anon): 2448468 kB
+ | Inactive(anon): 1960812 kB
+ | Active(file): 732104 kB
+ .......
+```
+
+## `edb_wait_states_wait_events`
+
+The function outputs aggregated wait event information.
+
+```sql
+edb_wait_states_wait_events(
+ IN start_ts timestamptz default '-infinity'::timestamptz,
+ IN end_ts timestamptz default 'infinity'::timestamptz,
+  OUT waitevent text,
+ OUT wait_event_type text,
+ OUT waittime int8,
+ OUT pct_dbtime numeric
+)
+```
+
+### Parameters
+
+In addition to the common parameters described previously, each row of the output gives the following information:
+
+`waitevent` — The name of the wait event.
+
+`wait_event_type` — The type of wait event.
+
+`waittime` — The approximate wait time of this wait event (in seconds) based on the number of samples and the sampling interval from `edb_wait_states_samples`.
+
+`pct_dbtime` — The percentage of database time spent on this wait event.
+
+### Example
+
+This example shows sample output from the `edb_wait_states_wait_events()` function:
+
+```sql
+edb=# SELECT * FROM edb_wait_states_wait_events();
+__OUTPUT__
+-[ RECORD 1 ]---+------------------------
+waitevent |
+wait_event_type |
+waittime | 124
+pct_dbtime | 36.5781710914454277
+-[ RECORD 2 ]---+------------------------
+waitevent | DataFileRead
+wait_event_type | IO
+waittime | 92
+pct_dbtime | 27.1386430678466077
+-[ RECORD 3 ]---+------------------------
+waitevent | WALWrite
+wait_event_type | IO
+waittime | 3
+pct_dbtime | 0.88495575221238938053
+-[ RECORD 4 ]---+------------------------
+waitevent | BufFileWrite
+wait_event_type | IO
+waittime | 3
+pct_dbtime | 0.88495575221238938053
+ .
+ .
+ .
+```
+
+## `edb_wait_states_sql_statements`
+
+The function outputs database time, CPU time, and wait information for each SQL statement.
+
+```sql
+edb_wait_states_sql_statements(
+ IN start_ts timestamptz default '-infinity'::timestamptz,
+ IN end_ts timestamptz default 'infinity'::timestamptz,
+ OUT query_id int8,
+ OUT dbtime numeric,
+ OUT waittime numeric,
+ OUT cputime numeric,
+ OUT top_waitevent text,
+ OUT query text
+)
+```
+
+### Parameters
+
+In addition to the common parameters described previously, each row of the output gives the following information:
+
+`query_id` — The query ID of the SQL statement.
+
+`dbtime` — The total database time consumed by this statement. (Database time is the total time in seconds used to execute this statement, including CPU time as well as wait time.)
+
+`waittime` — The approximate wait time spent by this query.
+
+`cputime` — The approximate CPU time spent by this query.
+
+`top_waitevent` — The wait event on which this statement spent maximum time.
+
+`query` — The actual text of the query.
+
+### Example
+
+This example shows sample output from the `edb_wait_states_sql_statements()` function:
+
+```sql
+edb=# SELECT * FROM edb_wait_states_sql_statements();
+__OUTPUT__
+-[ RECORD 1 ]-+---------------------------------------------------
+query_id | -1697985474390439145
+dbtime | 188
+waittime | 188
+cputime | 0
+top_waitevent | DataFileRead
+query | vacuum analyze pgbench_accounts
+-[ RECORD 2 ]-+---------------------------------------------------
+query_id | 2577670717561330585
+dbtime | 143
+waittime | 52
+cputime | 91
+top_waitevent | WALSync
+query | copy pgbench_accounts from stdin with (freeze on)
+-[ RECORD 3 ]-+---------------------------------------------------
+query_id | -7684589253409855891
+dbtime | 250
+waittime | 204
+cputime | 46
+top_waitevent | WALWrite
+query | alter table pgbench_accounts add primary key (aid)
+```
+
+## `edb_wait_states_cluster_stats`
+
+The function outputs database, WAL, and session count information for a given time period. Each row shows the consolidated information for that time period.
+
+```sql
+edb_wait_states_cluster_stats(
+ IN start_ts timestamptz default '-infinity'::timestamptz,
+ IN end_ts timestamptz default 'infinity'::timestamptz,
+ OUT num_sessions int4,
+ OUT num_databases int4,
+ OUT wal_records int8,
+ OUT wal_fpi int8,
+ OUT wal_bytes int8,
+ OUT xact_commit int8,
+ OUT xact_rollback int8,
+ OUT blocks_fetched int8,
+ OUT blocks_hit int8,
+ OUT tuples_returned int8,
+ OUT tuples_fetched int8,
+ OUT tuples_inserted int8,
+ OUT tuples_updated int8,
+ OUT tuples_deleted int8,
+ OUT temp_files int8,
+ OUT temp_bytes int8,
+ OUT sample_ts timestamptz,
+ OUT last_wal_stats_reset timestamptz,
+ OUT last_db_stats_reset timestamptz
+)
+```
+
+### Parameters
+
+In addition to the common parameters described previously, each row of the output gives the following information:
+
+`num_sessions` — Number of active sessions.
+
+`num_databases` — Total number of databases.
+
+`wal_records` — Total number of WAL records generated.
+
+`wal_fpi` — Total number of WAL full page images generated.
+
+`wal_bytes` — Total amount of WAL generated in bytes.
+
+`xact_commit` — Number of transactions that have been committed.
+
+`xact_rollback` — Number of transactions that have been rolled back.
+
+`blocks_fetched` — Number of disk blocks accessed.
+
+`blocks_hit` — Number of times disk blocks were found already in the buffer cache.
+
+`tuples_returned` — Number of live rows fetched by sequential scans and index entries returned by index scans.
+
+`tuples_fetched` — Number of live rows fetched by index scans.
+
+`tuples_inserted` — Number of rows inserted by queries.
+
+`tuples_updated` — Number of rows updated by queries.
+
+`tuples_deleted` — Number of rows deleted by queries.
+
+`temp_files` — Number of temporary files created by queries.
+
+`temp_bytes` — Total amount of data written to temporary files by queries.
+
+`sample_ts` — Timestamp when this sample was taken.
+
+`last_wal_stats_reset` — Time when WAL statistics were last reset.
+
+`last_db_stats_reset` — Time when database statistics were last reset.
+
+### Example
+
+This example shows sample output from the `edb_wait_states_cluster_stats()` function:
+
+```sql
+edb=# SELECT * FROM edb_wait_states_cluster_stats();
+__OUTPUT__
+-[ RECORD 1 ]--------+---------------------------------
+num_sessions | 0
+num_databases | 4
+wal_records | 4358683
+wal_fpi | 8593
+wal_bytes | 419918702
+xact_commit | 557135
+xact_rollback | 143
+blocks_fetched | 30959799
+blocks_hit | 30955075
+tuples_returned | 3968546
+tuples_fetched | 2179331
+tuples_inserted | 793226
+tuples_updated | 1667862
+tuples_deleted | 2235
+temp_files | 8
+temp_bytes | 4063232
+sample_ts | 01-FEB-24 15:59:57.283591 +05:30
+last_wal_stats_reset | 01-FEB-24 15:49:12.976401 +05:30
+last_db_stats_reset | 01-JAN-00 05:30:00 +05:30
+-[ RECORD 2 ]--------+---------------------------------
+num_sessions | 0
+num_databases | 4
+wal_records | 4358683
+wal_fpi | 8593
+wal_bytes | 419918702
+xact_commit | 557135
+xact_rollback | 143
+blocks_fetched | 30959799
+blocks_hit | 30955075
+tuples_returned | 3968546
+tuples_fetched | 2179331
+tuples_inserted | 793226
+tuples_updated | 1667862
+tuples_deleted | 2235
+temp_files | 8
+temp_bytes | 4063232
+sample_ts | 01-FEB-24 15:59:58.285452 +05:30
+last_wal_stats_reset | 01-FEB-24 15:49:12.976401 +05:30
+last_db_stats_reset | 01-JAN-00 05:30:00 +05:30
+```
+
+## `edb_wait_states_cluster`
+
+The function outputs differential information for `edb_wait_states_cluster_stats` derived from two snapshots.
+
+```sql
+edb_wait_states_cluster(
+ IN start_ts timestamptz default '-infinity'::timestamptz,
+ IN end_ts timestamptz default 'infinity'::timestamptz,
+ OUT start_session int4,
+ OUT end_session int4,
+ OUT wal_records int8,
+ OUT wal_fpi int8,
+ OUT wal_bytes int8,
+ OUT xact_commit int8,
+ OUT xact_rollback int8,
+ OUT blocks_fetched int8,
+ OUT blocks_hit int8,
+ OUT tuples_returned int8,
+ OUT tuples_fetched int8,
+ OUT tuples_inserted int8,
+ OUT tuples_updated int8,
+ OUT tuples_deleted int8,
+ OUT temp_files int8,
+ OUT temp_bytes int8
+)
+```
+
+### Parameters
+
+The output for this function is the same as for `edb_wait_states_cluster_stats()`. However, instead of producing one record for each sample, the function produces a single record with differential values between `start_ts` and `end_ts`.
+
+!!! Note
+    If `pg_stat_wal` or `pg_stat_database` statistics are reset, or any database is dropped, between `start_ts` and `end_ts`, the output for the respective fields is -1. A database that's dropped and re-created might not be reliably detected, so the differential values might not be accurate.
+!!!
+
+### Example
+
+This example shows sample output from the `edb_wait_states_cluster()` function:
+
+```sql
+edb=# SELECT * FROM edb_wait_states_cluster();
+__OUTPUT__
+-[ RECORD 1 ]---+------------
+start_session | 0
+end_session | 1
+wal_records | 7723654
+wal_fpi | 481549
+wal_bytes | 14740067381
+xact_commit | 550880
+xact_rollback | 75
+blocks_fetched | 38997085
+blocks_hit | 37112621
+tuples_returned | 116057913
+tuples_fetched | 2203619
+tuples_inserted | 100546427
+tuples_updated | 1606127
+tuples_deleted | 125
+temp_files | 4
+temp_bytes | 2004926464
+```
diff --git a/build-sources.json b/build-sources.json
index f63e134e382..a2c27ddf443 100644
--- a/build-sources.json
+++ b/build-sources.json
@@ -11,6 +11,7 @@
"epas": true,
"pgd": true,
"pge": true,
+ "pwr": true,
"eprs": true,
"hadoop_data_adapter": true,
"jdbc_connector": true,
diff --git a/gatsby-config.js b/gatsby-config.js
index c62c61be3cc..5c9ff089fb5 100644
--- a/gatsby-config.js
+++ b/gatsby-config.js
@@ -94,6 +94,7 @@ const sourceToPluginConfig = {
},
pgpool: { name: "pgpool", path: "product_docs/docs/pgpool" },
postgis: { name: "postgis", path: "product_docs/docs/postgis" },
+ pwr: { name: "pwr", path: "product_docs/docs/pwr" },
repmgr: { name: "repmgr", path: "product_docs/docs/repmgr" },
slony: { name: "slony", path: "product_docs/docs/slony" },
tde: { name: "tde", path: "product_docs/docs/tde" },
diff --git a/product_docs/docs/biganimal/release/using_cluster/05_monitoring_and_logging/monitoring_using_pemx/index.mdx b/product_docs/docs/biganimal/release/using_cluster/05_monitoring_and_logging/monitoring_using_pemx/index.mdx
index 2c104fd454e..59ccf4597b9 100644
--- a/product_docs/docs/biganimal/release/using_cluster/05_monitoring_and_logging/monitoring_using_pemx/index.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/05_monitoring_and_logging/monitoring_using_pemx/index.mdx
@@ -52,7 +52,7 @@ By default, these charts displays the historical data of the last 15 minutes. To
## Features for both types of charts
-All these charts have tools and features that help you to get more information about the metrics or the chart. The [time-range picker](#time-range-picker) helps with viewing the data on these charts for a specific time-range interval. The [information tootip](#information-tooltip) helps you to view the information for a particular chart. The [charts error state](#charts-error-state) helps you to find the error and provides the option to edit the configurations and fix the error.
+All these charts have tools and features that help you to get more information about the metrics or the chart. The [time-range picker](#time-range-picker) helps with viewing the data on these charts for a specific time-range interval. The [information tooltip](#information-tooltip) helps you to view the information for a particular chart. The [charts error state](#charts-error-state) helps you to find the error and provides the option to edit the configurations and fix the error.
### Time-range picker
diff --git a/product_docs/docs/biganimal/release/using_cluster/extensions.mdx b/product_docs/docs/biganimal/release/using_cluster/extensions.mdx
index bb6bacf17f9..a6a16990e59 100644
--- a/product_docs/docs/biganimal/release/using_cluster/extensions.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/extensions.mdx
@@ -60,7 +60,7 @@ PostgreSQL contrib extensions/modules:
EDB extensions:
- edb_dbo
- sql-profiler
-- index_advisor
+- query_advisor
- refdata
- autocluster
- edb_pg_tuner
@@ -98,4 +98,4 @@ Use the [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextensi
Use the [pg_available_extensions](https://www.postgresql.org/docs/current/view-pg-available-extensions.html) view to see a list of all PostreSQL extensions.
-The [catalog_pg_extension](https://www.postgresql.org/docs/current/catalog-pg-extension.html) catalog stores information about the installed extensions.
\ No newline at end of file
+The [catalog_pg_extension](https://www.postgresql.org/docs/current/catalog-pg-extension.html) catalog stores information about the installed extensions.
diff --git a/product_docs/docs/epas/16/reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/limitations.mdx b/product_docs/docs/epas/16/reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/limitations.mdx
index 8d022dfd764..9c7eaf63abd 100644
--- a/product_docs/docs/epas/16/reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/limitations.mdx
+++ b/product_docs/docs/epas/16/reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/limitations.mdx
@@ -8,7 +8,7 @@ navTitle: Limitations
For Oracle compatibility, EDB Postgres Advanced Server allows you to use a `CREATE TABLE` statement to create a unique or primary key constraint on a partitioned table that includes a non-partitioned key column. This capability differs from PostgreSQL, and it has the following limitations:
-- The primary or unique key is only created on a child table. The key isn't created on a parent or root table.
+- The primary or unique key is created only on a child table. The key isn't created on a parent or root table.
- When using an `ALTER` statement to create a new partition, the primary or unique key isn't created automatically. You must create it manually.
@@ -18,17 +18,17 @@ For Oracle compatibility, EDB Postgres Advanced Server allows you to use a `CREA
Interval range partitioning is a useful capability, but it has the following limitations:
-- Interval partitioning is restricted to a single partition key. If you try to create or alter an existing partitioned table having a multi-column partitioned key, it will fail.
+- Interval partitioning is restricted to a single partition key. If you try to create or alter an existing partitioned table having a multi-column partitioned key, it fails.
- The supported key must be a numerical or date range type.
-- You can't define `DEFAULT` and `MAXVALUE` for an interval partitioned table.
+- You can't define `DEFAULT` and `MAXVALUE` for an interval-partitioned table.
-- Data to be inserted can't have `NULL`, `Not-a-Number`, or `Infinity` values specified in the partitioning key column.
+- Data to insert can't have `NULL`, `Not-a-Number`, or `Infinity` values specified in the partitioning key column.
-- The interval partitioning expression must yield a constant value and can't be a negative value.
+- The interval-partitioning expression must yield a constant value and can't be a negative value.
-- For the interval partitioned table, at least one partition should be defined first.
+- For the interval-partitioned table, define at least one partition first.
## Automatic partitioning limitations
@@ -37,8 +37,3 @@ Automatic list partitioning has the following limitations:
- Like interval partitioning, automatic partitioning is restricted to a single partition key.
- An automatic partitioned table can't have a `DEFAULT` partition.
-
-
-
-
-
diff --git a/product_docs/docs/net_connector/7.0.6.2/01_release_notes.mdx b/product_docs/docs/net_connector/7.0.6.2/01_release_notes.mdx
new file mode 100644
index 00000000000..9c1182fd1ed
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/01_release_notes.mdx
@@ -0,0 +1,14 @@
+---
+title: "Release notes"
+---
+
+Released: 15 Feb 2024
+
+The EDB .NET Connector provides connectivity between a .NET client application and an EDB Postgres Advanced Server database server.
+
+New features, enhancements, bug fixes, and other changes in the EDB .NET Connector `7.0.6.2` include:
+
+| Type | Description |
+|--------------|--------------------------------------------------------------------------------------------------------------------------------------------|
+| Enhancement | .NET packages are now available on [nuget.org](https://www.nuget.org/). |
+| Bug fix | Fixed an issue where any attempt to connect synchronously hung indefinitely when referencing the .NET Framework assembly using non-async code. |
\ No newline at end of file
diff --git a/product_docs/docs/net_connector/7.0.6.2/02_requirements_overview.mdx b/product_docs/docs/net_connector/7.0.6.2/02_requirements_overview.mdx
new file mode 100644
index 00000000000..fbd750391e3
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/02_requirements_overview.mdx
@@ -0,0 +1,20 @@
+---
+title: "Product compatibility"
+---
+
+
+
+These are the supported versions and platforms for the EDB .NET Connector.
+
+The EDB .NET Connector is certified with EDB Postgres Advanced Server version 11 and later.
+
+The EDB .NET Connector graphical installers are supported on the following Windows platforms:
+
+64-bit Windows:
+
+- Windows Server 2019 and 2022
+- Windows 10 and 11
+
+32-bit Windows:
+
+- Windows 10
diff --git a/product_docs/docs/net_connector/7.0.6.2/03_the_advanced_server_net_connector_overview.mdx b/product_docs/docs/net_connector/7.0.6.2/03_the_advanced_server_net_connector_overview.mdx
new file mode 100644
index 00000000000..660a94173a2
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/03_the_advanced_server_net_connector_overview.mdx
@@ -0,0 +1,40 @@
+---
+title: "EDB .NET Connector overview"
+
+---
+
+
+
+EDB .NET Connector is a .NET data provider that allows a client application to connect to a database stored on an EDB Postgres Advanced Server host. The .NET Connector accesses the data directly, allowing the client application optimal performance, a broad spectrum of functionality, and access to EDB Postgres Advanced Server features.
+
+The .NET Connector supports the following frameworks:
+
+- .NET 7.0
+- .NET 6.0
+- .NET Framework 4.7.2, 4.8, and 4.8.1
+- .NET Standard 2.0 and 2.1
+
+
+## The .NET class hierarchy
+
+The .NET class hierarchy contains classes that you can use to create objects that control a connection to the EDB Postgres Advanced Server database and manipulate the data stored on the server. The following are a few of the most commonly used object classes.
+
+`EDBDataSource`
+
+ `EDBDataSource` is the entry point for all the connections made to the database. It's responsible for issuing connections to the server and efficiently managing them. Starting with EDB .NET Connector 7.0.4.1, you no longer need direct instantiation of `EDBConnection`. Instantiate `EDBDataSource` and use the method provided to create commands or execute queries.
+
+`EDBConnection`
+
+ The `EDBConnection` class represents a connection to EDB Postgres Advanced Server. An `EDBConnection` object contains a `ConnectionString` that tells the .NET client how to connect to an EDB Postgres Advanced Server database. Obtain `EDBConnection` from an `EDBDataSource` instance, and use it directly only in specific scenarios, such as transactions.
+
+`EDBCommand`
+
+ An `EDBCommand` object contains an SQL command that the client executes against EDB Postgres Advanced Server. Before you can execute an `EDBCommand` object, you must link it to an `EDBConnection` object.
+
+`EDBDataReader`
+
+ An `EDBDataReader` object provides a way to read an EDB Postgres Advanced Server result set. You can use an `EDBDataReader` object to step through one row at a time, forward only.
+
+`EDBDataAdapter`
+
+ An `EDBDataAdapter` object links a result set to the EDB Postgres Advanced Server database. You can modify values and use the `EDBDataAdapter` class to update the data stored in an EDB Postgres Advanced Server database.
diff --git a/product_docs/docs/net_connector/7.0.6.2/04_installing_and_configuring_the_net_connector.mdx b/product_docs/docs/net_connector/7.0.6.2/04_installing_and_configuring_the_net_connector.mdx
new file mode 100644
index 00000000000..f7c68f4644a
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/04_installing_and_configuring_the_net_connector.mdx
@@ -0,0 +1,274 @@
+---
+title: "Installing and configuring the .NET Connector"
+
+---
+
+
+
+## Installing the .NET Connector
+
+You can use the EDB .NET Connector Installer (available [from the EDB website](https://www.enterprisedb.com/software-downloads-postgres)) to add the .NET Connector to your system.
+
+1. After downloading the installer, right-click the installer icon, and select **Run As Administrator** from the context menu. When prompted, select an installation language and select **OK** to continue to the Setup window.
+
+ ![The .NET Connector Installation wizard](images/dotnet_installation_wizard.png)
+
+1. Select **Next**.
+
+ ![The Installation dialog box](images/dotnet_installation_dialog.png)
+
+1. Use the Installation Directory dialog box to specify the directory in which to install the connector. Select **Next**.
+
+ ![The Ready to Install dialog box](images/ready_to_install.png)
+
+1. To start the installation, on the Ready to Install dialog box, select **Next**. Dialog boxes confirm the progress of the installation wizard.
+
+ ![The installation is complete](images/dotnet_installation_complete.png)
+
+1. When the wizard informs you that it has completed the setup, select **Finish**.
+
+You can also use StackBuilder Plus to add or update the connector on an existing EDB Postgres Advanced Server installation.
+
+1. To open StackBuilder Plus, from the Windows **Apps** menu, select **StackBuilder Plus**.
+
+ ![Starting StackBuilder Plus](images/starting_stackbuilder_plus.png)
+
+1. When StackBuilder Plus opens, follow the onscreen instructions.
+
+1. From the Database Drivers node of the tree control, select the **EnterpriseDB.Net Connector** option.
+
+ ![Selecting the Connectors installer](images/selecting_the_connectors_installer.png)
+
+1. Follow the directions of the onscreen wizard to add or update an installation of an EDB Connector.
+
+## Configuring the .NET Connector
+
+For information about configuring the .NET Connector in each environment, see:
+
+- **Referencing the library files.** [General configuration information](#referencing_the_library_files) applicable to all components.
+- **.NET 7.0.** Instructions for configuring for use with [.NET 7.0](#setup_7_0).
+- **.NET 6.0.** Instructions for configuring for use with [.NET 6.0](#setup_6_0).
+- **.NET Framework 4.7.2.** Instructions for configuring for use with [.NET Framework 4.7.2](#net-framework-472).
+- **.NET Framework 4.8.** Instructions for configuring for use with [.NET Framework 4.8](#net-framework-48).
+- **.NET Framework 4.8.1.** Instructions for configuring for use with [.NET Framework 4.8.1](#net-framework-481).
+- **.NET Standard 2.0.** Instructions for configuring for use with [.NET Standard 2.0](#standard_setup_2_0).
+- **.NET Standard 2.1.** Instructions for configuring for use with [.NET Standard 2.1](#standard_setup_2_1).
+- **.NET Entity Framework Core.** Instructions for configuring for use with [.NET Entity Framework Core](#entity_setup).
+
+### Referencing the library files
+
+
+
+To reference library files with Microsoft Visual Studio:
+
+1. Select the project in the Solution Explorer.
+2. Select **Project > Add Reference**.
+3. In the Add Reference dialog box, browse to select the appropriate library files.
+
+Optionally, you can copy the library files to the specified location.
+
+Before you can use an EDB .NET class, you must import the namespace into your program. Importing a namespace makes the compiler aware of the classes available within the namespace. The namespace is `EnterpriseDB.EDBClient`.
+
+The method you use to include the namespace varies by the type of application you're writing. For example, the following command imports a namespace into an `ASP.NET` page:
+
+```text
+ <% import namespace="EnterpriseDB.EDBClient" %>
+```
+
+To import a namespace into a C# application, use:
+
+```text
+ using EnterpriseDB.EDBClient;
+```
+
+### .NET framework setup
+
+Each .NET version has specific setup instructions.
+
+
+
+#### .NET 7.0
+
+For .NET 7.0, the data provider installation path is `C:\Program Files\edb\dotnet\net7.0\`.
+
+You must add the following dependencies to your project:
+
+- `EnterpriseDB.EDBClient.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and other information about referencing the library files.
+
+
+
+#### .NET 6.0
+
+For .NET 6.0, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\net6.0\`
+
+You must add the following dependencies to your project:
+
+- `EnterpriseDB.EDBClient.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and other information about referencing the library files.
+
+
+
+#### .NET Framework 4.7.2
+
+For .NET Framework 4.7.2, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\net472\`.
+
+You must add the following dependency to your project. You might also need to add other dependencies from the same directory:
+
+- `EnterpriseDB.EDBClient.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and other information about referencing the library files.
+
+#### .NET Framework 4.8
+
+For .NET Framework 4.8, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\net48\`.
+
+You must add the following dependency to your project. You might also need to add other dependencies from the same directory:
+
+- `EnterpriseDB.EDBClient.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and other information about referencing the library files.
+
+#### .NET Framework 4.8.1
+
+For .NET Framework 4.8.1, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\net481\`.
+
+You must add the following dependency to your project. You might also need to add other dependencies from the same directory:
+
+- `EnterpriseDB.EDBClient.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and other information about referencing the library files.
+
+
+
+#### .NET Standard 2.0
+
+For .NET Standard 2.0, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\netstandard2.0\`.
+
+You must add the following dependencies to your project:
+
+- `EnterpriseDB.EDBClient.dll`
+
+- `System.Threading.Tasks.Extensions.dll`
+
+- `System.Runtime.CompilerServices.Unsafe.dll`
+
+- `System.ValueTuple.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and the other information about referencing the library files.
+
+
+
+#### .NET Standard 2.1
+
+For .NET Standard 2.1, the data provider installation path is `C:\Program Files\edb\dotnet\netstandard2.1\`.
+
+The following shared library files are required:
+
+- `EnterpriseDB.EDBClient.dll`
+
+- `System.Memory.dll`
+
+- `System.Runtime.CompilerServices.Unsafe.dll`
+
+- `System.Text.Json.dll`
+
+- `System.Threading.Tasks.Extensions.dll`
+
+- `System.ValueTuple.dll`
+
+Depending on your application type, you might need to import the namespace into the source code. See [Referencing the library files](#referencing_the_library_files) for this and the other information about referencing the library files.
+
+
+
+#### .NET Entity Framework Core
+
+To configure the .NET Connector for use with Entity Framework Core, use either of these data provider installation paths:
+
+- `C:\Program Files\edb\dotnet\EF.Core\EFCore.PG\net7.0`
+
+- `C:\Program Files\edb\dotnet\EF.Core\EFCore.PG\net6.0`
+
+The following shared library file is required:
+
+- `EnterpriseDB.EDBClient.EntityFrameworkCore.PostgreSQL.dll`
+
+!!! Note
+
+ You can use Entity Framework Core with the `EnterpriseDB.EDBClient.dll` library available in the `net7.0` or `net6.0` subdirectory.
+
+
+See [Referencing the library files](#referencing_the_library_files) for information about referencing the library files.
+
+The following NuGet packages are required:
+
+- `Microsoft.EntityFrameworkCore.Design`
+
+- `Microsoft.EntityFrameworkCore.Relational`
+
+- `Microsoft.EntityFrameworkCore.Abstractions`
+
+
+For usage information about Entity Framework Core, see the [Microsoft documentation](https://learn.microsoft.com/en-us/ef/core/).
+
+**Prerequisite**
+
+To open a command prompt in Visual Studio, select **Tools > Command Line > Developer Command Prompt**.
+
+Using the command prompt, install dotnet-ef:
+
+ `dotnet tool install --global dotnet-ef`
+
+**Sample project**
+
+Create a new Console Application based on .NET 7.0 or .NET 6.0.
+
+Add references to the following EDB assemblies:
+
+- `EnterpriseDB.EDBClient.EntityFrameworkCore.PostgreSQL.dll`
+
+- `EnterpriseDB.EDBClient.dll`
+
+Add the following NuGet packages:
+
+- `Microsoft.EntityFrameworkCore.Design`
+
+- `Microsoft.EntityFrameworkCore.Relational`
+
+- `Microsoft.EntityFrameworkCore.Abstractions`
+
+**Database-first scenario**
+
+Issue the following command to create model classes corresponding to all objects in the specified database:
+
+```text
+dotnet ef dbcontext scaffold "Host=<host>;Database=<database>;Username=<username>;Password=<password>;Port=<port>" EnterpriseDB.EDBClient.EntityFrameworkCore.PostgreSQL -o Models
+```
+
+**Code-first scenario**
+
+Add code that defines a `DbContext` and performs create, read, update, and delete operations.
+
+For further details, see the Microsoft documentation.
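+
+For example, a minimal `BloggingContext` matching the context name used in the commands below might look like the following sketch. The `Blog` entity is hypothetical, and the `UseNpgsql` registration call assumes the EDB provider keeps the Npgsql-style extension method; replace the connection values with your own.
+
+```csharp
+using Microsoft.EntityFrameworkCore;
+
+public class Blog
+{
+    public int BlogId { get; set; }
+    public string Url { get; set; }
+}
+
+public class BloggingContext : DbContext
+{
+    public DbSet<Blog> Blogs { get; set; }
+
+    protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
+        // Placeholder connection values -- replace with your own server details.
+        => optionsBuilder.UseNpgsql("Host=localhost;Port=5444;Database=edb;Username=enterprisedb;Password=secret");
+}
+```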
+
+Issue the following commands to create the initial database and tables:
+
+```text
+ dotnet ef migrations add InitialCreate --context BloggingContext
+
+ dotnet ef database update --context BloggingContext
+```
diff --git a/product_docs/docs/net_connector/7.0.6.2/05_using_the_net_connector.mdx b/product_docs/docs/net_connector/7.0.6.2/05_using_the_net_connector.mdx
new file mode 100644
index 00000000000..dcdac30a376
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/05_using_the_net_connector.mdx
@@ -0,0 +1,24 @@
+---
+title: "Using the .NET Connector"
+
+---
+
+
+
+These examples show how to use the EDB object classes provided by the EDB .NET Connector, which allow a .NET application to connect to and interact with an EDB Postgres Advanced Server database.
+
+To use these examples, place the .NET library files in the same directory as the compiled form of your application. All of these examples are written in C#, and each is embedded in an ASP.NET page. The same logic and code applies to other .NET applications (WinForm or console applications, for example).
+
+Create and save the following `web.config` file in the same directory as the sample code. The examples use the `DB_CONN_STRING` key from this configuration file to obtain a connection string for the EDB Postgres Advanced Server host. The file that follows is a minimal example; replace the connection values with those for your environment.
+
+```text
+<?xml version="1.0"?>
+<configuration>
+  <appSettings>
+    <!-- Replace the value with the connection string for your server. -->
+    <add key="DB_CONN_STRING" value="Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb"/>
+  </appSettings>
+</configuration>
+```
+
+An EDB Postgres Advanced Server connection string for an ASP.NET web application is stored in the `web.config` file. If you're writing an application that doesn't use ASP.NET, provide the connection information in an application configuration file such as `app.config`.
diff --git a/product_docs/docs/net_connector/7.0.6.2/06_opening_a_database_connection.mdx b/product_docs/docs/net_connector/7.0.6.2/06_opening_a_database_connection.mdx
new file mode 100644
index 00000000000..8f5eb142b23
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/06_opening_a_database_connection.mdx
@@ -0,0 +1,278 @@
+---
+title: "Opening a database connection"
+
+---
+
+
+
+An `EDBConnection` object is responsible for handling the communication between an instance of EDB Postgres Advanced Server and a .NET application. Before you can access data stored in an EDB Postgres Advanced Server database, you must create and open an `EDBConnection` object.
+
+## Creating an EDBConnection object
+
+You can open a connection using one of the following approaches. In either case, you must import the namespace `EnterpriseDB.EDBClient`.
+
+### Connection with a data source
+
+1. Create an instance of the `EDBDataSource` object using a connection string as a parameter to the `Create` method of the `EDBDataSource` class.
+
+2. Call the `OpenConnection` method of the `EDBDataSource` object to open a connection.
+
+This example shows how to open a connection using a data source:
+
+ ```csharp
+ await using var dataSource = EDBDataSource.Create(ConnectionString);
+ var connection = dataSource.OpenConnection();
+ ```
+
+### Connection without a data source
+
+1. Create an instance of the `EDBConnection` object using a connection string as a parameter to the constructor of the `EDBConnection` class.
+
+2. Call the `Open` method of the `EDBConnection` object to open the connection.
+
+This example shows how to open a connection without a data source:
+
+ ```csharp
+ EDBConnection conn = new EDBConnection(ConnectionString);
+ conn.Open();
+ ```
+
+!!! Note
+ For `EnterpriseDB.EDBClient 7.0.4` and later, we recommend using `EDBDataSource` to connect to an EDB Postgres Advanced Server database or to execute SQL directly against it. For more information on data sources, see the [Npgsql documentation](https://www.npgsql.org/doc/basic-usage.html).
+
+
+
+
+## Connection string parameters
+
+A valid connection string specifies location and authentication information for an EDB Postgres Advanced Server instance. You must provide the connection string before opening the connection. A connection string must contain:
+
+- The name or IP address of the server
+- The name of the EDB Postgres Advanced Server database
+- The name of an EDB Postgres Advanced Server user
+- The password associated with that user
+
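+For example, a minimal connection string with placeholder values looks like this:
+
+```text
+Server=localhost;Port=5444;Database=edb;User Id=enterprisedb;Password=secret
+```
+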
+You can include the following parameters in the connection string:
+
+`CommandTimeout`
+
+ `CommandTimeout` specifies the length of time (in seconds) to wait for a command to finish executing before throwing an exception. The default value is `20`.
+
+`ConnectionLifeTime`
+
+ Use `ConnectionLifeTime` to specify the length of time (in seconds) to wait before closing unused connections in the pool. The default value is `15`.
+
+`Database`
+
+ Use the `Database` parameter to specify the name of the database for the application to connect to. The default is the name of the connecting user.
+
+`Encoding`
+
+ The `Encoding` parameter is obsolete. The parameter always returns the string `unicode` and silently ignores attempts to set it.
+
+`Integrated Security`
+
+ Specify a value of `true` to use Windows Integrated Security. By default, `Integrated Security` is set to `false`, and Windows Integrated Security is disabled.
+
+`Load Role Based Tables`
+
+ Use `Load Role Based Tables` to load table OIDs based on role. This setting affects only the loading of table type OIDs, not composite types. Setting this parameter to `true` enables the behavior. The default value is `false`.
+
+`MaxPoolSize`
+
+ `MaxPoolSize` instructs `EDBConnection` to dispose of pooled connections when the pool exceeds the specified number of connections. The default value is `20`.
+
+`MinPoolSize`
+
+ `MinPoolSize` instructs `EDBConnection` to preallocate the specified number of connections with the server. The default value is `1`.
+
+`Password`
+
+ When using clear text authentication, specify the password to use to establish a connection with the server.
+
+`Pooling`
+
+ Specify a value of `false` to disable connection pooling. By default, `Pooling` is set to `true` to enable connection pooling.
+
+`No Reset On Close`
+
+ When `Pooling` is enabled, closing a connection returns the underlying connection to the pool. When that connection is later reused, some operations run to discard the previous connection's resources. You can override this behavior by enabling `No Reset On Close`.
+
+`Port`
+
+ The `Port` parameter specifies the port for the application to connect to.
+
+`Protocol`
+
+ The specific protocol version to use (instead of automatic). Specify an integer value of `2` or `3`.
+
+`SearchPath`
+
+ Use the `SearchPath` parameter to change the search path to named and public schemas.
+
+`Server`
+
+ The name or IP address of the EDB Postgres Advanced Server host.
+
+`SSL`
+
+ Specify a value of `true` to attempt a secure connection. By default, `SSL` is set to `false`.
+
+`sslmode`
+
+ Use `sslmode` to specify an SSL connection control preference. `sslmode` can be:
+
+- `prefer` — Use SSL if possible.
+
+- `require` — Throw an exception if an SSL connection can't be established.
+
+- `allow` — Connect without SSL. This parameter isn't supported.
+
+- `disable` — Don't attempt an SSL connection. This is the default behavior.
+
+`SyncNotification`
+
+ Use the `SyncNotification` parameter to specify that the EDB data provider use synchronous notifications. The default value is `false`.
+
+`Timeout`
+
+ `Timeout` specifies the length of time (in seconds) to wait for an open connection. The default value is `15`.
+
+`User Id`
+
+ The `User Id` parameter specifies the user name to use for the connection.
+
+## Example: Opening a database connection using ASP.NET
+
+This example shows how to open a connection to an instance of EDB Postgres Advanced Server and then close the connection. The connection is established using the credentials specified in the `DB_CONN_STRING` configuration parameter. See [Using the .NET Connector](05_using_the_net_connector/#using_the_net_connector) for an introduction to connection information. Also see [Connection string parameters](#connection-string-parameters) for connection parameters.
+
+```cpp
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+If the connection is successful, a message appears indicating that the connection opened successfully.
+
+## Example: Opening a database connection from a console application
+
+This example opens a connection with an EDB Postgres Advanced Server database using a console-based application.
+
+Before writing the code for the console application, create an `app.config` file that stores the connection string to the database. Using a configuration file makes it convenient to update the connection string if the information changes. The file that follows is a minimal example; replace the connection values with those for your environment.
+
+```xml
+<?xml version="1.0"?>
+<configuration>
+  <appSettings>
+    <!-- Replace the value with the connection string for your server. -->
+    <add key="DB_CONN_STRING" value="Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb"/>
+  </appSettings>
+</configuration>
+```
+
+Enter the following code sample into a file:
+
+```csharp
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+using System.Configuration;
+using System.Threading.Tasks;
+namespace EnterpriseDB
+{
+ class EDB
+ {
+ static async Task Main(string[] args)
+ {
+ var strConnectionString = ConfigurationManager.AppSettings["DB_CONN_STRING"];
+ try
+ {
+ await using var dataSource = EDBDataSource.Create(strConnectionString);
+ var conn = dataSource.OpenConnection();
+ Console.WriteLine("Connection Opened Successfully");
+ conn.Close();
+ }
+ catch(Exception exp)
+ {
+ throw new Exception(exp.ToString());
+ }
+ }
+ }
+}
+```
+
+Save the file as `EDBConnection-Sample.cs` and compile it with the following command:
+
+```text
+csc /r:EnterpriseDB.EDBClient.dll /out:Console.exe EDBConnection-Sample.cs
+```
+
+Compiling the sample generates a `Console.exe` file. You can execute the sample code by entering `Console.exe`. When executed, the application displays a message confirming that the connection opened successfully.
+
+## Example: Opening a database connection from a Windows form application
+
+This example opens a database connection using a .NET WinForm application. To use the example, save the following code as `WinForm-Example.cs` in a directory that contains the library files.
+
+```csharp
+using System;
+using System.Windows.Forms;
+using System.Drawing;
+using EnterpriseDB.EDBClient;
+namespace EDBTestClient
+{
+ class Win_Conn
+ {
+ static void Main(string[] args)
+ {
+ Form frmMain = new Form();
+ Button btnConn = new Button();
+ btnConn.Location = new System.Drawing.Point(104, 64);
+ btnConn.Name = "btnConn";
+ btnConn.Text = "Open Connection";
+ btnConn.Click += new System.EventHandler(btnConn_Click);
+ frmMain.Controls.Add(btnConn);
+ frmMain.Text = "EnterpriseDB";
+ Application.Run(frmMain);
+ }
+ private static async void btnConn_Click(object sender, System.EventArgs e)
+ {
+ try
+ {
+ var strConnectionString = "Server=localhost;port=5444;username=edb;password=edb;database=edb";
+ await using var dataSource = EDBDataSource.Create(strConnectionString);
+ var conn = dataSource.OpenConnection();
+ MessageBox.Show("Connection Opened Successfully");
+ conn.Close();
+ }
+ catch(EDBException exp)
+ {
+ MessageBox.Show(exp.ToString());
+ }
+ }
+ }
+}
+```
+
+Change the database connection string to point to the database that you want to connect to. Then compile the file with the following command:
+
+```text
+csc /r:EnterpriseDB.EDBClient.dll /out:WinForm.exe WinForm-Example.cs
+```
+
+This command generates a `WinForm.exe` file in the same folder as the source file. Invoking the executable displays a message that the connection was successful.
diff --git a/product_docs/docs/net_connector/7.0.6.2/07_retrieving_database_records.mdx b/product_docs/docs/net_connector/7.0.6.2/07_retrieving_database_records.mdx
new file mode 100644
index 00000000000..c8c7cc6fb3a
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/07_retrieving_database_records.mdx
@@ -0,0 +1,102 @@
+---
+title: "Retrieving database records"
+
+---
+
+
+
+You can retrieve records from the database using a `SELECT` command. To execute a `SELECT` statement, you must:
+
+- Create and open a database connection.
+- Create an `EDBCommand` object that represents the `SELECT` statement.
+- Execute the command with the `ExecuteReader()` method of the `EDBCommand` object, which returns an `EDBDataReader`.
+- Loop through the `EDBDataReader`, displaying the results or binding the `EDBDataReader` to some control.
+
+An `EDBDataReader` object represents a forward-only and read-only stream of database records, presented one record at a time. To view a subsequent record in the stream, you must call the `Read()` method of the `EDBDataReader` object.
+
+The example that follows:
+
+1. Imports the EDB Postgres Advanced Server namespace `EnterpriseDB.EDBClient`.
+2. Initializes an `EDBCommand` object with a `SELECT` statement.
+3. Opens a connection to the database.
+4. Executes the `EDBCommand` by calling the `ExecuteReader` method of the `EDBCommand` object.
+
+The results of the SQL statement are retrieved into an `EDBDataReader` object.
+
+The example then loops through the contents of the `EDBDataReader` object in a `while` loop, displaying the records returned by the query.
+
+The `Read()` method advances to the next record (if there is one) and returns `true` if a record exists. It returns `false` if `EDBDataReader` has reached the end of the result set.
+
+```cpp
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+
+```
+
+To exercise the sample code, save the code in your default web root directory in a file named `selectEmployees.aspx`. Then, to invoke the program, enter the following URL in a browser: `http://localhost/selectEmployees.aspx`.
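+
+As a minimal console sketch of the same pattern (with placeholder connection values, assuming the `emp` sample table):
+
+```csharp
+using System;
+using EnterpriseDB.EDBClient;
+
+class SelectSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        using var command = new EDBCommand("SELECT empno, ename FROM emp", conn);
+        using var reader = command.ExecuteReader();
+
+        // Read() advances to the next record and returns false at the end of the result set.
+        while (reader.Read())
+            Console.WriteLine($"{reader[0]} {reader[1]}");
+    }
+}
+```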
+
+## Retrieving a single database record
+
+To retrieve a single result from a query, use the `ExecuteScalar()` method of the `EDBCommand` object. The `ExecuteScalar()` method returns the first column of the first row of the result set generated by the specified query.
+
+```cpp
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `selectscalar.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/selectScalar.aspx`
+
+The sample includes an explicit conversion of the value returned by the `ExecuteScalar()` method, which returns an object. To use the value as an integer, you must convert it with the `Convert.ToInt32` method.
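+
+As a minimal console sketch of `ExecuteScalar()` and the conversion (with placeholder connection values, assuming the `emp` sample table):
+
+```csharp
+using System;
+using EnterpriseDB.EDBClient;
+
+class ScalarSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        using var command = new EDBCommand("SELECT COUNT(*) FROM emp", conn);
+
+        // ExecuteScalar() returns an object; convert it explicitly before use.
+        int count = Convert.ToInt32(command.ExecuteScalar());
+        Console.WriteLine("Number of employees: " + count);
+    }
+}
+```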
diff --git a/product_docs/docs/net_connector/7.0.6.2/08_parameterized_queries.mdx b/product_docs/docs/net_connector/7.0.6.2/08_parameterized_queries.mdx
new file mode 100644
index 00000000000..c6e393c296f
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/08_parameterized_queries.mdx
@@ -0,0 +1,47 @@
+---
+title: "Parameterized queries"
+
+---
+
+
+
+A _parameterized query_ is a query with one or more parameter markers embedded in the SQL statement. Before executing a parameterized query, you must supply a value for each marker found in the text of the SQL statement.
+
+Parameterized queries are useful when you don't know the complete text of a query when you write your code. For example, the value referenced in a `WHERE` clause can be calculated from user input.
+
+As shown in the following example, you must declare the data type of each parameter specified in the parameterized query by creating an `EDBParameter` object and adding that object to the command's parameter collection. Then, you must specify a value for each parameter by setting the parameter's `Value` property.
+
+The example shows using a parameterized query with an `UPDATE` statement that increases an employee salary:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `updateSalary.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/updateSalary.aspx`
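+
+As a minimal console sketch of the same technique (with placeholder connection values, assuming the `emp` sample table and a hypothetical employee number):
+
+```csharp
+using System;
+using EnterpriseDB.EDBClient;
+
+class UpdateSalarySample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        // :ID is a parameter marker; a value is supplied before the query executes.
+        using var command = new EDBCommand("UPDATE emp SET sal = sal + 500 WHERE empno = :ID", conn);
+
+        // Declare the parameter and its data type, then set its value.
+        command.Parameters.Add(new EDBParameter("ID", EDBTypes.EDBDbType.Integer));
+        command.Parameters[0].Value = 7369;
+
+        command.ExecuteNonQuery();
+        Console.WriteLine("Record updated.");
+    }
+}
+```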
diff --git a/product_docs/docs/net_connector/7.0.6.2/09_inserting_records_in_a_database.mdx b/product_docs/docs/net_connector/7.0.6.2/09_inserting_records_in_a_database.mdx
new file mode 100644
index 00000000000..228224eb3b8
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/09_inserting_records_in_a_database.mdx
@@ -0,0 +1,47 @@
+---
+title: "Inserting records in a database"
+
+---
+
+
+
+You can use the `ExecuteNonQuery()` method of `EDBCommand` to add records to a database stored on an EDB Postgres Advanced Server host with an `INSERT` command.
+
+In the example that follows, the `INSERT` command is stored in the `EDBCommand` object `cmdInsert`. The values prefixed with a colon (`:`) are placeholders for `EDBParameter` objects that are instantiated, assigned values, and then added to the command's parameter collection. The `INSERT` command is executed by the `ExecuteNonQuery()` method of the `cmdInsert` object.
+
+The example adds an employee to the `emp` table:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `insertEmployee.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/insertEmployee.aspx`
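+
+As a minimal console sketch (with placeholder connection values, assuming the `emp` sample table and hypothetical column values):
+
+```csharp
+using System;
+using EnterpriseDB.EDBClient;
+
+class InsertEmployeeSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        // The colon-prefixed names are placeholders for EDBParameter values.
+        using var cmdInsert = new EDBCommand(
+            "INSERT INTO emp(empno, ename) VALUES(:EmpNo, :EmpName)", conn);
+
+        cmdInsert.Parameters.Add(new EDBParameter("EmpNo", 9001));
+        cmdInsert.Parameters.Add(new EDBParameter("EmpName", "SMITH"));
+
+        cmdInsert.ExecuteNonQuery();
+        Console.WriteLine("Record inserted.");
+    }
+}
+```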
diff --git a/product_docs/docs/net_connector/7.0.6.2/10_deleting_records_in_a_database.mdx b/product_docs/docs/net_connector/7.0.6.2/10_deleting_records_in_a_database.mdx
new file mode 100644
index 00000000000..8c797af0868
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/10_deleting_records_in_a_database.mdx
@@ -0,0 +1,44 @@
+---
+title: "Deleting records in a database"
+
+---
+
+
+
+You can use the `ExecuteNonQuery()` method of `EDBCommand` to delete records from a database stored on an EDB Postgres Advanced Server host with a `DELETE` statement.
+
+In the example that follows, the `DELETE` command is stored in the variable `strDeleteQuery`. The code passes the employee number specified by `EmpNo` to the `DELETE` command. The command is then executed using the `ExecuteNonQuery()` method.
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `deleteEmployee.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/deleteEmployee.aspx`
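+
+As a minimal console sketch (with placeholder connection values, assuming the `emp` sample table and a hypothetical employee number):
+
+```csharp
+using System;
+using EnterpriseDB.EDBClient;
+
+class DeleteEmployeeSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        using var command = new EDBCommand("DELETE FROM emp WHERE empno = :ID", conn);
+
+        // Pass the employee number to the DELETE command.
+        command.Parameters.Add(new EDBParameter("ID", 9001));
+
+        command.ExecuteNonQuery();
+        Console.WriteLine("Record deleted.");
+    }
+}
+```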
diff --git a/product_docs/docs/net_connector/7.0.6.2/11_using_spl_stored_procedures_in_your_net_application.mdx b/product_docs/docs/net_connector/7.0.6.2/11_using_spl_stored_procedures_in_your_net_application.mdx
new file mode 100644
index 00000000000..51a2c02f6f6
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/11_using_spl_stored_procedures_in_your_net_application.mdx
@@ -0,0 +1,340 @@
+---
+title: "Using SPL stored procedures in your .NET application"
+
+---
+
+
+
+You can include SQL statements in an application in two ways:
+
+- By adding the SQL statements directly in the .NET application code
+- By packaging the SQL statements in a stored procedure and executing the stored procedure from the .NET application
+
+In some cases, a stored procedure can provide advantages over embedded SQL statements. Stored procedures support complex conditional and looping constructs that are difficult to duplicate with SQL statements embedded directly in an application.
+
+You can also see an improvement in performance by using stored procedures. A stored procedure needs to be parsed, compiled, and optimized only once on the server side. A SQL statement that's included in an application might be parsed, compiled, and optimized each time it's executed from a .NET application.
+
+To use a stored procedure in your .NET application you must:
+
+1. Create an SPL stored procedure on the EDB Postgres Advanced Server host.
+2. Import the `EnterpriseDB.EDBClient` namespace.
+3. Pass the name of the stored procedure to the instance of the `EDBCommand`.
+4. Change the `EDBCommand.CommandType` to `CommandType.StoredProcedure`.
+5. `Prepare()` the command.
+6. Execute the command.
+
+## Example: Executing a stored procedure without parameters
+
+This sample procedure prints the name of department 10. The procedure takes no parameters and returns no parameters. To create the sample procedure, invoke EDB-PSQL and connect to the EDB Postgres Advanced Server host database. Enter the following SPL code at the command line:
+
+```sql
+CREATE OR REPLACE PROCEDURE list_dept10
+IS
+ v_deptname VARCHAR2(30);
+BEGIN
+ DBMS_OUTPUT.PUT_LINE('Dept No: 10');
+ SELECT dname INTO v_deptname FROM dept WHERE deptno = 10;
+ DBMS_OUTPUT.PUT_LINE('Dept Name: ' || v_deptname);
+END;
+```
+
+When EDB Postgres Advanced Server validates the stored procedure, it echoes `CREATE PROCEDURE`.
+
+### Using the EDBCommand object to execute a stored procedure
+
+The `CommandType` property of the `EDBCommand` object indicates the type of command being executed. The `CommandType` property is set to one of three possible `CommandType` enumeration values:
+
+- Use the default `Text` value when passing a SQL string for execution.
+- Use the `StoredProcedure` value, passing the name of a stored procedure for execution.
+- Use the `TableDirect` value when passing a table name. This value passes back all records in the specified table.
+
+The `CommandText` property must contain a SQL string, stored procedure name, or table name, depending on the value of the `CommandType` property.
+
+The following example executes the stored procedure:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `storedProc.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/storedProc.aspx`
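+
+As a minimal console sketch of the same call (with placeholder connection values, assuming the `list_dept10` procedure created above):
+
+```csharp
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+
+class StoredProcSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        // Pass the procedure name and set CommandType to StoredProcedure.
+        using var command = new EDBCommand("list_dept10", conn);
+        command.CommandType = CommandType.StoredProcedure;
+
+        command.Prepare();
+        command.ExecuteNonQuery();
+        Console.WriteLine("Stored procedure executed.");
+    }
+}
+```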
+
+## Example: Executing a stored procedure with IN parameters
+
+This example calls a stored procedure that includes `IN` parameters. To create the sample procedure, invoke EDB-PSQL and connect to the EDB Postgres Advanced Server host database. Enter the following SPL code at the command line:
+
+```sql
+CREATE OR REPLACE PROCEDURE
+ EMP_INSERT
+ (
+ pENAME IN VARCHAR,
+ pJOB IN VARCHAR,
+ pSAL IN FLOAT4,
+ pCOMM IN FLOAT4,
+ pDEPTNO IN INTEGER,
+ pMgr IN INTEGER
+ )
+AS
+DECLARE
+ CURSOR TESTCUR IS SELECT MAX(EMPNO) FROM EMP;
+ MAX_EMPNO INTEGER := 10;
+BEGIN
+
+ OPEN TESTCUR;
+ FETCH TESTCUR INTO MAX_EMPNO;
+ INSERT INTO EMP(EMPNO,ENAME,JOB,SAL,COMM,DEPTNO,MGR)
+ VALUES(MAX_EMPNO+1,pENAME,pJOB,pSAL,pCOMM,pDEPTNO,pMgr);
+ CLOSE testcur;
+END;
+
+```
+
+When EDB Postgres Advanced Server validates the stored procedure, it echoes `CREATE PROCEDURE`.
+
+### Passing input values to a stored procedure
+
+Calling a stored procedure that contains parameters is similar to executing a stored procedure without parameters. The major difference is that, when calling a parameterized stored procedure, you must use the `EDBParameter` collection of the `EDBCommand` object. When the `EDBParameter` is added to the `EDBCommand` collection, properties such as `ParameterName`, `DbType`, `Direction`, `Size`, and `Value` are set.
+
+This example shows the process of executing a parameterized stored procedure from a C# script:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+Save the sample code in a file named `storedProcInParam.aspx` in a web root directory.
+
+To invoke the sample code, enter the following in a browser: `http://localhost/storedProcInParam.aspx`
+
+In the example, the body of the `Page_Load` method declares and instantiates an `EDBConnection` object. The sample then creates an `EDBCommand` object with the properties needed to execute the stored procedure.
+
+The example then uses the `Add` method of the `EDBCommand` object's `Parameters` collection to add six input parameters.
+
+```cpp
+EDBCommand cmdStoredProc = new EDBCommand
+("emp_insert(:EmpName,:Job,:Salary,:Commission,:DeptNo,:Manager)",conn);
+cmdStoredProc.CommandType = CommandType.StoredProcedure;
+```
+
+The example assigns a value to each parameter before passing the parameters to the `EMP_INSERT` stored procedure.
+
+The `Prepare()` method prepares the statement before calling the `ExecuteNonQuery()` method.
+
+The `ExecuteNonQuery` method of the `EDBCommand` object executes the stored procedure. After the stored procedure executes, a test record is inserted into the `emp` table, and the values inserted are displayed on the web page.
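+
+Putting those steps together, a minimal console sketch of the call (with placeholder connection and employee values) looks like this:
+
+```csharp
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+
+class EmpInsertSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        using var cmdStoredProc = new EDBCommand(
+            "emp_insert(:EmpName,:Job,:Salary,:Commission,:DeptNo,:Manager)", conn);
+        cmdStoredProc.CommandType = CommandType.StoredProcedure;
+
+        // Add the six IN parameters and assign a value to each one.
+        cmdStoredProc.Parameters.Add(new EDBParameter("EmpName", "JONES"));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Job", "CLERK"));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Salary", 950.0f));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Commission", 0.0f));
+        cmdStoredProc.Parameters.Add(new EDBParameter("DeptNo", 20));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Manager", 7839));
+
+        // Prepare the statement, then execute the stored procedure.
+        cmdStoredProc.Prepare();
+        cmdStoredProc.ExecuteNonQuery();
+        Console.WriteLine("Record inserted into emp.");
+    }
+}
+```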
+
+## Example: Executing a stored procedure with IN, OUT, and INOUT parameters
+
+The previous example showed how to pass `IN` parameters to a stored procedure. The following examples show how to pass `IN` values and return `OUT` values from a stored procedure.
+
+### Creating the stored procedure
+
+The following stored procedure passes the department number and returns the corresponding location and department name. To create the sample procedure, invoke EDB-PSQL and connect to the EDB Postgres Advanced Server host database. Enter the following SPL code at the command line:
+
+```sql
+CREATE OR REPLACE PROCEDURE
+ DEPT_SELECT
+ (
+ pDEPTNO IN INTEGER,
+ pDNAME OUT VARCHAR,
+ pLOC OUT VARCHAR
+ )
+AS
+DECLARE
+ CURSOR TESTCUR IS SELECT DNAME,LOC FROM DEPT;
+ REC RECORD;
+BEGIN
+
+ OPEN TESTCUR;
+ FETCH TESTCUR INTO REC;
+
+ pDNAME := REC.DNAME;
+ pLOC := REC.LOC;
+
+ CLOSE testcur;
+END;
+```
+
+When EDB Postgres Advanced Server validates the stored procedure, it echoes `CREATE PROCEDURE`.
+
+### Receiving output values from a stored procedure
+
+When retrieving values from `OUT` parameters, you must explicitly specify the direction of those parameters as `Output`. You can retrieve the values from `Output` parameters in two ways:
+
+- Call the `ExecuteReader` method of the `EDBCommand` and explicitly loop through the returned `EDBDataReader`, searching for the values of `OUT` parameters.
+- Call the `ExecuteNonQuery` method of `EDBCommand` and get the value of a declared `Output` parameter by reading that `EDBParameter` object's `Value` property.
+
+In each method, you must declare each parameter, indicating the direction of the parameter (`ParameterDirection.Input`, `ParameterDirection.Output`, or `ParameterDirection.InputOutput`). Before invoking the procedure, you must provide a value for each `IN` and `INOUT` parameter. After the procedure returns, you can retrieve the `OUT` and `INOUT` parameter values from the `command.Parameters[]` array.
+
+This code shows using the `ExecuteReader` method to retrieve a result set:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
+
+This code shows using the `ExecuteNonQuery` method to retrieve the `OUT` parameter values:
+
+```cpp
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+
+```
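+
+As a minimal console sketch of the `ExecuteNonQuery` approach (with placeholder connection values, assuming the `DEPT_SELECT` procedure created above):
+
+```csharp
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+
+class DeptSelectSample
+{
+    static void Main()
+    {
+        // Placeholder connection values -- replace with your own server details.
+        var connString = "Server=localhost;Port=5444;User Id=enterprisedb;Password=secret;Database=edb";
+        using var dataSource = EDBDataSource.Create(connString);
+        using var conn = dataSource.OpenConnection();
+
+        using var command = new EDBCommand("DEPT_SELECT", conn);
+        command.CommandType = CommandType.StoredProcedure;
+
+        // Declare each parameter with an explicit direction.
+        var pDeptno = new EDBParameter("pDEPTNO", 10);
+        pDeptno.Direction = ParameterDirection.Input;
+        command.Parameters.Add(pDeptno);
+
+        var pDname = new EDBParameter("pDNAME", EDBTypes.EDBDbType.Varchar);
+        pDname.Direction = ParameterDirection.Output;
+        command.Parameters.Add(pDname);
+
+        var pLoc = new EDBParameter("pLOC", EDBTypes.EDBDbType.Varchar);
+        pLoc.Direction = ParameterDirection.Output;
+        command.Parameters.Add(pLoc);
+
+        command.Prepare();
+        command.ExecuteNonQuery();
+
+        // After the call returns, read the OUT values from the Parameters collection.
+        Console.WriteLine("Dept Name: " + command.Parameters["pDNAME"].Value);
+        Console.WriteLine("Location:  " + command.Parameters["pLOC"].Value);
+    }
+}
+```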
diff --git a/product_docs/docs/net_connector/7.0.6.2/12_using_advanced_queueing.mdx b/product_docs/docs/net_connector/7.0.6.2/12_using_advanced_queueing.mdx
new file mode 100644
index 00000000000..1a926587a89
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/12_using_advanced_queueing.mdx
@@ -0,0 +1,492 @@
+---
+title: "Using advanced queueing"
+
+---
+
+
+
+EDB Postgres Advanced Server advanced queueing provides message queueing and message processing for the EDB Postgres Advanced Server database. User-defined messages are stored in a queue. A collection of queues is stored in a queue table. Create a queue table before creating a queue that depends on it.
+
+On the server side, procedures in the `DBMS_AQADM` package create and manage message queues and queue tables. Use the `DBMS_AQ` package to add messages to or remove messages from a queue or register or unregister a PL/SQL callback procedure. For more information about `DBMS_AQ` and `DBMS_AQADM`, see [DBMS_AQ](/epas/latest/reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/).
+
+On the client side, the application uses the EDB.NET driver to enqueue and dequeue messages.
+
+## Enqueueing or dequeueing a message
+
+For more information about using EDB Postgres Advanced Server's advanced queueing functionality, see [Built-in packages](/epas/latest/reference/oracle_compatibility_reference/epas_compat_bip_guide/).
+
+### Server-side setup
+
+To use advanced queueing functionality on your .NET application, you must first create a user-defined type, queue table, and queue, and then start the queue on the database server. Invoke EDB-PSQL and connect to the EDB Postgres Advanced Server host database. Use the following SPL commands at the command line:
+
+#### Creating a user-defined type
+
+To specify a RAW data type, create a user-defined type. This example shows creating a user-defined type named `myxml`:
+
+```sql
+CREATE TYPE myxml AS (value XML);
+```
+
+#### Creating the queue table
+
+A queue table can hold multiple queues with the same payload type. This example shows creating a table named `MSG_QUEUE_TABLE`:
+
+```sql
+BEGIN
+DBMS_AQADM.CREATE_QUEUE_TABLE
+ (queue_table => 'MSG_QUEUE_TABLE',
+ queue_payload_type => 'myxml',
+ comment => 'Message queue table');
+END;
+```
+
+#### Creating the queue
+
+This example shows creating a queue named `MSG_QUEUE` in the table `MSG_QUEUE_TABLE`:
+
+```sql
+BEGIN
+DBMS_AQADM.CREATE_QUEUE ( queue_name => 'MSG_QUEUE', queue_table => 'MSG_QUEUE_TABLE', comment => 'This queue contains pending messages.');
+END;
+```
+
+#### Starting the queue
+
+Once the queue is created, invoke the following SPL code at the command line to start the queue in the EDB database:
+
+```sql
+BEGIN
+DBMS_AQADM.START_QUEUE
+(queue_name => 'MSG_QUEUE');
+END;
+```
+
+### Client-side example
+
+Once you've created the user-defined type, queue table, and queue, and started the queue, you can enqueue or dequeue messages using the EDB .NET driver.
+
+#### Enqueue a message
+
+To enqueue a message on your .NET application, you must:
+
+1. Import the `EnterpriseDB.EDBClient` namespace.
+2. Pass the name of the queue and create the instance of the `EDBAQQueue`.
+3. Create the enqueue message and define a payload.
+4. Call the `queue.Enqueue` method.
+
+The following code shows using the `queue.Enqueue` method.
+
+!!! Note
+ This code creates the message and serializes it as XML. It's example code and doesn't compile if copied as is: the `QueuedEntities.Message` and `Utils.XmlFragmentSerializer` types stand in for your own message type and XML serialization code.
+
+```csharp
+using EnterpriseDB.EDBClient;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace AQXml
+{
+ class MyXML
+ {
+ public string value { get; set; }
+ }
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ int messagesToSend = 1;
+ if (args.Length > 0 && !string.IsNullOrEmpty(args[0]))
+ {
+ messagesToSend = int.Parse(args[0]);
+ }
+ for (int i = 0; i < messagesToSend; i++)
+ {
+ EnqueMsg("test message: " + i);
+ }
+ }
+
+ private static EDBConnection GetConnection()
+ {
+ string connectionString = "Server=127.0.0.1;Host=127.0.0.1;Port=5444;User Id=enterprisedb;Password=test;Database=edb;Timeout=999";
+ EDBConnection connection = new EDBConnection(connectionString);
+ connection.Open();
+ return connection;
+ }
+
+
+ private static string ByteArrayToString(byte[] byteArray)
+ {
+ // Sanity check if it's null so we don't incur overhead of an exception
+ if (byteArray == null)
+ {
+ return string.Empty;
+ }
+ try
+ {
+ StringBuilder hex = new StringBuilder(byteArray.Length * 2);
+ foreach (byte b in byteArray)
+ {
+ hex.AppendFormat("{0:x2}", b);
+ }
+
+ return hex.ToString().ToUpper();
+ }
+ catch
+ {
+ return string.Empty;
+ }
+ }
+
+ private static bool EnqueMsg(string msg)
+ {
+ EDBConnection con = GetConnection();
+ using (EDBAQQueue queue = new EDBAQQueue("MSG_QUEUE", con))
+ {
+ queue.MessageType = EDBAQMessageType.Xml;
+ EDBTransaction txn = queue.Connection.BeginTransaction();
+ QueuedEntities.Message queuedMessage = new QueuedEntities.Message() { MessageText = msg };
+
+ try
+ {
+ string rootElementName = queuedMessage.GetType().Name;
+ if (rootElementName.IndexOf(".") != -1)
+ {
+ rootElementName = rootElementName.Split('.').Last();
+ }
+
+ string xml = new Utils.XmlFragmentSerializer().Serialize(queuedMessage);
+ EDBAQMessage queMsg = new EDBAQMessage();
+ queMsg.Payload = new MyXML { value = xml };
+ queue.MessageType = EDBAQMessageType.Udt;
+ queue.UdtTypeName = "myxml";
+ EDBConnection.GlobalTypeMapper.MapComposite("myxml");
+ con.ReloadTypes();
+ queue.Enqueue(queMsg);
+ var messageId = ByteArrayToString((byte[])queMsg.MessageId);
+ Console.WriteLine("MessageID: " + messageId);
+ txn.Commit();
+ queMsg = null;
+ xml = null;
+ rootElementName = null;
+ return true;
+ }
+ catch (Exception ex)
+ {
+ txn?.Rollback();
+ Console.WriteLine("Failed to enqueue message.");
+ Console.WriteLine(ex.ToString());
+ return false;
+ }
+ finally
+ {
+ queue?.Connection?.Dispose();
+ }
+ }
+ }
+
+ }
+}
+```
+
+#### Dequeueing a message
+
+To dequeue a message on your .NET application, you must:
+
+1. Import the `EnterpriseDB.EDBClient` namespace.
+2. Pass the name of the queue and create the instance of the `EDBAQQueue`.
+3. Call the `queue.Dequeue` method.
+
+!!! Note
+ The following code receives the message and deserializes it from XML. It's example code and doesn't compile if copied as is: the `QueuedEntities.Message` and `Utils.XmlFragmentSerializer` types stand in for your own message type and XML serialization code.
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using EnterpriseDB.EDBClient;
+
+namespace DequeueXML
+{
+ class MyXML
+ {
+ public string value { get; set; }
+ }
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ DequeMsg();
+ }
+
+
+ private static EDBConnection GetConnection()
+ {
+ string connectionString = "Server=127.0.0.1;Host=127.0.0.1;Port=5444;User Id=enterprisedb;Password=test;Database=edb;Timeout=999";
+ EDBConnection connection = new EDBConnection(connectionString);
+ connection.Open();
+ return connection;
+ }
+
+
+ private static string ByteArrayToString(byte[] byteArray)
+ {
+ // Sanity check if it's null so we don't incur overhead of an exception
+ if (byteArray == null)
+ {
+ return string.Empty;
+ }
+ try
+ {
+ StringBuilder hex = new StringBuilder(byteArray.Length * 2);
+ foreach (byte b in byteArray)
+ {
+ hex.AppendFormat("{0:x2}", b);
+ }
+
+ return hex.ToString().ToUpper();
+ }
+ catch
+ {
+ return string.Empty;
+ }
+ }
+ public static void DequeMsg(int waitTime = 10)
+ {
+ EDBConnection con = GetConnection();
+ using (EDBAQQueue queueListen = new EDBAQQueue("MSG_QUEUE", con))
+ {
+ queueListen.UdtTypeName = "myxml";
+ queueListen.DequeueOptions.Navigation = EDBAQNavigationMode.FIRST_MESSAGE;
+ queueListen.DequeueOptions.Visibility = EDBAQVisibility.ON_COMMIT;
+ queueListen.DequeueOptions.Wait = 1;
+ EDBTransaction txn = null;
+
+ while (1 == 1)
+ {
+
+ if (queueListen.Connection.State == System.Data.ConnectionState.Closed)
+ {
+ queueListen.Connection.Open();
+ }
+
+ string messageId = "Unknown";
+ try
+ {
+ // the listen function is a blocking function. It will Wait the specified waitTime or until a
+ // message is received.
+ Console.WriteLine("Listening...");
+ string v = queueListen.Listen(null, waitTime);
+ // If we are waiting for a message and we specify a Wait time,
+ // then if there are no more messages, we want to just bounce out.
+ if (waitTime > -1 && v == null)
+ {
+ Console.WriteLine("No message received during Wait period.");
+ Console.WriteLine();
+ continue;
+ }
+
+ // once we're here that means a message has been detected in the queue. Let's deal with it.
+ txn = queueListen.Connection.BeginTransaction();
+
+ Console.WriteLine("Attempting to dequeue message...");
+ // dequeue the message
+ EDBAQMessage deqMsg;
+ try
+ {
+ deqMsg = queueListen.Dequeue();
+ }
+ catch (Exception ex)
+ {
+ if (ex.Message.Contains("ORA-25228"))
+ {
+ Console.WriteLine("Message was not there. Another process must have picked it up.");
+ Console.WriteLine();
+ txn.Rollback();
+ continue;
+ }
+ else
+ {
+ throw;
+ }
+ }
+
+ messageId = ByteArrayToString((byte[])deqMsg.MessageId);
+ if (deqMsg != null)
+ {
+ Console.WriteLine("Processing received message...");
+ // process the message payload
+ MyXML obj = (MyXML) deqMsg.Payload;
+
+ QueuedEntities.Message msg = new Utils.XmlFragmentSerializer().Deserialize(obj.value);
+
+ Console.WriteLine("Received Message:");
+ Console.WriteLine("MessageID: " + messageId);
+ Console.WriteLine("Message: " + msg.MessageText);
+ Console.WriteLine("Enqueue Time" + queueListen.MessageProperties.EnqueueTime);
+
+ txn.Commit();
+
+ Console.WriteLine("Finished processing message");
+ Console.WriteLine();
+
+ }
+ else
+ {
+ Console.WriteLine("Message was not dequeued.");
+ }
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine("Failed To dequeue or process the dequeued message.");
+ Console.WriteLine(ex.ToString());
+ Console.WriteLine();
+ if (txn != null)
+ {
+ txn.Rollback();
+ txn.Dispose();
+ }
+ }
+ }
+ }
+
+ }
+ }
+}
+```
+
+## EDBAQ classes
+
+The following EDBAQ classes are used in this application.
+
+### EDBAQDequeueMode
+
+The `EDBAQDequeueMode` class lists all the available dequeue modes.
+
+| Value | Description |
+| ------------- | ------------------------------------------------------------- |
+| Browse | Reads the message without locking. |
+| Locked | Reads and gets a write lock on the message. |
+| Remove | Deletes the message after reading. This is the default value. |
+| Remove_NoData | Confirms receipt of the message. |
+
+### EDBAQDequeueOptions
+
+The `EDBAQDequeueOptions` class lists the options available when dequeuing a message.
+
+| Property | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| ConsumerName | The name of the consumer for which to dequeue the message. |
+| DequeueMode | Set from `EDBAQDequeueMode`. It represents the locking behavior linked with the dequeue option. |
+| Navigation | Set from `EDBAQNavigationMode`. It represents the position of the message to fetch. |
+| Visibility | Set from `EDBAQVisibility`. It represents whether the new message is dequeued as part of the current transaction. |
+| Wait | The wait time for a message as per the search criteria. |
+| Msgid | The message identifier. |
+| Correlation | The correlation identifier. |
+| DeqCondition | The dequeue condition. It's a Boolean expression. |
+| Transformation | The transformation to apply before dequeuing the message. |
+| DeliveryMode | The delivery mode of the dequeued message. |
+
+### EDBAQEnqueueOptions
+
+The `EDBAQEnqueueOptions` class lists the options available when enqueuing a message.
+
+| Property | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| Visibility | Set from `EDBAQVisibility`. It represents whether the new message is enqueued as part of the current transaction. |
+| RelativeMsgid | The relative message identifier. |
+| SequenceDeviation | The sequence deviation, which specifies when the message can be dequeued relative to other messages. |
+| Transformation | The transformation to apply before enqueuing the message. |
+| DeliveryMode | The delivery mode of the enqueued message. |
+
+### EDBAQMessage
+
+The `EDBAQMessage` class represents a message to enqueue/dequeue.
+
+| Property | Description |
+| ------------ | -------------------------------- |
+| Payload | The actual message to queue. |
+| MessageId | The ID of the queued message. |
+
+### EDBAQMessageProperties
+
+The `EDBAQMessageProperties` class lists the available message properties.
+
+| Property | Description |
+| ---------------- | --------------------------------------------------------------------------------------------- |
+| Priority | The priority of the message. |
+| Delay | The duration after which the message is available for dequeuing, in seconds. |
+| Expiration | The duration for which the message is available for dequeuing, in seconds. |
+| Correlation | The correlation identifier. |
+| Attempts | The number of attempts taken to dequeue the message. |
+| RecipientList | The list of recipients that overrides the default queue subscribers. |
+| ExceptionQueue | The name of the queue to move the unprocessed messages to. |
+| EnqueueTime | The time when the message was enqueued. |
+| State | The state of the message while dequeued. |
+| OriginalMsgid | The message identifier in the last queue. |
+| TransactionGroup | The transaction group for the dequeued messages. |
+| DeliveryMode | The delivery mode of the dequeued message. |
+
+### EDBAQMessageState
+
+The `EDBAQMessageState` class represents the state of the message during dequeue.
+
+| Value | Description |
+| --------- | --------------------------------------------------------- |
+| Expired | The message is moved to the exception queue. |
+| Processed | The message is processed and kept. |
+| Ready | The message is ready to be processed. |
+| Waiting | The message is in the waiting state; the delay hasn't been reached. |
+
+### EDBAQMessageType
+
+The `EDBAQMessageType` class represents the types for payload.
+
+| Value | Description |
+| ----- | -------------------------------------------------------------------------- |
+| Raw | The raw message type. Note: Currently, this payload type isn't supported. |
+| UDT | The user-defined type message. |
+| XML | The XML type message. Note: Currently, this payload type isn't supported. |
+
+### EDBAQNavigationMode
+
+The `EDBAQNavigationMode` class represents the different types of navigation modes available.
+
+| Value | Description |
+| ---------------- | ------------------------------------------------------------------ |
+| First_Message | Returns the first available message that matches the search criteria. |
+| Next_Message | Returns the next available message that matches the search criteria. |
+| Next_Transaction | Returns the first message of the next transaction group. |
+
+### EDBAQQueue
+
+The `EDBAQQueue` class represents a SQL statement to execute `DBMS_AQ` functionality on a PostgreSQL database.
+
+| Property | Description |
+| ----------------- | --------------------------------------------------------------------------------------------- |
+| Connection | The connection to use. |
+| Name | The name of the queue. |
+| MessageType | The message type that's enqueued/dequeued from this queue, for example `EDBAQMessageType.Udt`. |
+| UdtTypeName | The user-defined type name of the message type. |
+| EnqueueOptions | The enqueue options to use. |
+| DequeueOptions | The dequeue options to use. |
+| MessageProperties | The message properties to use. |
+
+### EDBAQVisibility
+
+The `EDBAQVisibility` class represents the visibility options available.
+
+| Value | Description |
+| --------- | ----------------------------------------------------------- |
+| Immediate | The enqueue/dequeue isn't part of the ongoing transaction. |
+| On_Commit | The enqueue/dequeue is part of the current transaction. |
+
+!!! Note
+ - To review the default options for these parameters, see [DBMS_AQ](/epas/latest/reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/).
+ - EDB advanced queueing functionality uses user-defined types for calling enqueue/dequeue operations. `Server Compatibility Mode=NoTypeLoading` can't be used with advanced queueing because `NoTypeLoading` doesn't load any user-defined types.
diff --git a/product_docs/docs/net_connector/7.0.6.2/13_using_a_ref_cursor_in_a_net_application.mdx b/product_docs/docs/net_connector/7.0.6.2/13_using_a_ref_cursor_in_a_net_application.mdx
new file mode 100644
index 00000000000..3b77d3f4ba8
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/13_using_a_ref_cursor_in_a_net_application.mdx
@@ -0,0 +1,104 @@
+---
+title: "Using a ref cursor in a .NET application"
+
+---
+
+
+
+A `ref cursor` is a cursor variable that contains a pointer to a query result set. The result set is determined by executing the `OPEN FOR` statement using the cursor variable. A cursor variable isn't tied to a particular query like a static cursor. You can open the same cursor variable a number of times with `OPEN FOR` statements containing different queries; each time, a new result set is created for that query and made available by way of the cursor variable. You can declare a cursor variable in two ways:
+
+- Use the `SYS_REFCURSOR` built-in data type to declare a weakly typed ref cursor.
+- Define a strongly typed ref cursor that declares a variable of that type.
+
+`SYS_REFCURSOR` is a ref cursor type that allows any result set to be associated with it. This is known as a weakly typed ref cursor. The following example is a declaration of a weakly typed ref cursor:
+
+```sql
+name SYS_REFCURSOR;
+```
+
+The following example declares a strongly typed ref cursor, where `emp_cur_type` is a hypothetical type name:
+
+```sql
+TYPE emp_cur_type IS REF CURSOR RETURN emp%ROWTYPE;
+```
+
+
+## Creating the stored procedure
+
+This sample code creates a stored procedure called `refcur_inout_callee`. It specifies the data type of the ref cursor being passed as an OUT parameter. To create the sample procedure, invoke EDB-PSQL and connect to the EDB Postgres Advanced Server host database. Enter the following SPL code at the command line:
+
+```sql
+CREATE OR REPLACE PROCEDURE
+ refcur_inout_callee(v_refcur OUT SYS_REFCURSOR)
+IS
+BEGIN
+ OPEN v_refcur FOR SELECT ename FROM emp;
+END;
+```
+
+This C# code uses the stored procedure to retrieve employee names from the `emp` table:
+
+```cpp
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+using System.Configuration;
+using System.Threading.Tasks;
+namespace EDBRefCursor
+{
+ class EmpRefcursor
+ {
+ [STAThread]
+ static async Task Main(string[] args)
+ {
+ var strConnectionString =
+ ConfigurationManager.AppSettings["DB_CONN_STRING"];
+ try
+ {
+ await using var dataSource = EDBDataSource.Create(strConnectionString);
+ var conn = await dataSource.OpenConnectionAsync();
+ await using var tran = await conn.BeginTransactionAsync();
+ using var command = new EDBCommand("refcur_inout_callee", conn);
+ command.CommandType = CommandType.StoredProcedure;
+ command.Transaction = tran;
+ command.Parameters.Add(new EDBParameter("refCursor",
+ EDBTypes.EDBDbType.Refcursor, 10, "refCursor",
+ ParameterDirection.Output, false, 2, 2,
+ System.Data.DataRowVersion.Current, null));
+ await command.PrepareAsync();
+ command.Parameters[0].Value = null;
+ await command.ExecuteNonQueryAsync();
+ var cursorName = command.Parameters[0].Value.ToString();
+ command.CommandText = "fetch all in \"" + cursorName + "\"";
+ command.CommandType = CommandType.Text;
+ await using var reader =
+ await command.ExecuteReaderAsync(CommandBehavior.SequentialAccess);
+ var fc = reader.FieldCount;
+ while (await reader.ReadAsync())
+ {
+ for (int i = 0; i < fc; i++)
+ {
+ Console.WriteLine(reader.GetString(i));
+ }
+ }
+ await reader.CloseAsync();
+ await tran.CommitAsync();
+ await conn.CloseAsync();
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine(ex.Message.ToString());
+ }
+ }
+ }
+}
+```
+
+This .NET code snippet displays the result on the console:
+
+```cpp
+for(int i = 0;i < fc; i++)
+{
+ Console.WriteLine(reader.GetString(i));
+}
+```
+
+You must bind the `EDBTypes.EDBDbType.Refcursor` type in `EDBParameter()` if you're using a ref cursor parameter.
diff --git a/product_docs/docs/net_connector/7.0.6.2/14_using_plugins.mdx b/product_docs/docs/net_connector/7.0.6.2/14_using_plugins.mdx
new file mode 100644
index 00000000000..9e8a3b1389e
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/14_using_plugins.mdx
@@ -0,0 +1,72 @@
+---
+title: "Using plugins"
+
+---
+
+
+
+EDB .NET driver plugins extend the driver with enhanced support for data types that aren't otherwise available in .NET. The following plugins are available:
+
+- GeoJSON
+- Json.NET
+- NetTopologySuite
+- NodaTime
+
+The plugins support the use of spatial, date/time, and JSON types. The following sections list the supported frameworks and data provider installation paths for each plugin.
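+
+As a hedged sketch of the general pattern, assuming the EDB driver mirrors the Npgsql plugin API, where each plugin package adds a `Use*` extension method to the data source builder:
+
+```csharp
+using EnterpriseDB.EDBClient;
+
+var connectionString = "Server=localhost;Port=5444;database=edb;User ID=enterprisedb;password=edb;";
+var dataSourceBuilder = new EDBDataSourceBuilder(connectionString);
+// Enable a plugin before building the data source. Each plugin package is
+// assumed to provide one such method: UseGeoJson(), UseJsonNet(),
+// UseNetTopologySuite(), or UseNodaTime().
+dataSourceBuilder.UseNodaTime();
+await using var dataSource = dataSourceBuilder.Build();
+```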
+
+## GeoJSON
+
+If you're using the GeoJSON plugin, the data provider installation paths, by target framework, are:
+
+- `C:\Program Files\edb\dotnet\plugins\GeoJSON\netstandard2.0`
+- `C:\Program Files\edb\dotnet\plugins\GeoJSON\net472`
+- `C:\Program Files\edb\dotnet\plugins\GeoJSON\net48`
+- `C:\Program Files\edb\dotnet\plugins\GeoJSON\net481`
+
+The following shared library files are required:
+
+- `EnterpriseDB.EDBClient.GeoJSON.dll`
+
+For detailed information about using the GeoJSON plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/geojson.html).
+
+## Json.NET
+
+If you're using the Json.NET plugin, the data provider installation paths, by target framework, are:
+
+- `C:\Program Files\edb\dotnet\plugins\Json.NET\netstandard2.0`
+- `C:\Program Files\edb\dotnet\plugins\Json.NET\net472`
+- `C:\Program Files\edb\dotnet\plugins\Json.NET\net48`
+- `C:\Program Files\edb\dotnet\plugins\Json.NET\net481`
+
+The following shared library files are required:
+
+- `EnterpriseDB.EDBClient.Json.NET.dll`
+
+For detailed information about using the Json.NET plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/jsonnet.html).
+
+## NetTopologySuite
+
+If you're using the NetTopologySuite plugin, the data provider installation paths, by target framework, are:
+
+- `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\netstandard2.0`
+- `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\net472`
+- `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\net48`
+- `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\net481`
+
+The following shared library files are required:
+
+- `EnterpriseDB.EDBClient.NetTopologySuite.dll`
+
+For detailed information about using the NetTopologySuite type plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/nts.html).
+
+## NodaTime
+
+If you're using the NodaTime plugin, the data provider installation paths, by target framework, are:
+
+- `C:\Program Files\edb\dotnet\plugins\NodaTime\netstandard2.0`
+- `C:\Program Files\edb\dotnet\plugins\NodaTime\net472`
+- `C:\Program Files\edb\dotnet\plugins\NodaTime\net48`
+- `C:\Program Files\edb\dotnet\plugins\NodaTime\net481`
+
+The following shared library files are required:
+
+- `EnterpriseDB.EDBClient.NodaTime.dll`
+
+For detailed information about using the NodaTime plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/nodatime.html).
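+
+For instance, with the NodaTime plugin enabled, `timestamptz` values can be read as NodaTime `Instant` values. This is a sketch under the same assumptions as the pattern shown earlier in this topic:
+
+```csharp
+var dataSourceBuilder = new EDBDataSourceBuilder(connectionString);
+dataSourceBuilder.UseNodaTime(); // assumption: mirrors Npgsql's plugin API
+await using var dataSource = dataSourceBuilder.Build();
+
+await using var conn = await dataSource.OpenConnectionAsync();
+await using var cmd = new EDBCommand("SELECT now()", conn);
+// With the plugin enabled, timestamptz is read as a NodaTime Instant.
+var instant = (NodaTime.Instant)(await cmd.ExecuteScalarAsync())!;
+Console.WriteLine(instant);
+```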
diff --git a/product_docs/docs/net_connector/7.0.6.2/15_using_object_types.mdx b/product_docs/docs/net_connector/7.0.6.2/15_using_object_types.mdx
new file mode 100644
index 00000000000..c57c5ba4713
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/15_using_object_types.mdx
@@ -0,0 +1,181 @@
+---
+title: "Using object types in .NET"
+
+---
+
+
+
+The SQL `CREATE TYPE` command creates a user-defined object type, which is stored in the EDB Postgres Advanced Server database. You can then reference these user-defined types in SPL procedures, SPL functions, and .NET programs.
+
+Create the basic object type with the `CREATE TYPE AS OBJECT` command. Optionally, use the `CREATE TYPE BODY` command to implement the type's methods.
+
+## Using an object type
+
+To use an object type, you must first create the object type in the EDB Postgres Advanced Server database. Object type `addr_object_type` defines the attributes of an address:
+
+```text
+CREATE OR REPLACE TYPE addr_object_type AS OBJECT
+(
+ street VARCHAR2(30),
+ city VARCHAR2(20),
+ state CHAR(2),
+ zip NUMBER(5)
+);
+```
+
+Object type `emp_obj_typ` defines the attributes of an employee. One of these attributes is object type `ADDR_OBJECT_TYPE`, as previously described. The object type body contains a method that displays the employee information:
+
+```text
+CREATE OR REPLACE TYPE emp_obj_typ AS OBJECT
+(
+ empno NUMBER(4),
+ ename VARCHAR2(20),
+ addr ADDR_OBJECT_TYPE,
+ MEMBER PROCEDURE display_emp(SELF IN OUT emp_obj_typ)
+);
+
+CREATE OR REPLACE TYPE BODY emp_obj_typ AS
+ MEMBER PROCEDURE display_emp (SELF IN OUT emp_obj_typ)
+ IS
+ BEGIN
+ DBMS_OUTPUT.PUT_LINE('Employee No : ' || SELF.empno);
+ DBMS_OUTPUT.PUT_LINE('Name : ' || SELF.ename);
+ DBMS_OUTPUT.PUT_LINE('Street : ' || SELF.addr.street);
+ DBMS_OUTPUT.PUT_LINE('City/State/Zip: ' || SELF.addr.city || ', ' ||
+ SELF.addr.state || ' ' || LPAD(SELF.addr.zip,5,'0'));
+ END;
+END;
+```
+
+This example is a complete .NET program that uses these user-defined object types:
+
+```text
+using System;
+using System.Data.Common;
+using System.Threading.Tasks;
+using EnterpriseDB.EDBClient;
+namespace TypesTest
+{
+ internal class Program
+ {
+ static async Task Main(string[] args)
+ {
+ var connString = "Server=localhost;Port=5444;database=edb;User ID=enterprisedb;password=edb;";
+ var dataSourceBuilder = new EDBDataSourceBuilder(connString);
+            dataSourceBuilder.MapComposite<addr_object_type>("enterprisedb.addr_object_type");
+            dataSourceBuilder.MapComposite<emp_obj_typ>("enterprisedb.emp_obj_typ");
+ await using var dataSource = dataSourceBuilder.Build();
+ await using var conn = await dataSource.OpenConnectionAsync();
+ try
+ {
+ var address = new addr_object_type()
+ {
+ street = "123 MAIN STREET",
+ city = "EDISON",
+ state = "NJ",
+ zip = 8817
+ };
+ var emp = new emp_obj_typ()
+ {
+ empno = 9001,
+ ename = "JONES",
+ addr = address
+ };
+ await using (var cmd = new EDBCommand("emp_obj_typ.display_emp", conn))
+ {
+ cmd.CommandType = System.Data.CommandType.StoredProcedure;
+ EDBCommandBuilder.DeriveParameters(cmd);
+ cmd.Parameters[0].Value = emp;
+ cmd.Prepare();
+ cmd.ExecuteNonQuery();
+ var empOut = (emp_obj_typ?)cmd.Parameters[0].Value;
+ Console.WriteLine("Emp No: " + empOut.empno);
+ Console.WriteLine("Emp Name: " + empOut.ename);
+ Console.WriteLine("Emp Address Street: " + empOut.addr.street);
+ Console.WriteLine("Emp Address City: " + empOut.addr.city);
+ Console.WriteLine("Emp Address State: " + empOut.addr.state);
+ Console.WriteLine("Emp Address Zip: " + empOut.addr.zip);
+ }
+ }
+ catch (EDBException exp)
+ {
+ Console.WriteLine(exp.Message.ToString());
+ }
+ finally
+ {
+ conn.Close();
+ }
+ }
+ }
+    public class addr_object_type
+    {
+        public string? street;
+        public string? city;
+        public string? state;
+        public decimal zip;
+    }
+
+    public class emp_obj_typ
+    {
+        public decimal empno;
+        public string? ename;
+        public addr_object_type? addr;
+    }
+}
+```
+
+The following .NET types are defined to map to the types in EDB Postgres Advanced Server:
+
+```text
+public class addr_object_type
+{
+    public string? street;
+    public string? city;
+    public string? state;
+    public decimal zip;
+}
+
+public class emp_obj_typ
+{
+ public decimal empno;
+ public string? ename;
+ public addr_object_type? addr;
+}
+```
+
+A call to `EDBDataSourceBuilder.MapComposite` maps the .NET type to the EDB Postgres Advanced Server types:
+
+```text
+dataSourceBuilder.MapComposite<addr_object_type>("enterprisedb.addr_object_type");
+dataSourceBuilder.MapComposite<emp_obj_typ>("enterprisedb.emp_obj_typ");
+```
+
+A call to `EDBCommandBuilder.DeriveParameters()` gets parameter information for a stored procedure. This allows you to just set the parameter values and call the stored procedure:
+
+```text
+EDBCommandBuilder.DeriveParameters(cmd);
+```
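+
+As a hypothetical alternative to deriving parameters, you can describe the composite parameter yourself. This sketch assumes `EDBParameter` mirrors Npgsql's `DataTypeName` property for referring to a user-defined type by name:
+
+```text
+// Hypothetical sketch: add the SELF parameter manually instead of deriving it.
+var selfParam = new EDBParameter
+{
+    DataTypeName = "enterprisedb.emp_obj_typ", // assumed to mirror Npgsql
+    Direction = System.Data.ParameterDirection.InputOutput,
+    Value = emp
+};
+cmd.Parameters.Add(selfParam);
+```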
+
+Set the value of the parameter by creating an object of the .NET type and assigning it to the `Value` property of the parameter:
+
+```text
+addr_object_type address = new addr_object_type()
+{
+ street = "123 MAIN STREET",
+ city = "EDISON",
+ state = "NJ",
+ zip = 8817
+};
+
+emp_obj_typ emp = new emp_obj_typ()
+{
+ empno = 9001,
+ ename = "JONES",
+ addr = address
+};
+cmd.Parameters[0].Value = emp;
+```
+
+A call to `cmd.ExecuteNonQuery()` executes the call to the `display_emp()` method:
+
+```text
+cmd.ExecuteNonQuery();
+```
diff --git a/product_docs/docs/net_connector/7.0.6.2/16_scram_compatibility.mdx b/product_docs/docs/net_connector/7.0.6.2/16_scram_compatibility.mdx
new file mode 100644
index 00000000000..feff2da342b
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/16_scram_compatibility.mdx
@@ -0,0 +1,10 @@
+---
+title: "SCRAM compatibility"
+legacyRedirects:
+ - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/security_and_encryption.html"
+
+---
+
+
+
+The EDB .NET driver provides SCRAM-SHA-256 support for EDB Postgres Advanced Server version 10 and later. This support is available in the EDB .NET Connector 4.0.2.1 release and later.
diff --git a/product_docs/docs/net_connector/7.0.6.2/17_advanced_server_net_connector_logging.mdx b/product_docs/docs/net_connector/7.0.6.2/17_advanced_server_net_connector_logging.mdx
new file mode 100644
index 00000000000..03bd1ed820a
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/17_advanced_server_net_connector_logging.mdx
@@ -0,0 +1,82 @@
+---
+title: "EDB .NET Connector logging"
+
+---
+
+
+
+The EDB .NET Connector supports logging to help you resolve issues with the connector when you use it in your application. It supports logging using the standard .NET `Microsoft.Extensions.Logging` package. For more information about logging in .NET, see [Logging in C# and .NET](https://learn.microsoft.com/en-us/dotnet/core/extensions/logging?tabs=command-line).
+
+!!! Note
+ For versions earlier than 7.x, EDB .NET Connector had its own, custom logging API.
+
+## Console logging provider
+
+The .NET logging API works with a variety of built-in and third-party logging providers. The console logging provider logs output to the console.
+
+### Console logging with EDBDataSource
+
+Create a `Microsoft.Extensions.Logging.LoggerFactory` and configure an `EDBDataSource` with it. Connections opened through this data source log by way of this logger factory.
+
+```text
+var loggerFactory = LoggerFactory.Create(builder => builder.AddSimpleConsole());
+
+var dataSourceBuilder = new EDBDataSourceBuilder(connectionString);
+dataSourceBuilder.UseLoggerFactory(loggerFactory);
+await using var dataSource = dataSourceBuilder.Build();
+
+await using var connection = await dataSource.OpenConnectionAsync();
+await using var command = new EDBCommand("SELECT 1", connection);
+_ = await command.ExecuteScalarAsync();
+```
+
+### Console logging without EDBDataSource
+
+Create a `Microsoft.Extensions.Logging.LoggerFactory` and configure EDB .NET Connector's logger factory globally using `EDBLoggingConfiguration.InitializeLogging`. Configure it at the start of your program, before using any other EDB .NET Connector API.
+
+```text
+var loggerFactory = LoggerFactory.Create(builder => builder.AddSimpleConsole());
+EDBLoggingConfiguration.InitializeLogging(loggerFactory);
+
+await using var conn = new EDBConnection(connectionString);
+await conn.OpenAsync();
+await using var command = new EDBCommand("SELECT 1", conn);
+_ = await command.ExecuteScalarAsync();
+```
+
+## Log levels
+
+The following log levels are available:
+
+- Trace
+- Debug
+- Information
+- Warning
+- Error
+- Critical
+
+This example shows how to change the log level to `Trace`:
+
+```text
+var loggerFactory = LoggerFactory.Create(builder => builder
+    .SetMinimumLevel(LogLevel.Trace)
+    .AddSimpleConsole());
+```
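+
+When tracing, you might also want parameter values in the log. The following is a hedged sketch, assuming the EDB data source builder mirrors Npgsql's `EnableParameterLogging` method:
+
+```text
+var dataSourceBuilder = new EDBDataSourceBuilder(connectionString);
+dataSourceBuilder.UseLoggerFactory(loggerFactory);
+// Assumption: mirrors Npgsql's opt-in for logging parameter values,
+// which are otherwise omitted as potentially sensitive.
+dataSourceBuilder.EnableParameterLogging();
+await using var dataSource = dataSourceBuilder.Build();
+```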
+
+## Formatting the log output
+
+This example shows how to format your log output. Create a `LoggerFactory` that restricts each log message to a single line and adds a timestamp:
+
+```text
+var loggerFactory = LoggerFactory.Create(builder => builder
+    .SetMinimumLevel(LogLevel.Trace)
+    .AddSimpleConsole(options =>
+    {
+        options.SingleLine = true;
+        options.TimestampFormat = "yyyy/MM/dd HH:mm:ss ";
+    }));
+```
\ No newline at end of file
diff --git a/product_docs/docs/net_connector/7.0.6.2/18_api_reference.mdx b/product_docs/docs/net_connector/7.0.6.2/18_api_reference.mdx
new file mode 100644
index 00000000000..53050c37952
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/18_api_reference.mdx
@@ -0,0 +1,13 @@
+---
+title: "API reference"
+
+---
+
+
+
+For information about using the API, see the [Npgsql documentation](http://www.npgsql.org/doc/api/Npgsql.html).
+
+Usage notes:
+
+- When using the API, replace references to `Npgsql` with `EnterpriseDB.EDBClient`.
+- When referring to classes, replace `Npgsql` with `EDB`. For example, use the `EDBBinaryExporter` class instead of the `NpgsqlBinaryExporter` class.
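+
+As a minimal sketch of the substitution, assuming the EDB driver exposes the same binary `COPY` API as Npgsql with the classes renamed:
+
+```csharp
+using EnterpriseDB.EDBClient;
+
+var connectionString = "Server=localhost;Port=5444;database=edb;User ID=enterprisedb;password=edb;";
+await using var conn = new EDBConnection(connectionString);
+await conn.OpenAsync();
+
+// Npgsql's NpgsqlBinaryExporter becomes EDBBinaryExporter:
+using var exporter = conn.BeginBinaryExport("COPY emp TO STDOUT (FORMAT BINARY)");
+```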
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/connection_opened_successfully.png b/product_docs/docs/net_connector/7.0.6.2/images/connection_opened_successfully.png
new file mode 100755
index 00000000000..bab12126d20
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/connection_opened_successfully.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:537e7edecce3882b794110e0ffc644a51cab69ba2c7e7f82a3e99d32c4b4ba65
+size 22683
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/dialog.png b/product_docs/docs/net_connector/7.0.6.2/images/dialog.png
new file mode 100755
index 00000000000..19cba54d1f9
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/dialog.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f922e6cd4e43927708b5f460f5389a5b3a41dd70f3a5394723e6aee7d710f1ae
+size 9048
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_complete.png b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_complete.png
new file mode 100755
index 00000000000..332ae795899
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_complete.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0983f216ce071f57f7e758980c3f966caded4312412905a6aba66efa362fdb13
+size 272137
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_dialog.png b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_dialog.png
new file mode 100755
index 00000000000..b465d536032
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_dialog.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:994a725f061f1b51fd92ccc2df5abd9066a1cf4ea7600611ae57ebc6cc59af20
+size 51144
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_wizard.png b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_wizard.png
new file mode 100755
index 00000000000..29c9a7f99f9
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/dotnet_installation_wizard.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efe8029105db01662005a349a7b59c87b6f7dce3017990a197e3b9b681392860
+size 227235
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/ready_to_install.png b/product_docs/docs/net_connector/7.0.6.2/images/ready_to_install.png
new file mode 100755
index 00000000000..59e44d96bc5
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/ready_to_install.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91b18bef7b78a6dae7d6b664e2bccfbfdb4248dbd034cb59e2c6a35ada7da49c
+size 44080
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/selecting_the_connectors_installer.png b/product_docs/docs/net_connector/7.0.6.2/images/selecting_the_connectors_installer.png
new file mode 100755
index 00000000000..432e4473c09
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/selecting_the_connectors_installer.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d939c7e6604025f82be47969d69e6acc63ab5a48a0af4341e42efe0156b42778
+size 97808
diff --git a/product_docs/docs/net_connector/7.0.6.2/images/starting_stackbuilder_plus.png b/product_docs/docs/net_connector/7.0.6.2/images/starting_stackbuilder_plus.png
new file mode 100755
index 00000000000..11665300652
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/images/starting_stackbuilder_plus.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce6bcefb865ca14239fb7e0e2ac5149ed56251cfbc5153869070d039f70857c6
+size 91989
diff --git a/product_docs/docs/net_connector/7.0.6.2/index.mdx b/product_docs/docs/net_connector/7.0.6.2/index.mdx
new file mode 100644
index 00000000000..fdcdfafff77
--- /dev/null
+++ b/product_docs/docs/net_connector/7.0.6.2/index.mdx
@@ -0,0 +1,19 @@
+---
+title: "EDB .NET Connector"
+directoryDefaults:
+  description: "EDB .NET Connector version 7.0.6.2 documentation and release notes."
+---
+
+The EDB .NET Connector distributed with EDB Postgres Advanced Server provides connectivity between a .NET client application and an EDB Postgres Advanced Server database server. You can:
+
+- Connect to an instance of EDB Postgres Advanced Server.
+- Retrieve information from an EDB Postgres Advanced Server database.
+- Update information stored on an EDB Postgres Advanced Server database.
+
+To understand these examples, you need a solid working knowledge of C# and .NET. The EDB .NET Connector functionality is built on the core functionality of the Npgsql open source project. For details, see the [Npgsql User Guide](http://www.npgsql.org/doc/index.html).
+
+
+
+- release_notes
+- requirements_overview
+- the_advanced_server_net_connector_overview
+- installing_and_configuring_the_net_connector
+- using_the_net_connector
+- opening_a_database_connection
+- retrieving_database_records
+- parameterized_queries
+- inserting_records_in_a_database
+- deleting_records_in_a_database
+- using_spl_stored_procedures_in_your_net_application
+- using_advanced_queueing
+- using_a_ref_cursor_in_a_net_application
+- using_plugins
+- using_object_types
+- scram_compatibility
+- advanced_server_net_connector_logging
+- api_reference
+- conclusion
+
+
diff --git a/product_docs/docs/ocl_connector/16/04_open_client_library/05_ocl_function_reference.mdx b/product_docs/docs/ocl_connector/16/04_open_client_library/05_ocl_function_reference.mdx
index 04ff3cd9d87..5879d6654e9 100644
--- a/product_docs/docs/ocl_connector/16/04_open_client_library/05_ocl_function_reference.mdx
+++ b/product_docs/docs/ocl_connector/16/04_open_client_library/05_ocl_function_reference.mdx
@@ -141,6 +141,7 @@ OCIStmtExecute(...);
!!! Note
Using `EDB_COMMIT_AFTER_CURSOR` commits any pending changes.
+!!!
`EDB_CURSOR_WITHOUT_XACT_BLK`
@@ -412,6 +413,7 @@ Oracle_XA+HostName=192.168.1.1+PortNumber=1533+SqlNet=XE+Acc=P/user/password+App
| SQLT_BDOUBLE | Binary double |
| SQLT_BIN | Binary data |
| SQLT_BFLOAT | Binary float |
+| SQLT_BOL | Boolean |
| SQLT_CHR | Character string |
| SQLT_DAT | Oracle date |
| SQLT_DATE | ANSI date |
diff --git a/product_docs/docs/ocl_connector/16/ocl_rel_notes/16.1.0.2_ocl_release_notes.mdx b/product_docs/docs/ocl_connector/16/ocl_rel_notes/16.1.0.2_ocl_release_notes.mdx
new file mode 100644
index 00000000000..cb94ab72191
--- /dev/null
+++ b/product_docs/docs/ocl_connector/16/ocl_rel_notes/16.1.0.2_ocl_release_notes.mdx
@@ -0,0 +1,14 @@
+---
+title: "EDB OCL Connector 16.1.0.2 release notes"
+navTitle: Version 16.1.0.2
+---
+
+Released: 15 Feb 2024
+
+The EDB OCL Connector provides an API similar to the Oracle Call Interface.
+
+New features, enhancements, bug fixes, and other changes in the EDB OCL Connector 16.1.0.2 include:
+
+| Type | Description |
+| ----------- | ----------------------------------------------------------- |
+| Enhancement | Added support for `SQLT_BOL` for the SQL `BOOLEAN` type. |
diff --git a/product_docs/docs/ocl_connector/16/ocl_rel_notes/index.mdx b/product_docs/docs/ocl_connector/16/ocl_rel_notes/index.mdx
index a3f95f0b42d..77ba9f8a562 100644
--- a/product_docs/docs/ocl_connector/16/ocl_rel_notes/index.mdx
+++ b/product_docs/docs/ocl_connector/16/ocl_rel_notes/index.mdx
@@ -2,6 +2,7 @@
title: "EDB OCL Connector release notes"
navTitle: Release Notes
navigation:
+ - 16.1.0.2_ocl_release_notes
- 16.1.0.1_ocl_release_notes
---
@@ -11,4 +12,5 @@ Release notes describe what's new in a release. When a minor or patch release in
| Version | Release date |
| -------------------------------------- | ------------ |
+| [16.1.0.2](16.1.0.2_ocl_release_notes) | 15 Feb 2024 |
| [16.1.0.1](16.1.0.1_ocl_release_notes) | 09 Nov 2023 |
diff --git a/product_docs/docs/pem/9/considerations/pem_security_best_practices/apache_httpd_security_configuration.mdx b/product_docs/docs/pem/9/considerations/pem_security_best_practices/apache_httpd_security_configuration.mdx
index c11bce3b380..17e72501cbf 100644
--- a/product_docs/docs/pem/9/considerations/pem_security_best_practices/apache_httpd_security_configuration.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_security_best_practices/apache_httpd_security_configuration.mdx
@@ -8,7 +8,7 @@ redirects:
- /pem/latest/installing_pem_server/pem_security_best_practices/apache_httpd_security_configuration/
---
-On Windows, Apache HTTPD is named PEM HTTPD. The Apache HTTPD configuration file is `pme.conf` and the SSL configuration file is `httpd-ssl-pem.conf`. Both configuration files are in the `/conf/addons` directory.
+On Windows, Apache HTTPD is named PEM HTTPD. The Apache HTTPD configuration file is `pem.conf` and the SSL configuration file is `httpd-ssl-pem.conf`. Both configuration files are in the `/conf/addons` directory.
On Linux, the Apache HTTPD configuration file is `edb-pem.conf` and the SSL configuration file is `edb-ssl-pem.conf`. Both configurations files are in the `/conf.d` directory.
diff --git a/product_docs/docs/pem/9/index.mdx b/product_docs/docs/pem/9/index.mdx
index d36e2595da2..f2511d7b7a1 100644
--- a/product_docs/docs/pem/9/index.mdx
+++ b/product_docs/docs/pem/9/index.mdx
@@ -34,10 +34,12 @@ navigation:
- monitoring_BDR_nodes
- monitoring_failover_manager
- monitoring_xdb_replication_cluster
+ - monitoring_event_history
- tuning_performance
- profiling_workloads
- reports
- pem_cli
+ - pem_rest_api
- "#Developing"
- pem_query_tool
- pem_schema_diff_tool
diff --git a/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx b/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
index 6feec547ee0..edb79f05c4b 100644
--- a/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
+++ b/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
@@ -22,24 +22,25 @@ The PEM server package includes a script (`configure-pem-server.sh`) to help aut
When invoking the script, you can include command line options to specify configuration properties. The script prompts you for values that you omit on the command line. The accepted options are:
-| Option | Description |
-| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `-acp` or `--pemagent-certificate-path` | Defines PEM agent certificate path. The default is `/root/.pem`. |
-| `-ci` or `--cidr-address` | CIDR-formatted network address range that agents connect to the server from, to be added to the server's `pg_hba.conf` file, for example, `192.168.1.0/24`. The default is `0.0.0.0/0`. |
-| `-dbi` or `--db-install-path` | Directory for the database server installation, for example, `/usr/edb/as12` for EDB Postgres Advanced Server or `/usr/pgsql-12` for PostgreSQL. |
-| `-ds` or `--db-unitfile` | Unit file name of the PEM database server. For EDB Postgres Advanced Server, the default file name is `edb-as-12`. For PostgreSQL, it's `postgresql-12`. |
-| `-ho` or `--host` | Host address of the PEM database server. |
-| `-p` or `--port` | Port number of the PEM database server. |
-| `-ps` or `--pemagent-servicename` | Service name of the pemagent. The default value is `pemagent`. |
-| `-sp` or `--superpassword` | Superuser password of the PEM database server. This value is required. |
-| `-su` or `--superuser` | Superuser name of the PEM database server. |
-| `-au` or `--use-agent-user` | PEM agent user name. |
-| `-t` or `--type` | Installation type: Specify `1` if the configuration is for web services and backend database, `2` if you're configuring web services, or `3` if you're configuring the backend database. If you specify `3`, the database must reside on the local host. |
-| `-un` or `--uninstall-pem-server` | Uninstalls the PEM server. |
-| `-nhc` or `--no-hba-change` | Skips the changes done to `pg_hba.conf` and `pg_config` files. |
-| `-uac` or `--use-agent-sslcert` | Reuses the existing agent SSL certificate while configuring the PEM server. |
-| `-uak` or `--use-agent-sslkey` | Reuses the existing agent SSL key while configuring the PEM server. |
-| `-h` or `--help` | Lists all the available options while configuring the PEM server. |
+| Option | Description |
+|------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-acp` or `--pemagent-certificate-path` | Defines PEM agent certificate path. The default is `/root/.pem`. |
+| `-ci` or `--cidr-address` | CIDR-formatted network address range that agents connect to the server from, to be added to the server's `pg_hba.conf` file, for example, `192.168.1.0/24`. The default is `0.0.0.0/0`. |
+| `-dbi` or `--db-install-path` | Directory for the database server installation, for example, `/usr/edb/as12` for EDB Postgres Advanced Server or `/usr/pgsql-12` for PostgreSQL. |
+| `-ds` or `--db-unitfile` | Unit file name of the PEM database server. For EDB Postgres Advanced Server, the default file name is `edb-as-12`. For PostgreSQL, it's `postgresql-12`. |
+| `-ho` or `--host` | Host address of the PEM database server. |
+| `-p` or `--port` | Port number of the PEM database server. |
+| `-ps` or `--pemagent-servicename` | Service name of the pemagent. The default value is `pemagent`. |
+| `-sp` or `--superpassword` | Superuser password of the PEM database server. This value is required. |
+| `-su` or `--superuser` | Superuser name of the PEM database server. |
+| `-au` or `--use-agent-user` | PEM agent user name. |
+| `-t` or `--type` | Installation type: Specify `1` if the configuration is for web services and backend database, `2` if you're configuring web services, or `3` if you're configuring the backend database. If you specify `3`, the database must reside on the local host. |
+| `-un` or `--uninstall-pem-server` | Uninstalls the PEM server. |
+| `-nhc` or `--no-hba-change` | Skips the changes done to `pg_hba.conf` and `pg_config` files. |
+| `-uac` or `--use-agent-sslcert` | Reuses the existing agent SSL certificate while configuring the PEM server. |
+| `-uak` or `--use-agent-sslkey` | Reuses the existing agent SSL key while configuring the PEM server. |
+| `-scs` or `--server-certificate-subject` | Provides the custom web server certificate subject. The format is `/C=CountryCode/ST=StateName/L=CityName/O=CompanySelectionName/CN=CommonNameorHostname/emailAddress=user@company.com`. Provide the `C=CountryCode` value as an ISO 3166-1 alpha-2 code.                                |
+| `-h` or `--help` | Lists all the available options while configuring the PEM server. |
If you don't provide configuration properties on the command line, the script prompts you for values. When you invoke the script, choose from:
@@ -55,7 +56,7 @@ If you don't provide configuration properties on the command line, the script pr
After selecting a configuration option, the script prompts you for configuration properties. When the script finishes, it creates the objects required by the PEM server or performs the configuration steps required. To view help for the script, use the command:
```shell
-/usr/edb/pem/bin/configure-pem-server.sh -help
+/usr/edb/pem/bin/configure-pem-server.sh --help
```
After configuring the PEM server, you can access the PEM web interface in your browser. Navigate to:
diff --git a/product_docs/docs/pem/9/managing_pem_server.mdx b/product_docs/docs/pem/9/managing_pem_server.mdx
index 0dc2581664e..175d9a9d273 100644
--- a/product_docs/docs/pem/9/managing_pem_server.mdx
+++ b/product_docs/docs/pem/9/managing_pem_server.mdx
@@ -245,139 +245,140 @@ To modify a parameter value, edit the content displayed in the **Value** field t
You can use global configuration options to modify aspects of the PEM Server's behavior. The list of configuration parameters is subject to change.
-| Parameter name | Value and unit | Description |
-| ------------------------------------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| audit_log_retention_time | 30 days | Specifies the number of days for an audit log to be retained on the PEM server. |
-| auto_create_agent_alerts | true | Specifies whether to create default agent level alerts automatically when an agent is registered. |
-| auto_create_server_alerts | true | Specifies whether to create default server level alerts automatically when a server is bound to an agent. |
-| chart_disable_bullets | false | Enables/disables bullets on line charts on dashboards and Capacity Manager reports. |
-| cm_data_points_per_report | 50 | Specifies the number of data points to plot on charts on Capacity Manager reports. |
-| cm_max_end_date_in_years | 5 years | Specifies the maximum amount of time for the Capacity Manager to extrapolate data for. Ensures that threshold-based end dates of reports aren't extrapolated indefinitely. |
-| dash_alerts_timeout | 60 seconds | Specifies the number of seconds after which the components of the Alerts dashboard are refreshed. |
-| dash_db_comrol_span | 7 days | Specifies the number of days worth of data to plot on the Commit/Rollback Analysis chart on the Database Analysis and Server Analysis dashboards. |
-| dash_db_comrol_timeout | 1800 seconds | Specifies the number of seconds after which the Commits/Rollbacks line chart is refreshed on the Database Analysis and Server Analysis dashboards. |
-| dash_db_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is refreshed on the Database Analysis dashboard. |
-| dash_db_eventlag_span | 7 days | Specifies the number of days worth of data to plot on the Number of Events Lag chart for slony replication on the Database Analysis dashboard. |
-| dash_db_eventlag_timeout | 1800 seconds | Specifies the number of seconds after which the Number of Events Lag line chart for slony replication is refreshed on the Database Analysis dashboard. |
-| dash_db_hottable_rows | 25 rows | Specifies the number of rows to show on the HOT Table Analysis table on the Database Analysis dashboard. |
-| dash_db_hottable_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables table is refreshed on the Database Analysis dashboard. |
-| dash_db_io_span | 7 days | Specifies the number of days worth of data to plot on the Database I/O Analysis chart on the Database Analysis and I/O Analysis dashboards. |
-| dash_db_io_timeout | 1800 seconds | Specifies the number of seconds after which the Database I/O line chart is refreshed on the Database Analysis and I/O Analysis dashboards. |
-| dash_db_rowact_span | 7 days | Specifies the number of days worth of data to plot on the Row Activity Analysis chart on the Database Analysis, I/O Analysis, and Server Analysis dashboards. |
-| dash_db_rowact_timeout | 1800 seconds | Specifies the number of seconds after which the Row Activity line chart is refreshed on the Database Analysis, I/O Analysis, and Server Analysis dashboards. |
-| dash_db_storage_timeout | 300 seconds | Specifies the number of seconds after which the Storage bar chart is refreshed on the Database Analysis dashboard. |
-| dash_db_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Time Lag chart for Slony replication on the Database Analysis dashboard. |
-| dash_db_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Time Lag line chart for Slony replication is refreshed on the Database Analysis dashboard. |
-| dash_db_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity Analysis chart on the Database Analysis dashboard. |
-| dash_db_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is refreshed on the Database Analysis dashboard. |
-| dash_efm_timeout | 300 seconds | Specifies the number of seconds after which the Failover Manager Node Status and Failover Manager Cluster Info line chart is refreshed on the Streaming Replication dashboard. |
-| dash_global_overview_timeout | 30 seconds | Specifies the number of seconds after which the components of the Global Overview dashboard are refreshed. |
-| dash_header_timeout | 60 seconds | Specifies the number of seconds after which the information on the header of all the dashboards are refreshed. |
-| dash_io_chkpt_span | 7 days | Specifies the number of days worth of data to plot on the Checkpoints chart on the I/O Analysis dashboard. |
-| dash_io_chkpt_timeout | 1800 seconds | Specifies the number of seconds after which the Checkpoints line chart is refreshed on the I/O Analysis dashboard. |
-| dash_io_hotindx_timeout | 300 seconds | Specifies the number of seconds after which the Hot Indexes bar chart is refreshed on the I/O Analysis dashboard. |
-| dash_io_hottbl_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables bar chart is refreshed on the I/O Analysis dashboard. |
-| dash_io_index_objectio_rows | 25 rows | Specifies the number of rows displayed on the Index Activity table on the I/O Analysis and Object Activity Analysis dashboards. |
-| dash_io_index_objectio_timeout | 60 seconds | Specifies the number of seconds after which the Index Activity table is refreshed on the I/O Analysis and Object Activity Analysis dashboards. |
-| dash_io_objectio_rows | 25 rows | Specifies the number of rows displayed on the Object I/O Details table on the I/O Analysis and Object Activity Analysis dashboards. |
-| dash_io_objectio_timeout | 300 seconds | Specifies the number of seconds after which the Object I/O Details table is refreshed on the I/O Analysis and Object Activity Analysis Dashboards. |
-| dash_memory_hostmemact_span | 7 days | Specifies the number of days worth of data to plot on the Host Memory Activity Analysis chart on the Memory Analysis dashboard. |
-| dash_memory_hostmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Host Memory Activity line chart is refreshed on the Memory Analysis dashboard. |
-| dash_memory_hostmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Host Memory Configuration pie chart is refreshed on the Memory Analysis and Server Analysis dashboards. |
-| dash_memory_servmemact_span | 7 days | Specifies the number of days worth of data to plot on the server Memory Activity Analysis chart on the Memory Analysis dashboard. |
-| dash_memory_servmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Server Memory Activity line chart is refreshed on the Memory Analysis dashboard. |
-| dash_memory_servmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Server Memory Configuration pie chart is refreshed on the Memory Analysis dashboard. |
-| dash_objectact_objstorage_rows | 15 rows | Specifies the number of rows to show on the Object Storage table on the Object Activity Analysis dashboard. |
-| dash_objectact_objstorage_timeout | 300 seconds | Specifies the number of seconds after which the Object Storage table is refreshed on the Object Activity Analysis dashboard. |
-| dash_objectact_objtopindexes_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Indexes bar chart is refreshed on the Object Activity Analysis dashboard. |
-| dash_objectact_objtoptables_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Tables bar chart is refreshed on the Object Activity Analysis dashboard. |
-| dash_os_cpu_span | 7 days | Specifies the number of days worth of data to plot on the CPU chart on the Operating System Analysis dashboard. |
-| dash_os_cpu_timeout | 1800 seconds | Specifies the number of seconds after which the CPU line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_data_span | 7 days | Specifies the number of days worth of data to plot on the I/O line chart on the Operating System Analysis dashboard. |
-| dash_os_disk_span | 7 days | Specifies the number of days worth of data to plot on the Utilisation chart on the Operating System Analysis dashboard. |
-| dash_os_hostfs_timeout | 1800 seconds | Specifies the number of seconds after which the Host File System Details table is refreshed on the Operating System Analysis dashboard. |
-| dash_os_io_timeout | 1800 seconds | Specifies the number of seconds after which the I/O line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_memory_span | 7 days | Specifies the number of days worth of data to plot on the Memory chart on the Operating System Analysis dashboard. |
-| dash_os_memory_timeout | 1800 seconds | Specifies the number of seconds after which the Memory line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_packet_span | 7 days | Specifies the number of days worth of data to plot on the Packet chart on the Operating System Analysis dashboard. |
-| dash_os_packet_timeout | 1800 seconds | Specifies the number of seconds after which the Network Packets line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_process_span | 7 days | Specifies the number of days worth of data to plot on the Process chart on the Operating System Analysis dashboard. |
-| dash_os_process_timeout | 1800 seconds | Specifies the number of seconds after which the Process line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_storage_timeout | 1800 seconds | Specifies the number of seconds after which the Storage pie chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_traffic_span | 7 days | Specifies the number of days worth of data to plot on the Traffic chart on the Operating System Analysis dashboard. |
-| dash_os_traffic_timeout | 1800 seconds | Specifies the number of seconds after which the Traffic line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_os_util_timeout | 1800 seconds | Specifies the number of seconds after which the Utilization line chart is refreshed on the Operating System Analysis dashboard. |
-| dash_probe_log_timeout | 300 seconds | Specifies the number of seconds after which the Probe Log table refreshed. |
-| dash_replication_archivestat_span | 7 days | Specifies the number of days worth of data to plot on the WAL Archive Status chart on the Streaming Replication Analysis dashboard. |
-| dash_replication_archivestat_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Archive Status line chart is refreshed on the Streaming Replication dashboard. |
-| dash_replication_pagelag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Pages chart on the Streaming Replication dashboard. |
-| dash_replication_pagelag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Pages line chart is refreshed on the Streaming Replication dashboard. |
-| dash_replication_segmentlag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Segments chart on the Streaming Replication dashboard. |
-| dash_replication_segmentlag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Segments line chart is refreshed on the Streaming Replication dashboard. |
-| dash_replication_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Replication Lag Time chart on the Streaming Replication dashboard. |
-| dash_replication_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Replication Lag Time line chart is refreshed on the Streaming Replication dashboard. |
-| dash_server_buffers_written | 168 hours | Specifies the number of days worth of data to plot on the Background Writer Statistics chart on the Server Analysis dashboard. |
-| dash_server_buffers_written_timeout | 300 seconds | Specifies the number of seconds after which the Background Writer Statistics line chart is refreshed on the Server Analysis dashboard. |
-| dash_server_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is refreshed on the Server Analysis dashboard. |
-| dash_server_database_timeout | 300 seconds | Specifies the number of seconds after which the Databases table is refreshed on the Server Analysis dashboard. |
-| dash_server_dbsize_span | 7 days | Specifies the number of days worth of data to plot on the Database Size Analysis on the Server Analysis dashboard. |
-| dash_server_dbsize_timeout | 1800 seconds | Specifies the number of seconds after which the Database Size line chart is refreshed on the Server Analysis dashboard. |
-| dash_server_disk_timeout | 1800 seconds | Specifies the number of seconds after which the Disk line chart is refreshed on the Server Analysis dashboard. |
-| dash_server_global_span | 7 days | Specifies the number of days worth of data to plot on the Disk line chart on the Server Analysis dashboard. |
-| dash_server_sharedbuff_span | 7 days | Specifies the number of days worth of data to plot on the Shared Buffer chart on the Server Analysis dashboard. |
-| dash_server_sharedbuff_timeout | 1800 seconds | Specifies the number of seconds after which the Shared Buffers line chart is refreshed on the Server Analysis dashboard. |
-| dash_server_tabspacesize_span | 7 days | Specifies the number of days worth of data to plot on the Tablespace Size chart on the Server Analysis dashboard. |
-| dash_server_tabspacesize_timeout | 1800 seconds | Specifies the number of seconds after which the Tablespace Size line chart is refreshed on the Server Analysis dashboard. |
-| dash_server_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity chart on the Server Analysis dashboard. |
-| dash_server_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is refreshed on the Server Analysis dashboard. |
-| dash_sessact_lockact_timeout | 300 seconds | Specifies the number of seconds after which the Session Lock Activity table is refreshed on the Session Activity Analysis dashboard. |
-| dash_sessact_workload_timeout | 300 seconds | Specifies the number of seconds after which the Session Workload table is refreshed on the Session Activity Analysis dashboard. |
-| dash_sess_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits By Time Waited pie chart is refreshed on the Session Waits Analysis dashboard. |
-| dash_sess_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits Details table is refreshed on the Session Waits Analysis dashboard. |
-| dash_storage_dbdtls_timeout | 300 seconds | Specifies the number of seconds after which the Database Details table is refreshed on the Storage Analysis dashboard. |
-| dash_storage_dbovervw_timeout | 300 seconds | Specifies the number of seconds after which the Database Overview pie chart is refreshed on the Storage Analysis dashboard. |
-| dash_storage_hostdtls_timeout | 300 seconds | Specifies the number of seconds after which the Host Details table is refreshed. |
-| dash_storage_hostovervw_timeout | 300 seconds | Specifies the number of seconds after which the Host Overview pie chart is refreshed on the Storage Analysis dashboard. |
-| dash_storage_tblspcdtls_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Details table is refreshed on the Storage Analysis dashboard. |
-| dash_storage_tblspcovervw_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Overview pie chart is refreshed on the Storage Analysis dashboard. |
-| dash_sys_waits_nowaits_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Number Of Waits pie chart is refreshed on the System Waits Analysis dashboard. |
-| dash_sys_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Time Waited pie chart is refreshed on the System Waits Analysis dashboard. |
-| dash_sys_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the System Waits Details table is refreshed on the System Waits Analysis dashboard. |
-| deleted_charts_retention_time | 7 days | Specifies the number of days that a custom chart (displayed on a user-defined dashboard) is stored. |
-| deleted_objects_data_retention_time | 3 days | Specifies the number of days after which the probe data of any deleted object, for example, server or agents or Barman server, is deleted from the pemhistory and pemdata schema. It deletes the job in case you don't want to delete the data of those obsolete objects. |
-| deleted_probes_retention_time | 7 days | Specifies the number of days that a custom probe (displayed on a user-defined dashboard) is stored. |
-| download_chart_format | jpeg | Specifies the format in which a downloaded chart is stored. Can be jpeg or png. |
+
+| Parameter name | Value and unit | Description |
+|--------------------------------------|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| audit_log_retention_time | 30 days | Specifies the number of days for an audit log to be retained on the PEM server. |
+| auto_create_agent_alerts | true | Specifies whether to create default agent level alerts automatically when an agent is registered. |
+| auto_create_server_alerts | true | Specifies whether to create default server level alerts automatically when a server is bound to an agent. |
+| chart_disable_bullets | false | Enables/disables bullets on line charts on dashboards and Capacity Manager reports. |
+| cm_data_points_per_report | 50 | Specifies the number of data points to plot on charts on Capacity Manager reports. |
+| cm_max_end_date_in_years | 5 years | Specifies the maximum amount of time for the Capacity Manager to extrapolate data for. Ensures that threshold-based end dates of reports aren't extrapolated indefinitely. |
+| dash_alerts_timeout | 60 seconds | Specifies the number of seconds after which the components of the Alerts dashboard are refreshed. |
+| dash_db_comrol_span | 7 days | Specifies the number of days worth of data to plot on the Commit/Rollback Analysis chart on the Database Analysis and Server Analysis dashboards. |
+| dash_db_comrol_timeout | 1800 seconds | Specifies the number of seconds after which the Commits/Rollbacks line chart is refreshed on the Database Analysis and Server Analysis dashboards. |
+| dash_db_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is refreshed on the Database Analysis dashboard. |
+| dash_db_eventlag_span | 7 days | Specifies the number of days worth of data to plot on the Number of Events Lag chart for slony replication on the Database Analysis dashboard. |
+| dash_db_eventlag_timeout | 1800 seconds | Specifies the number of seconds after which the Number of Events Lag line chart for slony replication is refreshed on the Database Analysis dashboard. |
+| dash_db_hottable_rows | 25 rows | Specifies the number of rows to show on the HOT Table Analysis table on the Database Analysis dashboard. |
+| dash_db_hottable_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables table is refreshed on the Database Analysis dashboard. |
+| dash_db_io_span | 7 days | Specifies the number of days worth of data to plot on the Database I/O Analysis chart on the Database Analysis and I/O Analysis dashboards. |
+| dash_db_io_timeout | 1800 seconds | Specifies the number of seconds after which the Database I/O line chart is refreshed on the Database Analysis and I/O Analysis dashboards. |
+| dash_db_rowact_span | 7 days | Specifies the number of days worth of data to plot on the Row Activity Analysis chart on the Database Analysis, I/O Analysis, and Server Analysis dashboards. |
+| dash_db_rowact_timeout | 1800 seconds | Specifies the number of seconds after which the Row Activity line chart is refreshed on the Database Analysis, I/O Analysis, and Server Analysis dashboards. |
+| dash_db_storage_timeout | 300 seconds | Specifies the number of seconds after which the Storage bar chart is refreshed on the Database Analysis dashboard. |
+| dash_db_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Time Lag chart for Slony replication on the Database Analysis dashboard. |
+| dash_db_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Time Lag line chart for Slony replication is refreshed on the Database Analysis dashboard. |
+| dash_db_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity Analysis chart on the Database Analysis dashboard. |
+| dash_db_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is refreshed on the Database Analysis dashboard. |
+| dash_efm_timeout | 300 seconds | Specifies the number of seconds after which the Failover Manager Node Status and Failover Manager Cluster Info line chart is refreshed on the Streaming Replication dashboard. |
+| dash_global_overview_timeout | 30 seconds | Specifies the number of seconds after which the components of the Global Overview dashboard are refreshed. |
+| dash_header_timeout | 60 seconds | Specifies the number of seconds after which the information on the header of all the dashboards are refreshed. |
+| dash_io_chkpt_span | 7 days | Specifies the number of days worth of data to plot on the Checkpoints chart on the I/O Analysis dashboard. |
+| dash_io_chkpt_timeout | 1800 seconds | Specifies the number of seconds after which the Checkpoints line chart is refreshed on the I/O Analysis dashboard. |
+| dash_io_hotindx_timeout | 300 seconds | Specifies the number of seconds after which the Hot Indexes bar chart is refreshed on the I/O Analysis dashboard. |
+| dash_io_hottbl_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables bar chart is refreshed on the I/O Analysis dashboard. |
+| dash_io_index_objectio_rows | 25 rows | Specifies the number of rows displayed on the Index Activity table on the I/O Analysis and Object Activity Analysis dashboards. |
+| dash_io_index_objectio_timeout | 60 seconds | Specifies the number of seconds after which the Index Activity table is refreshed on the I/O Analysis and Object Activity Analysis dashboards. |
+| dash_io_objectio_rows | 25 rows | Specifies the number of rows displayed on the Object I/O Details table on the I/O Analysis and Object Activity Analysis dashboards. |
+| dash_io_objectio_timeout | 300 seconds | Specifies the number of seconds after which the Object I/O Details table is refreshed on the I/O Analysis and Object Activity Analysis Dashboards. |
+| dash_memory_hostmemact_span | 7 days | Specifies the number of days worth of data to plot on the Host Memory Activity Analysis chart on the Memory Analysis dashboard. |
+| dash_memory_hostmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Host Memory Activity line chart is refreshed on the Memory Analysis dashboard. |
+| dash_memory_hostmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Host Memory Configuration pie chart is refreshed on the Memory Analysis and Server Analysis dashboards. |
+| dash_memory_servmemact_span | 7 days | Specifies the number of days worth of data to plot on the server Memory Activity Analysis chart on the Memory Analysis dashboard. |
+| dash_memory_servmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Server Memory Activity line chart is refreshed on the Memory Analysis dashboard. |
+| dash_memory_servmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Server Memory Configuration pie chart is refreshed on the Memory Analysis dashboard. |
+| dash_objectact_objstorage_rows | 15 rows | Specifies the number of rows to show on the Object Storage table on the Object Activity Analysis dashboard. |
+| dash_objectact_objstorage_timeout | 300 seconds | Specifies the number of seconds after which the Object Storage table is refreshed on the Object Activity Analysis dashboard. |
+| dash_objectact_objtopindexes_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Indexes bar chart is refreshed on the Object Activity Analysis dashboard. |
+| dash_objectact_objtoptables_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Tables bar chart is refreshed on the Object Activity Analysis dashboard. |
+| dash_os_cpu_span | 7 days | Specifies the number of days worth of data to plot on the CPU chart on the Operating System Analysis dashboard. |
+| dash_os_cpu_timeout | 1800 seconds | Specifies the number of seconds after which the CPU line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_data_span | 7 days | Specifies the number of days worth of data to plot on the I/O line chart on the Operating System Analysis dashboard. |
+| dash_os_disk_span | 7 days | Specifies the number of days worth of data to plot on the Utilisation chart on the Operating System Analysis dashboard. |
+| dash_os_hostfs_timeout | 1800 seconds | Specifies the number of seconds after which the Host File System Details table is refreshed on the Operating System Analysis dashboard. |
+| dash_os_io_timeout | 1800 seconds | Specifies the number of seconds after which the I/O line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_memory_span | 7 days | Specifies the number of days worth of data to plot on the Memory chart on the Operating System Analysis dashboard. |
+| dash_os_memory_timeout | 1800 seconds | Specifies the number of seconds after which the Memory line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_packet_span | 7 days | Specifies the number of days worth of data to plot on the Packet chart on the Operating System Analysis dashboard. |
+| dash_os_packet_timeout | 1800 seconds | Specifies the number of seconds after which the Network Packets line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_process_span | 7 days | Specifies the number of days worth of data to plot on the Process chart on the Operating System Analysis dashboard. |
+| dash_os_process_timeout | 1800 seconds | Specifies the number of seconds after which the Process line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_storage_timeout | 1800 seconds | Specifies the number of seconds after which the Storage pie chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_traffic_span | 7 days | Specifies the number of days worth of data to plot on the Traffic chart on the Operating System Analysis dashboard. |
+| dash_os_traffic_timeout | 1800 seconds | Specifies the number of seconds after which the Traffic line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_os_util_timeout | 1800 seconds | Specifies the number of seconds after which the Utilization line chart is refreshed on the Operating System Analysis dashboard. |
+| dash_probe_log_timeout | 300 seconds | Specifies the number of seconds after which the Probe Log table is refreshed. |
+| dash_replication_archivestat_span | 7 days | Specifies the number of days worth of data to plot on the WAL Archive Status chart on the Streaming Replication Analysis dashboard. |
+| dash_replication_archivestat_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Archive Status line chart is refreshed on the Streaming Replication dashboard. |
+| dash_replication_pagelag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Pages chart on the Streaming Replication dashboard. |
+| dash_replication_pagelag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Pages line chart is refreshed on the Streaming Replication dashboard. |
+| dash_replication_segmentlag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Segments chart on the Streaming Replication dashboard. |
+| dash_replication_segmentlag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Segments line chart is refreshed on the Streaming Replication dashboard. |
+| dash_replication_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Replication Lag Time chart on the Streaming Replication dashboard. |
+| dash_replication_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Replication Lag Time line chart is refreshed on the Streaming Replication dashboard. |
+| dash_server_buffers_written | 168 hours | Specifies the number of hours worth of data to plot on the Background Writer Statistics chart on the Server Analysis dashboard. |
+| dash_server_buffers_written_timeout | 300 seconds | Specifies the number of seconds after which the Background Writer Statistics line chart is refreshed on the Server Analysis dashboard. |
+| dash_server_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is refreshed on the Server Analysis dashboard. |
+| dash_server_database_timeout | 300 seconds | Specifies the number of seconds after which the Databases table is refreshed on the Server Analysis dashboard. |
+| dash_server_dbsize_span | 7 days | Specifies the number of days worth of data to plot on the Database Size Analysis chart on the Server Analysis dashboard. |
+| dash_server_dbsize_timeout | 1800 seconds | Specifies the number of seconds after which the Database Size line chart is refreshed on the Server Analysis dashboard. |
+| dash_server_disk_timeout | 1800 seconds | Specifies the number of seconds after which the Disk line chart is refreshed on the Server Analysis dashboard. |
+| dash_server_global_span | 7 days | Specifies the number of days worth of data to plot on the Disk line chart on the Server Analysis dashboard. |
+| dash_server_sharedbuff_span | 7 days | Specifies the number of days worth of data to plot on the Shared Buffer chart on the Server Analysis dashboard. |
+| dash_server_sharedbuff_timeout | 1800 seconds | Specifies the number of seconds after which the Shared Buffers line chart is refreshed on the Server Analysis dashboard. |
+| dash_server_tabspacesize_span | 7 days | Specifies the number of days worth of data to plot on the Tablespace Size chart on the Server Analysis dashboard. |
+| dash_server_tabspacesize_timeout | 1800 seconds | Specifies the number of seconds after which the Tablespace Size line chart is refreshed on the Server Analysis dashboard. |
+| dash_server_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity chart on the Server Analysis dashboard. |
+| dash_server_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is refreshed on the Server Analysis dashboard. |
+| dash_sess_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits By Time Waited pie chart is refreshed on the Session Waits Analysis dashboard. |
+| dash_sess_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits Details table is refreshed on the Session Waits Analysis dashboard. |
+| dash_sessact_lockact_timeout | 300 seconds | Specifies the number of seconds after which the Session Lock Activity table is refreshed on the Session Activity Analysis dashboard. |
+| dash_sessact_workload_timeout | 300 seconds | Specifies the number of seconds after which the Session Workload table is refreshed on the Session Activity Analysis dashboard. |
+| dash_storage_dbdtls_timeout | 300 seconds | Specifies the number of seconds after which the Database Details table is refreshed on the Storage Analysis dashboard. |
+| dash_storage_dbovervw_timeout | 300 seconds | Specifies the number of seconds after which the Database Overview pie chart is refreshed on the Storage Analysis dashboard. |
+| dash_storage_hostdtls_timeout | 300 seconds | Specifies the number of seconds after which the Host Details table is refreshed. |
+| dash_storage_hostovervw_timeout | 300 seconds | Specifies the number of seconds after which the Host Overview pie chart is refreshed on the Storage Analysis dashboard. |
+| dash_storage_tblspcdtls_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Details table is refreshed on the Storage Analysis dashboard. |
+| dash_storage_tblspcovervw_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Overview pie chart is refreshed on the Storage Analysis dashboard. |
+| dash_sys_waits_nowaits_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Number Of Waits pie chart is refreshed on the System Waits Analysis dashboard. |
+| dash_sys_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Time Waited pie chart is refreshed on the System Waits Analysis dashboard. |
+| dash_sys_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the System Waits Details table is refreshed on the System Waits Analysis dashboard. |
+| deleted_charts_retention_time | 7 days | Specifies the number of days that a custom chart (displayed on a user-defined dashboard) is stored. |
+| deleted_objects_data_retention_time | 3 days | Specifies the number of days after which the probe data of any deleted object (for example, a server, agent, or Barman server) is deleted from the `pemhistory` and `pemdata` schemas. Delete the job if you don't want the data of those obsolete objects to be deleted. |
+| deleted_probes_retention_time | 7 days | Specifies the number of days that a custom probe (displayed on a user-defined dashboard) is stored. |
+| download_chart_format | jpeg | Specifies the format in which a downloaded chart is stored. Can be jpeg or png. |
| flapping_detection_state_change | 3 | Specifies the number of state changes detected within a specified interval to define a given alert as flapping. Flapping starts when more than `N` state changes have occurred over \[(`N` + 1) \* min(probe_interval) \* 2] minutes and the final state isn't None. For example, with `N` = 3 and min(probe_interval) = 5 minutes, flapping starts when more than 3 state changes occur within 40 minutes. The default value of `N` is 2 or 3, and min(probe_interval) is the smallest interval of all the probes used by the alert. Flapping ends when zero state changes have occurred over \[2 \* `N` \* min(probe_interval)] minutes. |
-| job_retention_time | 30 days | Specifies the number of days that nonrecurring scheduled tasks and their associated jobs are retained. |
-| long_running_transaction_minutes | 5 minutes | Specifies the number of minutes a query executes before being considered long running. |
-| nagios_cmd_file_name | <file_name> | Specifies nagios command file to which passive service check results are sent. |
-| nagios_enabled | t | Specifies whether alert notification are submitted to nagios. |
+| job_retention_time | 30 days | Specifies the number of days that nonrecurring scheduled tasks and their associated jobs are retained. |
+| long_running_transaction_minutes | 5 minutes | Specifies the number of minutes a query executes before being considered long running. |
+| nagios_cmd_file_name | <file_name> | Specifies the Nagios command file to which passive service check results are sent. |
+| nagios_enabled | t | Specifies whether alert notifications are submitted to Nagios. |
| nagios_medium_alert_as_critical | f | Specifies whether a medium level PEM alert is considered critical in Nagios. |
-| nagios_spool_retention_time | 7 days | Specifies the number of days to retain nagios messages in the spool table before they are discarded. |
-| probe_log_retention_time | 30 days | Specifies the number of days that probe log records are retained. |
+| nagios_spool_retention_time | 7 days | Specifies the number of days to retain Nagios messages in the spool table before they're discarded. |
+| probe_log_retention_time | 30 days | Specifies the number of days that probe log records are retained. |
| reminder_notification_interval | 24 hours | Specifies the number of hours after which a reminder email is sent in case an alert wasn't cleared. |
-| server_log_retention_time | 30 days | Specifies the number of days that the server log is retained on the PEM server. |
-| show_data_tab_on_graph | false | If `true`, a **Data** tab is added to each graph. Select the **Data** tab to review the data that's plotted on the graph. |
-| smtp_authentication | false | Specifies whether to enable/disable authentication over SMTP. |
-| smtp_enabled | true | Specifies whether to enable/disable sending of emails. |
-| smtp_encryption | false | Specifies whether to send SMTP email using an encrypted connection. |
-| smtp_password | | Specifies the password to use to connect to the SMTP server. |
-| smtp_port | 25 | Specifies the SMTP server port to use for sending email. |
-| smtp_server | 127.0.0.1 | Specifies the SMTP server host address to use for sending email. |
-| smtp_spool_retention_time | 7 days | Specifies the number of days to retain sent email messages in the spool table before they are discarded. |
-| smtp_username | | Specifies the username to used to connect to an SMTP server. |
-| snmp_community | public | Specifies the SNMP community used when sending traps. Used only with SNMPv1 and SNMPv2. |
-| snmp_enabled | true | Specifies whether to enable/disable sending SNMP traps. |
-| snmp_port | 162 | Specifies the SNMP server port to use for sending SNMP traps. |
-| snmp_server | 127.0.0.1 | Specifies the SNMP server host address to use for sending SNMP traps. |
-| snmp_spool_retention_time | 7 days | Specifies the number of days to retain sent traps in the spool table before they are discarded. |
-| snmp_security_name | | Specifies the user name or security name for sending SNMP traps. Used only with SNMPv3. |
-| snmp_security_engine_id | | Specifies the engine id of the SNMP agent on the SNMP server. Used only with SNMPv3. |
-| snmp_security_level | NOAUTH_NOPRIV | Specifies security level. Its possible values are: AUTH_NOPRIV - Authentication, No Privacy or AUTH_PRIV - Authentication, Privacy or NOAUTH_NOPRIV - no Authentication, no Privacy. Used only with SNMPv3. |
-| snmp_context_name | | Specifies the context name, the identifier for MIB objects when sending SNMP traps. Used only with SNMPv3. |
-| snmp_context_engine_id | | Specifies the context engine id, the identifier for MIB objects when sending SNMP traps. If not specified, snmp_security_engine_id is used. Used only with SNMPv3. |
-| snmp_authentication_protocol | NONE | Specifies the authentication type for SNMP traps. Its possible values are NONE, HMACMD5, and HMACSHA. Used only with SNMPv3. |
-| snmp_privacy_protocol | NONE | Specifies the privacy protocol for SNMP traps. Its possible values are NONE, DES, AES128, IDEA, AES192, or AES256. Used only with SNMPv3. |
-| snmp_authentication_password | | Specifies the authentication password associated with security name mentioned in snmp_security_name. Used only for SNMPv3. |
-| snmp_privacy_password | | Specifies the privacy password associated with security name mentioned in snmp_security_name. Used only for SNMPv3. |
-| webclient_help_pg | EDB hosted documentation | Specifies the location of the online PostgreSQL core documentation. |
+| server_log_retention_time | 30 days | Specifies the number of days that the server log is retained on the PEM server. |
+| show_data_tab_on_graph | false | If `true`, a **Data** tab is added to each graph. Select the **Data** tab to review the data that's plotted on the graph. |
+| smtp_authentication | false | Specifies whether to enable/disable authentication over SMTP. |
+| smtp_enabled | true | Specifies whether to enable/disable sending of emails. |
+| smtp_encryption | false | Specifies whether to send SMTP email using an encrypted connection. |
+| smtp_password | | Specifies the password to use to connect to the SMTP server. |
+| smtp_port | 25 | Specifies the SMTP server port to use for sending email. |
+| smtp_server | 127.0.0.1 | Specifies the SMTP server host address to use for sending email. |
+| smtp_spool_retention_time | 7 days | Specifies the number of days to retain sent email messages in the spool table before they are discarded. |
+| smtp_username | | Specifies the username to use to connect to an SMTP server. |
+| snmp_authentication_password | | Specifies the authentication password associated with the security name specified in snmp_security_name. Used only with SNMPv3. |
+| snmp_authentication_protocol | NONE | Specifies the authentication type for SNMP traps. Possible values are NONE, HMACMD5, and HMACSHA. Used only with SNMPv3. |
+| snmp_community | public | Specifies the SNMP community used when sending traps. Used only with SNMPv1 and SNMPv2. |
+| snmp_context_engine_id | | Specifies the context engine id, the identifier for MIB objects when sending SNMP traps. If not specified, snmp_security_engine_id is used. Used only with SNMPv3. |
+| snmp_context_name | | Specifies the context name, the identifier for MIB objects when sending SNMP traps. Used only with SNMPv3. |
+| snmp_enabled | true | Specifies whether to enable/disable sending SNMP traps. |
+| snmp_port | 162 | Specifies the SNMP server port to use for sending SNMP traps. |
+| snmp_privacy_password | | Specifies the privacy password associated with the security name specified in snmp_security_name. Used only with SNMPv3. |
+| snmp_privacy_protocol | NONE | Specifies the privacy protocol for SNMP traps. Possible values are NONE, DES, AES128, IDEA, AES192, or AES256. Used only with SNMPv3. |
+| snmp_security_engine_id | | Specifies the engine id of the SNMP agent on the SNMP server. Used only with SNMPv3. |
+| snmp_security_level | NOAUTH_NOPRIV | Specifies the security level. Possible values are: AUTH_NOPRIV (authentication, no privacy; only NONE is allowed for the privacy protocol), AUTH_PRIV (authentication and privacy; NONE isn't allowed for the authentication or privacy protocols), and NOAUTH_NOPRIV (no authentication, no privacy; only NONE is allowed for the authentication and privacy protocols). Used only with SNMPv3. |
+| snmp_security_name | | Specifies the user name or security name for sending SNMP traps. Used only with SNMPv3. |
+| snmp_server | 127.0.0.1 | Specifies the SNMP server host address to use for sending SNMP traps. |
+| snmp_spool_retention_time | 7 days | Specifies the number of days to retain sent traps in the spool table before they're discarded. |
+| webclient_help_pg | EDB hosted documentation | Specifies the location of the online PostgreSQL core documentation. |
diff --git a/product_docs/docs/pem/9/monitoring_event_history.mdx b/product_docs/docs/pem/9/monitoring_event_history.mdx
new file mode 100644
index 00000000000..89fdbf1666d
--- /dev/null
+++ b/product_docs/docs/pem/9/monitoring_event_history.mdx
@@ -0,0 +1,18 @@
+---
+title: "Monitoring event history"
+---
+
+You can monitor the executed event details in the `pem.event_history` table. It provides the username, the execution time of the event, the type of event that occurred, and details about the event. The fields in the table are:
+
+!!!Note
+Currently, `pem.event_history` table records only the alert blackout history.
+!!!
+
+| Field name | Description |
+|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| recorded_time | Records the execution time of the event. Displays the date and time along with the fractional seconds. |
+| user_name | The username of the user who executed or scheduled the event. |
+| component | The event name. |
+| operation | The action taken for the event. For example, if the user has enabled an alert blackout, it displays `enable_alert_blackout`. |
+| message | The description of the action taken for the event. For example, if the user has enabled an alert blackout for the server, it displays `Enabled the alert blackout for the server`. |
+| details | The JSON output of the event that occurred. |
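+
+For example, to review the most recent events (a minimal sketch, run while connected to the PEM server database):
+
+```sql
+-- Show the ten most recent events, newest first
+SELECT recorded_time, user_name, component, operation, message
+FROM pem.event_history
+ORDER BY recorded_time DESC
+LIMIT 10;
+```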
diff --git a/product_docs/docs/pem/9/monitoring_performance/alerts.mdx b/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
index d84e2b4ed4b..0c1b4719258 100644
--- a/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
+++ b/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
@@ -238,87 +238,92 @@ The tables that follow list the system-defined alert templates that you can use
### Templates applicable on server
-| Template name | Description | Probe dependency |
-| --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |----------------------|
-| Total table bloat in server | The total space wasted by tables in server, in MB | table_bloat, settings |
-| Largest table (by multiple of unbloated size) | Largest table in server, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB | table_bloat, settings |
-| Highest table bloat in server | The most space wasted by a table in server, in MB | table_bloat, settings |
-| Average table bloat in server | The average space wasted by tables in server, in MB | table_bloat, settings |
-| Table size in server | The size of tables in server, in MB | table_size |
-| Database size in server | The size of databases in server, in MB | database_size |
-| Number of WAL files | Total number of Write Ahead Log files | number_of_wal_files |
-| Number of prepared transactions | Number of transactions in prepared state | number_of_prepared_transactions |
-| Total connections | Total number of connections in the server | session_info |
-| Total connections as percentage of max_connections | Total number of connections in the server as a percentage of maximum connections allowed on server, settings | session_info, settings |
+| Template name | Description | Probe dependency |
+|-----------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------|
+| Total table bloat in server | The total space wasted by tables in server, in MB | table_bloat, settings |
+| Largest table (by multiple of unbloated size) | Largest table in server, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB | table_bloat, settings |
+| Highest table bloat in server | The most space wasted by a table in server, in MB | table_bloat, settings |
+| Average table bloat in server | The average space wasted by tables in server, in MB | table_bloat, settings |
+| Table size in server | The size of tables in server, in MB | table_size |
+| Database size in server | The size of databases in server, in MB | database_size |
+| Number of WAL files | Total number of Write Ahead Log files | number_of_wal_files |
+| Number of prepared transactions | Number of transactions in prepared state | number_of_prepared_transactions |
+| Total connections | Total number of connections in the server | session_info |
+| Total connections as percentage of max_connections | Total number of connections in the server as a percentage of the maximum connections allowed on the server | session_info, settings |
| Unused, non-superuser connections | Number of unused, non-superuser connections on the server | session_info, user_info, settings |
| Unused, non-superuser connections as percentage of max_connections | Number of unused, non-superuser connections on the server as a percentage of max_connections | session_info, user_info, settings |
-| Ungranted locks | Number of ungranted locks in server | blocked_session_info |
-| Percentage of buffers written by backends | The percentage of buffers written by backends vs. the total buffers written | background_writer_statistics |
-| Percentage of buffers written by checkpoint | The percentage of buffers written by the checkpoints vs. the total buffers written | background_writer_statistics |
-| Buffers written per second | Number of buffers written per second, over the last two probe cycles | background_writer_statistics |
-| Buffers allocated per second | Number of buffers allocated per second, over the last two probe cycles | background_writer_statistics |
-| Connections in idle state | Number of connections in server that are in idle state | session_info |
-| Connections in idle-in-transaction state | Number of connections in server that are in idle-in-transaction state | session_info |
-| Connections in idle-in-transaction state, as percentage of max_connections | Number of connections in server that are in idle-in-transaction state, as a percentage of maximum connections allowed on server, settings | session_info, settings |
-| Long-running idle connections | Number of connections in the server that have been idle for more than N seconds | session_info |
-| Long-running idle connections and idle transactions | Number of connections in the server that have been idle or transactions idle-in-transaction for more than N seconds | session_info |
-| Long-running idle transactions | Number of connections in the server that have been idle in transaction for more than N seconds | session_info |
-| Long-running transactions | Number of transactions in server that have been running for more than N seconds | session_info |
-| Long-running queries | Number of queries in server that have been running for more than N seconds | session_info |
-| Long-running vacuums | Number of vacuum operations in server that have been running for more than N seconds | session_info |
-| Long-running autovacuums | Number of autovacuum operations in server that have been running for more than N seconds | session_info |
-| Committed transactions percentage | Percentage of transactions in the server that committed vs. that rolled-back over last N minutes | database_statistics |
-| Shared buffers hit percentage | Percentage of block read requests in the server that were satisfied by shared buffers, over last N minutes | database_statistics |
-| Tuples inserted | Tuples inserted into server over last N minutes | database_statistics |
-| InfiniteCache buffers hit percentage | Percentage of block read requests in the server that were satisfied by InfiniteCache, over last N minutes | database_statistics |
-| Tuples fetched | Tuples fetched from server over last N minutes | database_statistics |
-| Tuples returned | Tuples returned from server over last N minutes | database_statistics |
-| Dead Tuples | Number of estimated dead tuples in server | table_statistics |
-| Tuples updated | Tuples updated in server over last N minutes | database_statistics |
-| Tuples deleted | Tuples deleted from server over last N minutes | database_statistics |
-| Tuples hot updated | Tuples hot updated in server, over last N minutes | table_statistics |
-| Sequential Scans | Number of full table scans in server, over last N minutes | table_statistics |
-| Index Scans | Number of index scans in server, over last N minutes | table_statistics |
-| Hot update percentage | Percentage of hot updates in the server over last N minutes | table_statistics |
-| Live Tuples | Number of estimated live tuples in server | table_statistics |
-| Dead tuples percentage | Percentage of estimated dead tuples in server | table_statistics |
-| Last Vacuum | Hours since last vacuum on the server | table_statistics |
-| Last AutoVacuum | Hours since last autovacuum on the server | table_statistics |
-| Last Analyze | Hours since last analyze on the server | table_statistics |
-| Last AutoAnalyze | Hours since last autoanalyze on the server | table_statistics |
-| Percentage of buffers written by backends over the last N minutes | The percentage of buffers written by backends vs. the total buffers backends over last N | background_writer_statistics |
-| Table Count | Total number of tables in server | oc_table |
-| Function Count | Total number of functions in server | oc_function |
-| Sequence Count | Total number of sequences in server | oc_sequence |
-| A user expires in N days | Number of days before a user's validity expires | user_info |
-| Index size as a percentage of table size | Size of the indexes in server, as a percentage of their tables' size | index_size, oc_index, table_size |
-| Largest index by table-size percentage | Largest index in server, calculated as percentage of its table's size, oc_index, table_size | index_size, oc_index, table_size |
-| Number of ERRORS in the logfile on server M in the last X hours | The number of ERRORS in the logfile on server M in last X hours | N/A |
-| Number of WARNINGS in the logfile on server M in the last X hours | The number of WARNINGS in logfile on server M in the last X hours | N/A |
-| Number of WARNINGS or ERRORS in the logfile on server M in the last X hours | The number of WARNINGS or ERRORS in the logfile on server M in the last X hours | N/A |
-| Number of attacks detected in the last N minutes | The number of SQL injection attacks occurred in the last N minutes | sql_protect |
-| Number of attacks detected in the last N minutes by username | The number of SQL injection attacks occurred in the last N minutes by username | sql_protect |
-| Number of replica servers lag behind the primary by write location | Streaming Replication: number of replica servers lag behind the primary by write location | streaming_replication |
-| Number of replica servers lag behind the primary by flush location | Streaming Replication: number of replica servers lag behind the primary by flush location | streaming_replication |
-| Number of replica servers lag behind the primary by replay location | Streaming Replication: number of replica servers lag behind the primary by replay location | streaming_replication |
-| Replica server lag behind the primary by write location | Streaming Replication: replica server lag behind the primary by write location in MB | streaming_replication |
-| Replica server lag behind the primary by flush location | Streaming Replication: replica server lag behind the primary by flush location in MB | streaming_replication |
-| Replica server lag behind the primary by replay location | Streaming Replication: replica server lag behind the primary by replay location in MB | streaming_replication |
-| Replica server lag behind the primary by size (MB) | Streaming Replication: replica server lag behind the primary by size in MB | streaming_replication |
-| Replica server lag behind the primary by WAL segments | Streaming Replication: replica server lag behind the primary by WAL segments | streaming_replication |
-| Replica server lag behind the primary by WAL pages | Streaming Replication: replica server lag behind the primary by WAL pages | streaming_replication |
-| Total materialized view bloat in server | The total space wasted by materialized views in server, in MB | mview_bloat, settings |
-| Largest materialized view (by multiple of unbloated size) | Largest materialized view in server, calculated as a multiple of its own estimated unbloated size; exclude materialized views smaller than N MB | mview_bloat, settings |
-| Highest materialized view bloat in server | The most space wasted by a materialized view in server, in MB | mview_bloat, settings |
-| Average materialized view bloat in server | The average space wasted by materialized views in server, in MB | mview_bloat, settings |
-| Materialized view size in server | The size of materialized view in server, in MB | mview_size |
-| View Count | Total number of views in server | oc_views |
-| Materialized View Count | Total number of materialized views in server | oc_views |
-| Audit config mismatch | Check for audit config parameter mismatch | audit_configuration |
-| Server Down | Specified server is currently inaccessible | N/A |
-| Number of WAL archives pending | Streaming Replication: number of WAL files pending to be replayed at replica | wal_archive_status |
-| Number of minutes lag of replica server from primary server | Streaming Replication: number of minutes replica node is lagging behind the primary node | streaming_replication_lag_time |
-| Log config mismatch | Check for log config parameter mismatch | log_configuration |
+| Ungranted locks | Number of ungranted locks in server | blocked_session_info |
+| Percentage of buffers written by backends | The percentage of buffers written by backends vs. the total buffers written | background_writer_statistics |
+| Percentage of buffers written by checkpoint | The percentage of buffers written by the checkpoints vs. the total buffers written | background_writer_statistics |
+| Buffers written per second | Number of buffers written per second, over the last two probe cycles | background_writer_statistics |
+| Buffers allocated per second | Number of buffers allocated per second, over the last two probe cycles | background_writer_statistics |
+| Connections in idle state | Number of connections in server that are in idle state | session_info |
+| Connections in idle-in-transaction state | Number of connections in server that are in idle-in-transaction state | session_info |
+| Connections in idle-in-transaction state, as percentage of max_connections | Number of connections in server that are in idle-in-transaction state, as a percentage of maximum connections allowed on server | session_info, settings |
+| Long-running idle connections | Number of connections in the server that have been idle for more than N seconds | session_info |
+| Long-running idle connections and idle transactions | Number of connections in the server that have been idle or transactions idle-in-transaction for more than N seconds | session_info |
+| Long-running idle transactions | Number of connections in the server that have been idle in transaction for more than N seconds | session_info |
+| Long-running transactions | Number of transactions in server that have been running for more than N seconds | session_info |
+| Long-running queries | Number of queries in server that have been running for more than N seconds | session_info |
+| Long-running vacuums | Number of vacuum operations in server that have been running for more than N seconds | session_info |
+| Long-running autovacuums | Number of autovacuum operations in server that have been running for more than N seconds | session_info |
+| Committed transactions percentage | Percentage of transactions in the server that committed vs. that rolled-back over last N minutes | database_statistics |
+| Shared buffers hit percentage | Percentage of block read requests in the server that were satisfied by shared buffers, over last N minutes | database_statistics |
+| Tuples inserted | Tuples inserted into server over last N minutes | database_statistics |
+| InfiniteCache buffers hit percentage | Percentage of block read requests in the server that were satisfied by InfiniteCache, over last N minutes | database_statistics |
+| Tuples fetched | Tuples fetched from server over last N minutes | database_statistics |
+| Tuples returned | Tuples returned from server over last N minutes | database_statistics |
+| Dead Tuples | Number of estimated dead tuples in server | table_statistics |
+| Tuples updated | Tuples updated in server over last N minutes | database_statistics |
+| Tuples deleted | Tuples deleted from server over last N minutes | database_statistics |
+| Tuples hot updated | Tuples hot updated in server, over last N minutes | table_statistics |
+| Sequential Scans | Number of full table scans in server, over last N minutes | table_statistics |
+| Index Scans | Number of index scans in server, over last N minutes | table_statistics |
+| Hot update percentage | Percentage of hot updates in the server over last N minutes | table_statistics |
+| Live Tuples | Number of estimated live tuples in server | table_statistics |
+| Dead tuples percentage | Percentage of estimated dead tuples in server | table_statistics |
+| Last Vacuum | Hours since last vacuum on the server | table_statistics |
+| Last AutoVacuum | Hours since last autovacuum on the server | table_statistics |
+| Last Analyze | Hours since last analyze on the server | table_statistics |
+| Last AutoAnalyze | Hours since last autoanalyze on the server | table_statistics |
+| Percentage of buffers written by backends over the last N minutes | The percentage of buffers written by backends vs. the total buffers written, over the last N minutes | background_writer_statistics |
+| Table Count | Total number of tables in server | oc_table |
+| Function Count | Total number of functions in server | oc_function |
+| Sequence Count | Total number of sequences in server | oc_sequence |
+| Number of users expiring in N days | Number of users whose accounts are expiring in N days | user_info |
+| Number of users whose password expiring in N days | Number of users whose passwords have expired or are expiring in N days | user_info |
+| Index size as a percentage of table size | Size of the indexes in server, as a percentage of their tables' size | index_size, oc_index, table_size |
+| Largest index by table-size percentage | Largest index in server, calculated as a percentage of its table's size | index_size, oc_index, table_size |
+| Number of ERRORS in the logfile on server M in the last X hours | The number of ERRORS in the logfile on server M in last X hours | N/A |
+| Number of WARNINGS in the logfile on server M in the last X hours | The number of WARNINGS in logfile on server M in the last X hours | N/A |
+| Number of WARNINGS or ERRORS in the logfile on server M in the last X hours | The number of WARNINGS or ERRORS in the logfile on server M in the last X hours | N/A |
+| Number of attacks detected in the last N minutes | The number of SQL injection attacks that occurred in the last N minutes | sql_protect |
+| Number of attacks detected in the last N minutes by username | The number of SQL injection attacks that occurred in the last N minutes, by username | sql_protect |
+| Number of replica servers lag behind the primary by write location | Streaming Replication: number of replica servers lagging behind the primary by write location | streaming_replication |
+| Number of replica servers lag behind the primary by flush location | Streaming Replication: number of replica servers lagging behind the primary by flush location | streaming_replication |
+| Number of replica servers lag behind the primary by replay location | Streaming Replication: number of replica servers lagging behind the primary by replay location | streaming_replication |
+| Replica server lag behind the primary by write location | Streaming Replication: replica server's lag behind the primary by write location, in MB | streaming_replication |
+| Replica server lag behind the primary by flush location | Streaming Replication: replica server's lag behind the primary by flush location, in MB | streaming_replication |
+| Replica server lag behind the primary by replay location | Streaming Replication: replica server's lag behind the primary by replay location, in MB | streaming_replication |
+| Replica server lag behind the primary by size (MB) | Streaming Replication: replica server's lag behind the primary by size, in MB | streaming_replication |
+| Replica server lag behind the primary by WAL segments | Streaming Replication: replica server's lag behind the primary by WAL segments | streaming_replication |
+| Replica server lag behind the primary by WAL pages | Streaming Replication: replica server's lag behind the primary by WAL pages | streaming_replication |
+| Total materialized view bloat in server | The total space wasted by materialized views in server, in MB | mview_bloat, settings |
+| Largest materialized view (by multiple of unbloated size) | Largest materialized view in server, calculated as a multiple of its own estimated unbloated size; exclude materialized views smaller than N MB | mview_bloat, settings |
+| Highest materialized view bloat in server | The most space wasted by a materialized view in server, in MB | mview_bloat, settings |
+| Average materialized view bloat in server | The average space wasted by materialized views in server, in MB | mview_bloat, settings |
+| Materialized view size in server | The size of materialized views in server, in MB | mview_size |
+| View Count | Total number of views in server | oc_views |
+| Materialized View Count | Total number of materialized views in server | oc_views |
+| Audit config mismatch | Check for audit config parameter mismatch | audit_configuration |
+| Server Down | Specified server is currently inaccessible | N/A |
+| Number of WAL archives pending | Streaming Replication: number of WAL files pending to be replayed at replica | wal_archive_status |
+| Number of minutes lag of replica server from primary server | Streaming Replication: number of minutes replica node is lagging behind the primary node | streaming_replication_lag_time |
+| Log config mismatch | Check for log config parameter mismatch | log_configuration |
+| PGD Group Raft Consensus | PGD group Raft consensus isn't working | bdr_monitor_group_raft |
+| PGD Group Raft Leader ID not matching | PGD group Raft leader ID doesn't match | bdr_group_raft_details |
+| PGD Group versions check | PGD/pglogical version mismatch in PGD group | bdr_monitor_group_raft |
+| Transaction ID exhaustion (wraparound) | Check for Transaction ID exhaustion (wraparound) | |
### Templates applicable on database
@@ -667,6 +672,8 @@ You can select **Clone** from the top-right corner of the dialog box to clone th
Select **Delete** from the top-right corner of the dialog box to remove a scheduled alert blackout. Select the servers or agents and then select **Delete**.
-Select a server for which you want to delete the scheduled alert backout, and then select **Delete**. The server prompts for confirmation before deleting that row.
+Select a server for which you want to delete the scheduled alert blackout, and then select **Delete**. The server prompts for confirmation before deleting that row.
You can select **Reset** to reset the details on the Alert Blackout dialog box to the default settings. Saved blackouts aren't affected.
+
+You can view the scheduled alert blackout details from the `event_history` table in the `pem` schema once the schedule is executed. For more information, see [Monitoring event history](../monitoring_event_history.mdx).
\ No newline at end of file
diff --git a/product_docs/docs/pem/9/pem_rel_notes/950_rel_notes.mdx b/product_docs/docs/pem/9/pem_rel_notes/950_rel_notes.mdx
new file mode 100644
index 00000000000..292a995314a
--- /dev/null
+++ b/product_docs/docs/pem/9/pem_rel_notes/950_rel_notes.mdx
@@ -0,0 +1,29 @@
+---
+title: "Postgres Enterprise Manager 9.5.0 release notes"
+navTitle: Version 9.5.0
+---
+
+Released: 15 Feb 2024
+
+New features, enhancements, bug fixes, and other changes in PEM 9.5.0 include:
+
+| Type | Description |
+|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Enhancement | Added `EmailGroup` and `EmailGroupID` in webhook payload. |
+| Enhancement  | Added an alert template to validate password expiry for users in EDB Postgres Advanced Server. Also renamed the alert template `A user expired in N days` to `Number of users expiring in N days`, which checks the number of users expiring.[^*] |
+| Enhancement | Added a button on the Manage Probes window that resets the probe configurations to the defaults. |
+| Enhancement | Added a **Cleared Alert** button in notifications. This gives you the flexibility to disable notifications for cleared alerts while still receiving notifications when alerts occur. |
+| Enhancement | You can now create your own web server certificates, as EDB details are no longer added to the web server certificates generated by PEM. |
+| Enhancement | Added a capability to identify the user who changed the blackout status, date, and time. |
+| Bug fix | Fixed an issue where a deleted custom probe was still visible in the `Probe Dependency` tab of Alert Templates. |
+| Bug fix | Fixed a `missing permission` issue by granting the `pem_agent` user permission to the `custom_email_template` table. |
+| Bug fix | Excluded zero-sized tables from PEM alert template `largest index by table-size percentage`. Also added a detailed SQL statement for this template. |
+| Bug fix | Fixed an issue whereby the CLI attempted to call an obsolete version of the PEM API, resulting in commands failing. |
+| Bug fix      | Members of the `pem_user` and `pem_manage_efm` roles can now access the EFM features. Members of the `pem_user` and `pem_server_service_manager` roles can start and stop the database service from the PEM GUI. |
+| Bug fix | Added `application_name` column in the `session_info` probe. |
+| Bug fix | Fixed the `too few arguments` exception for the webhooks. |
+| Bug fix | Resolved the issue where the User column displayed the current username logged into the PEM user interface in the chart display of the `pg_hba.conf` file. Now it displays the respective usernames in the `pg_hba.conf` file. |
+| Bug fix | Fixed a `Dashboard info not found` error while opening the dashboards in the user interface. |
+
+
+[^*]: Alerts configured using the outgoing alert template `A user expired in N days` must be recreated using the new template.
\ No newline at end of file
diff --git a/product_docs/docs/pem/9/pem_rel_notes/index.mdx b/product_docs/docs/pem/9/pem_rel_notes/index.mdx
index 4c2cebdced6..713939eaefa 100644
--- a/product_docs/docs/pem/9/pem_rel_notes/index.mdx
+++ b/product_docs/docs/pem/9/pem_rel_notes/index.mdx
@@ -1,6 +1,7 @@
---
title: "Release notes"
navigation:
+ - 950_rel_notes
- 941_rel_notes
- 940_rel_notes
- 931_rel_notes
@@ -17,6 +18,7 @@ The Postgres Enterprise Manager (PEM) documentation describes the latest version
| Version | Release Date | Upstream Merges | Accessibility Conformance |
| ------------------------- | ------------ | --------------------------------------------------------------------------| --------------------------------------------------------------------------------------------------- |
+| [9.5.0](950_rel_notes) | 15 Feb 2024 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.4.1](941_rel_notes) | 28 Nov 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.4.0](940_rel_notes) | 16 Nov 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.3.1](931_rel_notes) | 12 Oct 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
diff --git a/product_docs/docs/pem/9/pem_rest_api.mdx b/product_docs/docs/pem/9/pem_rest_api.mdx
new file mode 100644
index 00000000000..59f4210c3bd
--- /dev/null
+++ b/product_docs/docs/pem/9/pem_rest_api.mdx
@@ -0,0 +1,5 @@
+---
+title: "PEM REST API"
+---
+
+EDB Postgres Enterprise Manager supports a REST API that offers effective data read and write capabilities and access to various functionality without logging in to the PEM UI. For more information, see [REST API usage](https://www.enterprisedb.com/blog/rest-api-usage-in-pem).
\ No newline at end of file
diff --git a/product_docs/docs/pwr/1/configuring.mdx b/product_docs/docs/pwr/1/configuring.mdx
new file mode 100644
index 00000000000..7f92d35f12f
--- /dev/null
+++ b/product_docs/docs/pwr/1/configuring.mdx
@@ -0,0 +1,56 @@
+---
+title: "Configuring Postgres Workload Report"
+navTitle: "Configuring"
+---
+
+## `pwr` configuration file
+
+To reduce the number of command-line arguments needed when executing `pwr`, you can use a configuration file to specify options that always take the same value whenever that value differs from the default.
+
+During execution, `pwr` looks for an existing configuration file in `~/.pwr.conf` and `/etc/pwr.conf`, in that order, and uses the first one found.
+However, if the `--config` option specifies a configuration file, that file overrides the default locations.
+
+The installation package creates a template for the configuration file in `/etc/pwr.conf.templ`. We recommend copying this file to one of the
+two places where `pwr` looks for a configuration file and editing the options in the template as necessary.
+
+The following sections describe the options you can configure.
+
+### `input_dir`
+
+Identifies an existing directory where the `edb_wait_states` contents of a Lasso report are located. This option is used mainly for `pwr report` execution (see [Using](using)).
+
+### `output_dir`
+
+Specifies the directory where reports are written. During execution, Postgres Workload Report tries to create the directory if it doesn't exist.
+
+### `report_name`
+
+Provides the name of the report files generated. Usually, this option is specified from the command line because different reports typically have different names.
+
+### `log_file`
+
+Provides the full path to the file where `pwr` writes the `stdout` and `stderr` logs.
+
+### `log_level`
+
+Specifies the logging level to use when running Postgres Workload Report. Valid values are, from more verbose to less verbose:
+
+- `DEBUG`
+- `INFO` (default if not specified)
+- `WARNING`
+- `ERROR`
+- `CRITICAL`
+
+See [the Python logging](https://docs.python.org/3/library/logging.html#logging-levels) documentation for more information about log levels.
+
+### `log_format`
+
+Provides the format of the log messages that are written to the log file. See [the Python logging](https://docs.python.org/3/library/logging.html#logrecord-attributes) documentation for more information on log formatting.
+
+### `assets_dir`
+
+Identifies the directory containing the Jinja templates used to format the HTML output and the CSS used for PDF output. The directory you typically want is `/usr/share/pwr/assets`, which contains the assets provided with the `edb-pwr` package.
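+
+Putting these options together, a configuration file might look like the following. This is a minimal sketch: the paths are illustrative, and the INI-style `key = value` layout is an assumption, so treat `/etc/pwr.conf.templ` as the authoritative reference for the syntax.
+
+```ini
+# Hypothetical pwr configuration; copy /etc/pwr.conf.templ and adjust.
+output_dir = /var/reports/pwr
+log_file = /var/log/pwr/pwr.log
+log_level = INFO
+assets_dir = /usr/share/pwr/assets
+```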
+
diff --git a/product_docs/docs/pwr/1/index.mdx b/product_docs/docs/pwr/1/index.mdx
new file mode 100644
index 00000000000..0c4fd87fdd5
--- /dev/null
+++ b/product_docs/docs/pwr/1/index.mdx
@@ -0,0 +1,20 @@
+---
+title: "Postgres Workload Report"
+navigation:
+ - installing
+ - configuring
+ - using
+---
+
+Postgres Workload Report (PWR) is a Python-based tool for building PostgreSQL workload reports in HTML, Markdown, DOCX, and PDF formats, mimicking the reports provided by the Automatic Workload Repository (AWR) reporting tool from Oracle.
+
+Using a Postgres connection, you can execute Postgres Workload Report from any machine with access to the server for which you want a report. Postgres Workload Report uses Lasso for collecting data, so Lasso is a prerequisite. The `edb-pwr` package has an explicit dependency on `edb-lasso` being installed.
+
+On the Postgres server, you should have `edb_wait_states` version 1.2 or later installed and loaded. The extension also must be created,
+preferably on the main database, so the `edb_wait_states` functions are available for Postgres Workload Report to collect saved snapshot data.
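+
+For example, once the `edb_wait_states` library is loaded on the server, creating the extension is a single statement, run while connected to the main database:
+
+```sql
+CREATE EXTENSION edb_wait_states;
+```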
+
+For more information about PWR, see:
+
+ - [Installing](installing)
+ - [Configuring](configuring)
+ - [Using](using)
diff --git a/product_docs/docs/pwr/1/installing.mdx b/product_docs/docs/pwr/1/installing.mdx
new file mode 100644
index 00000000000..29800d23a9a
--- /dev/null
+++ b/product_docs/docs/pwr/1/installing.mdx
@@ -0,0 +1,25 @@
+---
+title: "Installing Postgres Workload Report"
+navTitle: "Installing"
+---
+
+Postgres Workload Report is provided as a Python source distribution package for all supported operating systems. It can be found in the `enterprise` and `standard` repositories.
+
+To install Postgres Workload Report, enter the following command. If necessary, it also pulls the `edb-lasso` package from the repository and installs it:
+
+```shell
+sudo <package-manager> -y install edb-pwr
+```
+
+Where:
+
+`<package-manager>` is the package manager used with your operating system:
+
+| Package manager | Operating system |
+| --------------- | -------------------------------- |
+| dnf | RHEL 8/9 and derivatives |
+| yum | RHEL 7 and derivatives, CentOS 7 |
+| zypper | SLES |
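+
+For example, on RHEL 9 or its derivatives, the command is:
+
+```shell
+sudo dnf -y install edb-pwr
+```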
+
diff --git a/product_docs/docs/pwr/1/rel_notes/100_rel_notes.mdx b/product_docs/docs/pwr/1/rel_notes/100_rel_notes.mdx
new file mode 100644
index 00000000000..7cc1140af1e
--- /dev/null
+++ b/product_docs/docs/pwr/1/rel_notes/100_rel_notes.mdx
@@ -0,0 +1,12 @@
+---
+title: "Postgres Workload Report 1.0.0 release notes"
+navTitle: Version 1.0.0
+---
+
+Released: 15 Feb 2024
+
+New features, enhancements, bug fixes, and other changes in Postgres Workload Report 1.0.0 include:
+
+| Type | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Feature | This is the initial release. |
\ No newline at end of file
diff --git a/product_docs/docs/pwr/1/rel_notes/index.mdx b/product_docs/docs/pwr/1/rel_notes/index.mdx
new file mode 100644
index 00000000000..3f19ff545d1
--- /dev/null
+++ b/product_docs/docs/pwr/1/rel_notes/index.mdx
@@ -0,0 +1,11 @@
+---
+title: "Release notes"
+navigation:
+ - 100_rel_notes
+---
+
+The Postgres Workload Report (PWR) documentation describes the latest version of PWR 1, including minor releases and patches. The release notes in this section provide information on what was new in each release. For new functionality introduced in a minor or patch release, there are also indicators within the content about what release introduced the feature.
+
+| Version | Release Date |
+| ------------------------- | ------------ |
+| [1.0.0](100_rel_notes) | 15 Feb 2024 |
diff --git a/product_docs/docs/pwr/1/using.mdx b/product_docs/docs/pwr/1/using.mdx
new file mode 100644
index 00000000000..852bea6ec5c
--- /dev/null
+++ b/product_docs/docs/pwr/1/using.mdx
@@ -0,0 +1,61 @@
+---
+title: "Using Postgres Workload Report"
+navTitle: "Using"
+---
+
+
+## Prerequisites
+
+Postgres Workload Report can only provide reports for Postgres servers where the `edb_wait_states` extension, version 1.2 or later, is loaded. Furthermore, PWR can only provide query wait reports for intervals of time when the `edb_wait_states` extension was loaded on the server.
+
+For more information, see [`edb_wait_states`](https://www.enterprisedb.com/docs/pg_extensions/wait_states/).
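+
+As a quick sanity check before collecting data, you can confirm that the library is loaded and that the extension version meets the requirement. This is a sketch using standard Postgres catalog queries; run it connected to the database where the extension was created:
+
+```sql
+-- Is the edb_wait_states library loaded?
+SHOW shared_preload_libraries;
+
+-- Is the extension created, at version 1.2 or later?
+SELECT extversion FROM pg_extension WHERE extname = 'edb_wait_states';
+```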
+
+## Source information for reports
+
+After the `edb-pwr` and `edb-lasso` packages are installed on the machine, and the server has been running with the `edb_wait_states` extension loaded for a long enough period of time, you can use Postgres Workload Report to extract reports of wait states for the queries that were running during the interval of time specified.
+
+Alternatively, Postgres Workload Report can generate reports from an existing Lasso report, assuming the report was executed on a server with `edb_wait_states` loaded. For this reason, Postgres Workload Report takes a mandatory first argument, which can be either of the following:
+
+- `execute`
+- `report`
+
+The `execute` argument performs end-to-end execution, calling `lasso` with appropriate options and using the generated tarball report as the input
+to generate an HTML, Markdown, DOCX, or PDF report.
+
+The `report` argument uses the directory for stored `lasso` report contents as input for processing and generating the wait states report. To use the `report` argument, a decompressed and untarred `lasso` report must already exist.
+
+## Example for the `execute` option
+
+For this example, we want to generate a report on query waits for the Postgres server `myserver` for the interval between January 10 at 9:00 and January 10 at 13:00. An incident happened around that time that must be investigated to find the root cause. The main database on the server, where `edb_wait_states` is installed, is `my-oltp`.
+
+To get such a report in HTML format, we use the following command:
+
+```shell
+pwr execute --host-name myserver --sampling-start '2024-01-10 09:00+00' \
+ --sampling-end '2024-01-10 13:00+00' --html \
+ --report-name 'Jan10_incident' my-oltp postgres
+```
+
+We can run `pwr execute -h` to see the full list of available options.
+
+## Example for the `report` option
+
+In some cases you may already have a Lasso report and want PWR to use it as the source for building a report. For these cases, the `pwr report` option is useful.
+
+For this example, we use the same scenario but start from a Lasso report that was already executed using the time boundaries shared above. Suppose the Lasso report's name is `edb-lasso-Jan10-incident.tar.bz2` and it's located in the home directory of the machine where `pwr report` runs.
+
+The following commands generate an HTML report saved as `~/pwr_output/Jan10_incident.html`:
+
+```shell
+cd ~/
+mkdir -p pwr_tmp/
+tar jxf ~/edb-lasso-Jan10-incident.tar.bz2 -C ~/pwr_tmp/ --strip-components=1
+pwr report \
+ --input-dir ~/pwr_tmp/postgresql/dbs/my-oltp/edb_wait_states/ \
+ --html --output-dir ~/pwr_output/ --report-name 'Jan10_incident'
+```
+
+We can run `pwr report -h` to see the full list of available options.
diff --git a/src/constants/products.js b/src/constants/products.js
index 64bddb23ae7..81cf5cb8eb7 100644
--- a/src/constants/products.js
+++ b/src/constants/products.js
@@ -74,10 +74,23 @@ export const products = {
name: "EDB Postgres for Kubernetes",
iconName: IconNames.KUBERNETES,
},
+ pwr: {
+ name: "Postgres Workload Report",
+ iconName: IconNames.TOOLS,
+ },
+ // note: the key here doesn't have to be anything specific,
+ // as long as it matches the value used for the `product:` key in the relevant frontmatter
+ // I recommend using the actual product name (same as what's used in the next line), just to
+ // make it obvious that this ISN'T a directory name or something defined in gatsby_config.js
+ // But we could also call it "Bob", as long as e.g. pg_extensions/pg_squeeze/index.mdx contains product: Bob
"EDB Query Advisor": {
name: "EDB Query Advisor",
iconName: IconNames.POSTGRESQL,
},
+ "PG Squeeze": {
+ name: "PG Squeeze",
+ iconName: IconNames.POSTGRESQL,
+ },
CloudNativePG: { name: "CloudNativePG" },
repmgr: { name: "repmgr", iconName: IconNames.HIGH_AVAILABILITY },
slony: { name: "Slony Replication", iconName: IconNames.NETWORK2 },
diff --git a/src/pages/index.js b/src/pages/index.js
index 2b8bbd72b76..d6064fea63e 100644
--- a/src/pages/index.js
+++ b/src/pages/index.js
@@ -194,6 +194,10 @@ const Page = () => {
EDB Wait States
+
+ PG Squeeze
+
+
EDB Job Scheduler
@@ -319,6 +323,9 @@ const Page = () => {
LiveCompare
+
+ Postgres Workload Report
+
diff --git a/tools/user/import/extensions/exsp.js b/tools/user/import/extensions/exsp.js
index 45f699ab439..c1841469e6f 100644
--- a/tools/user/import/extensions/exsp.js
+++ b/tools/user/import/extensions/exsp.js
@@ -262,7 +262,7 @@ function composeRow(row, lastRow, currentState) {
true,
),
);
- } else if (row[i] == "n/a") {
+ } else if (row[i] == "n/a" || row[i] == "") {
/* Hide n/a from spreadsheet as - (n/a is internal status only) */
output.push(
composeCell(
@@ -276,7 +276,7 @@ function composeRow(row, lastRow, currentState) {
),
);
} else {
- console.log(`Unhandled value ${row[i]}`);
+ console.log(`Unhandled value "${row[i]}"`);
}
}
output.push("\n");