diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/creating_cluster/creating_a_cluster.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/creating_cluster/creating_a_cluster.mdx index 23927134752..f4fd50a94fb 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/creating_cluster/creating_a_cluster.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/creating_cluster/creating_a_cluster.mdx @@ -7,8 +7,6 @@ redirects: - /purl/upm/cluster-settings-tab/ - /purl/upm/read-only-connections/ - /purl/upm/csp-auth/ - - /purl/upm/csp-azure-ad-usermanagement/ - - /purl/upm/csp-aws-ad-usermanagement/ - /purl/upm/create-a-cluster/ - /purl/upm/private-endpoints-info/ - /purl/upm/read-only-workloads/ @@ -173,7 +171,7 @@ The following options aren't available when creating your cluster: When provisioning database storage, not all of the storage space you specify is available for holding your data. Some space is reserved for other purposes. For a full explanation of the structure of a Postgres data directory, see [Database File Layout](https://www.postgresql.org/docs/current/storage-file-layout.html). You can make more storage space available for data if you specify separate storage for write ahead logs (WAL). -8. In the **Network, Logs, & Telemetry** section: +8. In the **Network, Logs, & Telemetry** section: In **Connectivity Type**, specify whether to use private or public networking. Networking is set to **Public** by default. Public means that any client can connect to your cluster’s public IP address over the internet. Optionally, you can limit traffic to your public cluster by specifying an IP allowlist, which allows access only to certain blocks of IP addresses. To limit access, select **Use allowlists** and add one or more classless inter-domain routing (CIDR) blocks. CIDR is a method for allocating IP addresses and IP routing to a whole network or subnet. If you have any CIDR block entries, access is limited to those IP addresses. If none are specified, all network traffic is allowed. @@ -224,8 +222,12 @@ For more information, see [Periodic maintenance](/edb-postgres-ai/cloud-service/ ### Connections + + #### Read-only workloads + + !!! Note The **Read-only Workloads** option is not available on single node clusters. @@ -259,6 +261,8 @@ Use the **PgBouncer Configuration Settings** menu to set PgBouncer-specific sett #### Identity and Access Management (IAM) Authentication + + Enable **Identity and Access Management (IAM) Authentication** to turn on the ability to log in to Postgres using your AWS IAM credentials. For this feature to take effect, after you create the cluster, you must add each user to a role that uses AWS IAM authentication in Postgres. For details, see [IAM authentication for Postgres](/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication/#iam-authentication-for-postgres). #### Superuser Access @@ -267,6 +271,8 @@ Enable **Superuser Access** to grant superuser privileges to the edb_admin role. ### Security + + Enable **Transparent Data Encryption (TDE)** to use your own encryption key. This option is available for EDB Postgres Advanced Server and EDB Postgres Extended Server for version 15 and later. Select an encryption key from your project and region to encrypt the cluster with TDE. To learn more about TDE support, see [Transparent Data Encryption](/edb-postgres-ai/cloud-service/security/security/#your-own-encryption-key---transparent-data-encryption-tde). 
!!!Note "Important" diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/connecting_azure.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/connecting_azure.mdx index 6dfa692e370..5366d01c476 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/connecting_azure.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/connecting_azure.mdx @@ -2,6 +2,7 @@ title: Connecting your Azure cloud navTitle: Azure redirects: + - /purl/upm/azure-subscription/ - /biganimal/latest/getting_started/02_connecting_to_your_cloud/connecting_azure/ #generated for BigAnimal URL path removal branch - /biganimal/latest/getting_started/02_azure_market_setup/ --- diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/index.mdx index c252d8bed0b..f0958c08f43 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/connecting_to_your_cloud/index.mdx @@ -2,7 +2,6 @@ title: Connecting your cloud description: How to connect your own cloud account to the Cloud Service redirects: - - /purl/upm/azure-subscription/ - /purl/upm/connect-your-cloud-overview/ - /purl/upm/connect-your-cloud/ - /biganimal/latest/getting_started/02_connect_cloud_account/ diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/managing_regions.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/managing_regions.mdx index e937591a253..569be52702d 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/managing_regions.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/managing_regions.mdx @@ -36,6 +36,9 @@ You can activate a region ahead of time using the Regions page. ## Suspend, reactivate, or delete a region + + + Before you suspend or delete a region, you must delete all clusters in that region. 1. On the left panel, select **Regions**. diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/index.mdx index 63e27c31d4f..80b01550dc3 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/index.mdx @@ -4,7 +4,6 @@ indexCards: simple description: When using Your Cloud Account, how to ensure its readiness to work with EDB Postgres AI. 
redirects: - /purl/upm/cloud-readiness/ - - /purl/upm/azure-raise-resource-limits/ - /biganimal/latest/getting_started/01_check_resource_limits/ - /biganimal/latest/getting_started/preparing_cloud_account/ #generated for BigAnimal URL path removal branch navigation: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/preparing_azure/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/preparing_azure/index.mdx index 6578a8dc1ad..874b307e4de 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/preparing_azure/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/getting_started/your_cloud_account/preparing_cloud_account/preparing_azure/index.mdx @@ -2,6 +2,7 @@ title: "Preparing your Azure account" description: Prepare your Azure account to manage databases on EDB Postgres AI Cloud Service. redirects: + - /purl/upm/azure-raise-resource-limits/ - /biganimal/latest/getting_started/01_preparing_azure/ - /biganimal/latest/getting_started/preparing_cloud_account/01_preparing_azure/ #generated for BigAnimal URL path removal branch --- diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/index.mdx index 8d43059de39..a02602d3cba 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/index.mdx @@ -2,6 +2,8 @@ title: EDB Postgres AI Cloud Service navTitle: Cloud Service description: An introduction to the EDB Postgres AI Cloud Service and its features. +directoryDefaults: + displayBanner: "The EDB Hosted Cloud Service has been deprecated. Support is available for current customers. However, the related documentation topics will be removed shortly. Further updates will be provided as the removal progresses." navigation: - getting_started - using_cluster @@ -16,15 +18,17 @@ redirects: - /purl/upm/home/ - /biganimal/latest/ - /biganimal/latest/overview/ + --- The EDB Postgres® AI Cloud Service is a holistic platform that includes hybrid data estate management, observability, analytics, and AI capabilities. + ## Overview -The EDB Postgres AI Cloud Service itself is a fully managed cloud service that provides a high-performance, scalable, and secure database platform for analytics, AI, and machine learning workloads. It also provides the platform for [EDB Postgres AI Analytics](../analytics/) and [EDB Postgres AI Machine Learning](../ai-ml/) services. +The EDB Postgres AI Cloud Service itself is a fully managed cloud service that provides a high-performance, scalable, and secure database platform for analytics and AI workloads. It also provides the platform for [EDB Postgres AI Analytics](../analytics/) and [EDB Postgres AIDB](../ai-ml/) services. -Cloud Service builds on the [EDB Postgres Advanced Server](/epas/latest) and [EDB Postgres Extended](/pge/latest) databases and it's designed to help organizations accelerate the development and deployment of AI and machine learning applications. +Cloud Service builds on the [EDB Postgres Advanced Server](/epas/latest) and [EDB Postgres Extended](/pge/latest) databases, and it's designed to help organizations accelerate the development and deployment of AI and analytics applications. Databases in the EDB Postgres AI Cloud Service can run on EDB's own cloud accounts or be managed by EDB in your own cloud on your behalf.
diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/backup_and_restore.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/backup_and_restore.mdx index 869c160a29b..b09154bddb8 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/backup_and_restore.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/backup_and_restore.mdx @@ -43,6 +43,8 @@ To determine the replication lag, you can compare the last log sequence number ( ## Restores + + If a restore is necessary—for example, in case of an accidental `DROP TABLE` statement—you can restore clusters to any point in the backup retention period. Cluster restores aren't performed in place on an existing cluster. Instead, a new cluster is created and initialized with data from the backup archive. Restores must replay the transaction logs between the most recent full database backup and the target restore point. Thus, restore times (that is, RTO) depend on the write activity in the source cluster. diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/modifying_your_cluster/05_db_configuration_parameters.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/modifying_your_cluster/05_db_configuration_parameters.mdx index e21188d29ad..4f4bc0ba9d7 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/modifying_your_cluster/05_db_configuration_parameters.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/modifying_your_cluster/05_db_configuration_parameters.mdx @@ -23,6 +23,8 @@ Not all database configuration parameters are supported by Cloud Service. Some p ## Using formulas for parameter values + + In addition to entering specific values for parameters, for some parameters you can specify formulas to calculate a value. You can use formulas for parameters of type integer and real in ternary formulas, such as the [shared buffer example](#examples), using the following operators: `+ - / * > >= < <= == != && || ! ? : ( )`. Use `?` and `:` to form ternary expressions, and use `( )` to specify [order of operations](#order-of-operations), if needed. GUCs used in formulas must also be of type integer or real. All arithmetic is done on 64-bit floating point values rounded to an integer result if the target GUC is of type integer and not real. BigAnimal has what we refer to as *pseudo GUCs* to help with creating equations. These read-only GUCs are: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/periodic_maintenance.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/periodic_maintenance.mdx index 847312e0855..c48c0c62da7 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/periodic_maintenance.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/managing_your_cluster/periodic_maintenance.mdx @@ -18,6 +18,8 @@ In some cases, these updates might terminate existing network connections to your ## Specifying maintenance windows + + If you want to control when the updates are pushed, you can specify a weekly maintenance window for each cluster or, in the case of a distributed high-availability cluster, for each data group. BigAnimal displays a *scheduled maintenance* message on your cluster list four hours prior to the scheduled maintenance time to remind you of the upcoming maintenance window. This reminder allows you to make any necessary preparations, such as saving your work and closing any open connections.
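If you manage your clusters with the Cloud Service CLI, you can declare the same weekly window in a cluster configuration file. Here's a minimal sketch, using the `customMaintenanceWindow` block that appears in the CLI configuration examples later in this document; the day and time values are illustrative, not recommendations:

```
# Excerpt from a cluster configuration file (illustrative values only)
customMaintenanceWindow:
  isEnabled: true               # enable the custom weekly window
  maintenanceStartDay: Sunday   # day of week the window opens
  maintenanceStartTime: 03:00   # start time for the window
```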
For more information on specifying maintenance windows, see [Maintenance](/edb-postgres-ai/cloud-service/getting_started/creating_cluster/creating_a_cluster/#maintenance). ## Maintenance for high-availability clusters diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_cluster_types/distributed_highavailability.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_cluster_types/distributed_highavailability.mdx index ca11619eb9f..2eb8fdf3849 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_cluster_types/distributed_highavailability.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_cluster_types/distributed_highavailability.mdx @@ -63,6 +63,8 @@ Cross-cloud service provider witness nodes are available with AWS, Azure, and Go ## Read-only workloads + + When you enable the read-only workloads option during the cluster creation, a read-only connection string is created for the data group. You can use this connection to allow your application or service to route read-only requests through the shadow nodes (non-write leaders) to lighten the load on the write leaders and improve the distributed high-availability cluster's performance. If you have more than one data group, you can choose whether to enable the read-only workloads option on a per-data-group basis. diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_database_versions.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_database_versions.mdx index 024d731cc0d..dfd434e61ea 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_database_versions.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/references/supported_database_versions.mdx @@ -20,6 +20,8 @@ We support the major Postgres versions from the date they're made available unti ## End-of-life policy + + Cloud Service deprecates support for Postgres versions following the same timeline as PostgreSQL. PostgreSQL, EDB Postgres Advanced Server, and EDB Postgres Extended Server follow the same timelines. We recommend that you take action and upgrade your Postgres databases running on the deprecated version to a later version as soon as possible. Six months before the PostgreSQL deprecation date, Cloud Service doesn't allow you to create new instances with the deprecated database version. @@ -30,6 +32,8 @@ The only exception is customers who purchased Extended Life Support (ELS) prior ## Key dates + + While PostgreSQL officially deprecated version 11 on November 9, 2023, Cloud Service deprecated PostgreSQL 11 on November 20, 2023 in alignment with the broader EDB portfolio. On November 20, 2023, Cloud Service deprecated support for PostgreSQL 11 and EDB Postgres Advanced Server 11 using the following schedule. We recommend that you take action and upgrade your Postgres databases running on major version 11 to a later version, such as PostgreSQL version 15. 
diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_01_jan_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_01_jan_rel_notes.mdx similarity index 92% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_01_jan_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_01_jan_rel_notes.mdx index 2798469d410..cb10f457a66 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_01_jan_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_01_jan_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service January release notes navTitle: "January 2023" redirects: - /biganimal/latest/release_notes/2023_01_jan_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_01_jan_rel_notes/ --- Cloud Service's January 2023 release includes the following enhancements and bug fixes: @@ -11,4 +12,4 @@ Cloud Service's January 2023 release includes the following enhancements and bug | ----------- | ------------------------------------------------------------------------------------------------------------------------------- | | Enhancement | Added support for an additional AWS region: AWS Asia Pacific Southeast 2 (Sydney). | | Enhancement | Cloud Service CLI v1.12.0 & v1.13.0 now allows users to provision faraway replicas and get monitoring info for their clusters. | - \ No newline at end of file + diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_02_feb_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_02_feb_rel_notes.mdx similarity index 89% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_02_feb_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_02_feb_rel_notes.mdx index f783d3c6859..6c5ef0201e4 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_02_feb_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_02_feb_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service February release notes navTitle: "February 2023" redirects: - /biganimal/latest/release_notes/2023_02_feb_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_02_feb_rel_notes/ --- Cloud Service's February 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_03_mar_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_03_mar_rel_notes.mdx similarity index 98% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_03_mar_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_03_mar_rel_notes.mdx index 5f65cc6ef71..56a1c5165a5 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_03_mar_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_03_mar_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service March release notes navTitle: "March 2023" redirects: - /biganimal/latest/release_notes/2023_03_mar_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_03_mar_rel_notes/ --- Cloud Service's March 2023 release includes the following enhancements and bug fixes: diff --git 
a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_04_apr_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_04_apr_rel_notes.mdx similarity index 89% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_04_apr_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_04_apr_rel_notes.mdx index 60d3f359434..3213122566b 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_04_apr_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_04_apr_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service April release notes navTitle: "April 2023" redirects: - /biganimal/latest/release_notes/2023_04_apr_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_04_apr_rel_notes/ --- Cloud Service's April 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_05_may_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_05_may_rel_notes.mdx similarity index 94% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_05_may_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_05_may_rel_notes.mdx index 662084c1382..a46e7d8b45e 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_05_may_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_05_may_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service May release notes navTitle: "May 2023" redirects: - /biganimal/latest/release_notes/2023_05_may_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_05_may_rel_notes/ --- Cloud Service's May 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_06_jun_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_06_jun_rel_notes.mdx similarity index 95% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_06_jun_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_06_jun_rel_notes.mdx index 535a692ab85..7d472348e17 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_06_jun_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_06_jun_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service June release notes navTitle: "June 2023" redirects: - /biganimal/latest/release_notes/2023_06_jun_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_06_jun_rel_notes/ --- Cloud Service's June 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_07_jul_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_07_jul_rel_notes.mdx similarity index 95% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_07_jul_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_07_jul_rel_notes.mdx index 13642c91a81..4479b5fe957 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_07_jul_rel_notes.mdx +++ 
b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_07_jul_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service July release notes navTitle: "July 2023" redirects: - /biganimal/latest/release_notes/2023_07_jul_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_07_jul_rel_notes/ --- Cloud Service's July 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_08_aug_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_08_aug_rel_notes.mdx similarity index 90% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_08_aug_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_08_aug_rel_notes.mdx index ad2a0827f82..bbbb76a5f83 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_08_aug_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_08_aug_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service August release notes navTitle: "August 2023" redirects: - /biganimal/latest/release_notes/2023_08_aug_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_08_aug_rel_notes/ --- Cloud Service's August 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_09_sep_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_09_sep_rel_notes.mdx similarity index 96% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_09_sep_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_09_sep_rel_notes.mdx index e51b1c277ce..f7d64692700 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_09_sep_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_09_sep_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service September release notes navTitle: "September 2023" redirects: - /biganimal/latest/release_notes/2023_09_sep_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_09_sep_rel_notes/ --- Cloud Service's September 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_10_oct_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_10_oct_rel_notes.mdx similarity index 92% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_10_oct_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_10_oct_rel_notes.mdx index 5186cee8b53..178cb899de1 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_10_oct_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_10_oct_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service October release notes navTitle: "October 2023" redirects: - /biganimal/latest/release_notes/2023_10_oct_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_10_oct_rel_notes/ --- Cloud Service's October 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_11_nov_rel_notes.mdx 
b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_11_nov_rel_notes.mdx similarity index 94% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_11_nov_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_11_nov_rel_notes.mdx index 5e1dfc8efe9..cd8ec2699c4 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_11_nov_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_11_nov_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service November release notes navTitle: "November 2023" redirects: - /biganimal/latest/release_notes/2023_11_nov_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_11_nov_rel_notes/ --- Cloud Service's November 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_12_dec_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_12_dec_rel_notes.mdx similarity index 91% rename from advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_12_dec_rel_notes.mdx rename to advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_12_dec_rel_notes.mdx index 3a5c378a49e..a63a87b9a1b 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023_12_dec_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/2023_12_dec_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service December release notes navTitle: "December 2023" redirects: - /biganimal/latest/release_notes/2023_12_dec_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2023_12_dec_rel_notes/ --- Cloud Service's December 2023 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/index.mdx new file mode 100644 index 00000000000..cba14e3e277 --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2023/index.mdx @@ -0,0 +1,34 @@ +--- +title: Cloud Service release notes 2023 +navTitle: "2023" +description: 2023 release notes for Cloud Service +navigation: + - 2023_12_dec_rel_notes + - 2023_11_nov_rel_notes + - 2023_10_oct_rel_notes + - 2023_09_sep_rel_notes + - 2023_08_aug_rel_notes + - 2023_07_jul_rel_notes + - 2023_06_jun_rel_notes + - 2023_05_may_rel_notes + - 2023_04_apr_rel_notes + - 2023_03_mar_rel_notes + - 2023_02_feb_rel_notes + - 2023_01_jan_rel_notes +indexCards: none +--- + +| 2023 | +|--------------------------------------| +| [December 2023](2023_12_dec_rel_notes) | +| [November 2023](2023_11_nov_rel_notes) | +| [October 2023](2023_10_oct_rel_notes) | +| [September 2023](2023_09_sep_rel_notes)| +| [August 2023](2023_08_aug_rel_notes) | +| [July 2023](2023_07_jul_rel_notes) | +| [June 2023](2023_06_jun_rel_notes) | +| [May 2023](2023_05_may_rel_notes) | +| [April 2023](2023_04_apr_rel_notes) | +| [March 2023](2023_03_mar_rel_notes) | +| [February 2023](2023_02_feb_rel_notes) | +| [January 2023](2023_01_jan_rel_notes) | \ No newline at end of file diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_01_jan_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_01_jan_rel_notes.mdx index dac6eaabc2b..c5a78504853 100644 --- 
a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_01_jan_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_01_jan_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service January 2024 release notes navTitle: January 2024 redirects: - /biganimal/latest/release_notes/2024_01_jan_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_01_jan_rel_notes/ --- Cloud Service's January 2024 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_02_feb_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_02_feb_rel_notes.mdx index b8b87f7c223..05618b4db91 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_02_feb_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_02_feb_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service February 2024 release notes navTitle: February 2024 redirects: - /biganimal/latest/release_notes/2024_02_feb_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_02_feb_rel_notes/ --- Cloud Service's February 2024 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_03_mar_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_03_mar_rel_notes.mdx index 8dbf95c1d6b..e42cca9bb2a 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_03_mar_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_03_mar_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service March 2024 release notes navTitle: March 2024 redirects: - /biganimal/latest/release_notes/2024_03_mar_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_03_mar_rel_notes/ --- Cloud Service's March 2024 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_04_apr_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_04_apr_rel_notes.mdx index c891385f8f1..a6d89d13c3e 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_04_apr_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_04_apr_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service April 2024 release notes navTitle: April 2024 redirects: - /biganimal/latest/release_notes/2024_04_apr_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_04_apr_rel_notes/ --- Cloud Service's April 2024 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_05_may_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_05_may_rel_notes.mdx index 65217348d88..9039749faf5 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_05_may_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_05_may_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service May 2024 release notes navTitle: May 2024 redirects: - /biganimal/latest/release_notes/2024_05_may_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_05_may_rel_notes/ --- Cloud Service's May 2024 release 
includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_06_jun_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_06_jun_rel_notes.mdx index 9e7e8e38766..48ddcb2ad31 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_06_jun_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_06_jun_rel_notes.mdx @@ -3,6 +3,7 @@ title: Cloud Service June 2024 release notes navTitle: June 2024 redirects: - /biganimal/latest/release_notes/2024_06_jun_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_06_jun_rel_notes/ --- Cloud Service's June 2024 release includes the following enhancements and bug fixes: diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_07_jul_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_07_jul_rel_notes.mdx index 71d9d10b034..e9fc25a8660 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_07_jul_rel_notes.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_07_jul_rel_notes.mdx @@ -3,10 +3,12 @@ title: BigAnimal July 2024 release notes navTitle: July 2024 redirects: - /biganimal/latest/release_notes/2024_07_jul_rel_notes/ #generated for BigAnimal URL path removal branch - - /biganimal/latest/release_notes/2024_07_jul_rel_notes/ #generated for BigAnimal URL path removal branch + - /edb-postgres-ai/cloud-service/release_notes/2024_07_jul_rel_notes/ --- -No updates were released for BigAnimal in July 2024. - +EDB Postgres® AI Cloud Service's July 2024 release includes the following enhancements and bug fixes: +| Type | Description | +|------|-------------| +| Enhancement | Support for [tagging AWS resources](/edb-postgres-ai/cloud-service/using_cluster/your_cloud_account/csp_tagging/) added. | diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_08_aug_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_08_aug_rel_notes.mdx new file mode 100644 index 00000000000..b6eb0a6ac39 --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_08_aug_rel_notes.mdx @@ -0,0 +1,16 @@ +--- +title: Cloud Service August 2024 release notes +navTitle: August 2024 +redirects: +- /edb-postgres-ai/cloud-service/release_notes/2024_08_aug_rel_notes/ +--- + +EDB Postgres® AI Cloud Service's August 2024 release includes the following enhancements and bug fixes: + +| Type | Description | +|------|-------------| +| Enhancement | Volume Snapshot Backup support for Distributed HA Clusters (PGD). | +| Enhancement | BigAnimal CLI v3.9.0 is now available. Learn more about what’s new [**here**](https://cli.biganimal.com/versions/v3.9.0/). | +| Enhancement | Internal improvements and updates for the cloud service. | +| Enhancement | Support added for AWS t3 instance types. | +| Enhancement | UI now enables customers to change backup times.
| diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_09_sep_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_09_sep_rel_notes.mdx new file mode 100644 index 00000000000..c5b817a24d8 --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_09_sep_rel_notes.mdx @@ -0,0 +1,12 @@ +--- +title: Cloud Service September 2024 release notes +navTitle: September 2024 +redirects: +- /edb-postgres-ai/cloud-service/release_notes/2024_09_sep_rel_notes/ +--- + +EDB Postgres® AI Cloud Service's September 2024 release includes the following enhancements and bug fixes: + +| Type | Description | +|------|-------------| +| Enhancement | Support for wal2json extension added. | diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_10_oct_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_10_oct_rel_notes.mdx new file mode 100644 index 00000000000..2e4db68ebca --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_10_oct_rel_notes.mdx @@ -0,0 +1,12 @@ +--- +title: Cloud Service October 2024 release notes +navTitle: October 2024 +redirects: +- /edb-postgres-ai/cloud-service/release_notes/2024_10_oct_rel_notes/ +--- + +EDB Postgres® AI Cloud Service's October 2024 release includes the following enhancements and bug fixes: + +| Type | Description | +|------|-------------| +| Enhancement | Implemented graceful handling of "0% storage available" scenarios. | diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_11_nov_rel_notes.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_11_nov_rel_notes.mdx new file mode 100644 index 00000000000..19249b36a3f --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/2024_11_nov_rel_notes.mdx @@ -0,0 +1,14 @@ +--- +title: Cloud Service November 2024 release notes +navTitle: November 2024 +redirects: +- /edb-postgres-ai/cloud-service/release_notes/2024_11_nov_rel_notes/ +--- + +EDB Postgres® AI Cloud Service's November 2024 release includes the following enhancements and bug fixes: + +| Type | Description | +|------|-------------| +| Enhancement | Expansion of service notifications to customers via Slack, PagerDuty, and Webhook. | +| Enhancement | BigAnimal CLI v3.10.0 is now available. Learn more about what’s new [**here**](https://cli.biganimal.com/versions/v3.10.0/). | +| Enhancement | Terraform provider for EDB Postgres AI Cloud Service 1.2.0 is now available. Learn more about what’s new [**here**](https://github.com/EnterpriseDB/terraform-provider-biganimal/releases/tag/v1.2.0).
| diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/index.mdx index 068069a0c69..c72d98c6e6f 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/release_notes/index.mdx @@ -3,6 +3,10 @@ title: Cloud Service release notes navTitle: Release notes description: Provides monthly release notes for Cloud Service navigation: + - 2024_11_nov_rel_notes + - 2024_10_oct_rel_notes + - 2024_09_sep_rel_notes + - 2024_08_aug_rel_notes - 2024_07_jul_rel_notes - 2024_06_jun_rel_notes - 2024_05_may_rel_notes @@ -10,26 +14,23 @@ navigation: - 2024_03_mar_rel_notes - 2024_02_feb_rel_notes - 2024_01_jan_rel_notes - - 2023_12_dec_rel_notes - - 2023_11_nov_rel_notes - - 2023_10_oct_rel_notes - - 2023_09_sep_rel_notes - - 2023_08_aug_rel_notes - - 2023_07_jul_rel_notes - - 2023_06_jun_rel_notes - - 2023_05_may_rel_notes - - 2023_04_apr_rel_notes - - 2023_03_mar_rel_notes - - 2023_02_feb_rel_notes - - 2023_01_jan_rel_notes redirects: - /biganimal/latest/release_notes/ #generated for BigAnimal URL path removal branch +indexCards: none --- +!!!note "Deprecation" +The EDB Hosted Cloud Service has been deprecated. Support is available for current customers. However, the related documentation topics will be removed shortly. Further updates will be provided as the removal progresses. +!!! + The Cloud Service documentation describes the latest version of Cloud Service, including minor releases and patches. These release notes provide information on what was new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature. | 2024 | |----------------------------------------| +| [November 2024](2024_11_nov_rel_notes) | +| [October 2024](2024_10_oct_rel_notes) | +| [September 2024](2024_09_sep_rel_notes)| +| [August 2024](2024_08_aug_rel_notes) | | [July 2024](2024_07_jul_rel_notes) | | [June 2024](2024_06_jun_rel_notes) | | [May 2024](2024_05_may_rel_notes) | @@ -38,17 +39,21 @@ The Cloud Service documentation describes the latest version of Cloud Service, i | [February 2024](2024_02_feb_rel_notes) | | [January 2024](2024_01_jan_rel_notes) | -| 2023 | -|--------------------------------------| -| [December 2023](2023_12_dec_rel_notes) | -| [November 2023](2023_11_nov_rel_notes) | -| [October 2023](2023_10_oct_rel_notes) | -| [September 2023](2023_09_sep_rel_notes)| -| [August 2023](2023_08_aug_rel_notes) | -| [July 2023](2023_07_jul_rel_notes) | -| [June 2023](2023_06_jun_rel_notes) | -| [May 2023](2023_05_may_rel_notes) | -| [April 2023](2023_04_apr_rel_notes) | -| [March 2023](2023_03_mar_rel_notes) | -| [February 2023](2023_02_feb_rel_notes) | -| [January 2023](2023_01_jan_rel_notes) | +
2023 + +| 2023 | +|----------------------------------------------| +| [December 2023](2023/2023_12_dec_rel_notes) | +| [November 2023](2023/2023_11_nov_rel_notes) | +| [October 2023](2023/2023_10_oct_rel_notes) | +| [September 2023](2023/2023_09_sep_rel_notes) | +| [August 2023](2023/2023_08_aug_rel_notes) | +| [July 2023](2023/2023_07_jul_rel_notes) | +| [June 2023](2023/2023_06_jun_rel_notes) | +| [May 2023](2023/2023_05_may_rel_notes) | +| [April 2023](2023/2023_04_apr_rel_notes) | +| [March 2023](2023/2023_03_mar_rel_notes) | +| [February 2023](2023/2023_02_feb_rel_notes) | +| [January 2023](2023/2023_01_jan_rel_notes) | + +
diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/managing_clusters.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/managing_clusters.mdx index 0fd59e5bf03..dda5253484c 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/managing_clusters.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/managing_clusters.mdx @@ -33,7 +33,7 @@ Enable read-only workloads: No Provider: Azure Cloud Provider Subscription ID: "111,222" Service Account IDs, (leave empty to stop adding): "id1@iam.gcp" -Cluster Name: my-biganimal-cluster +Cluster Name: my-cloud-service-cluster Password: **************** PostgreSQL type: EDB Postgres Advanced Server PostgreSQL version: 14 @@ -71,7 +71,7 @@ __OUTPUT__ ├──────────────┬──────────────────────┬──────────┬──────────────┬──────────────────────────┬─────────────┬───────────────┬───────────────────────────────┬────────────────────┬────────────┤ │ ID │ Name │ Provider │ Architecture │ Status │ Region │ Instance Type │ Postgres Details │ Maintenance Window │ FAReplicas │ ├──────────────┼──────────────────────┼──────────┼──────────────┼──────────────────────────┼─────────────┼───────────────┼───────────────────────────────┼────────────────────┼────────────┤ -│ p-gxhkfww1fe │ my-biganimal-cluster │ Azure │ ha │ Cluster in healthy state │ East US │ E2s v3 │ EDB Postgres Advanced Server │ Disabled │ N/A │ +│ p-gxhkfww1fe │ my-cloud-service-cluster │ Azure │ ha │ Cluster in healthy state │ East US │ E2s v3 │ EDB Postgres Advanced Server │ Disabled │ N/A │ │ │ │ │ │ │ │ │ │ │ │ └──────────────┴──────────────────────┴──────────┴──────────────┴──────────────────────────┴─────────────┴───────────────┴───────────────────────────────┴────────────────────┴────────────┘ ``` @@ -87,16 +87,21 @@ Here's a sample configuration file in YAML format with Azure specified as the pr --- clusterArchitecture: ha # haStandbyReplicas: 2 # -provider: azure # +provider: azure # cspSubscriptionIDs: # - 123123123 # - 456456456 # serviceAccountIds: # - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# -clusterName: biganimal_cluster # +clusterName: cloud_service_cluster # password: ************ # -# refer following link for steps to setup IAM: https://www.enterprisedb.com/docs/biganimal/latest/using_cluster/01_postgres_access/iam-authentication-for-postgres +tags: # + - name: tag1 # + color: "#c5eae7" # + - name: tag2 # + color: "#0b6ff4" # +# refer to the following link for steps to set up IAM: https://www.enterprisedb.com/docs/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication/#iam-authentication-for-postgres iamAuthentication: true # postgresType: epas # postgresVersion: 14 # @@ -104,7 +109,7 @@ region: eastus # volumeType: azurepremiumstorage # volumeProperties: P1 # -volumePropertySize: 4Gi # +volumePropertySize: 32Gi # volumePropertyIOPS: 1000 # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> networking: public # allowIpRangeMap: # @@ -117,9 +122,21 @@ pgConfigMap: # array_nulls: true # backupRetentionPeriod: 30d # -superuserAccess: true # -pgvector: true # -postgis: true # +customMaintenanceWindow: # + isEnabled: true # + maintenanceStartDay: Monday # + maintenanceStartTime: 02:00 # +superuserAccess: true # +pgvector: true # +postgis: true # +pgBouncer: true # +pgBouncerRWSettings: # + application_name_add_host: true # +
max_client_conn: 100 # +pgBouncerROSettings: # + idle_transaction_timeout: 20 # + log_stats: true # + --- ``` !!! Note @@ -162,7 +179,7 @@ To use your Cloud Service cluster, you first need to get your cluster's connecti ```shell biganimal cluster show-connection \ - --name "my-biganimal-cluster" \ + --name "my-cloud-service-cluster" \ --provider "azure" \ --region "eastus" __OUTPUT__ @@ -181,7 +198,7 @@ You can query the complete connection information with other output formats, lik ```shell biganimal cluster show-connection \ - --name "my-biganimal-cluster" \ + --name "my-cloud-service-cluster" \ --provider "azure" \ --region "eastus" \ --output "json" @@ -207,11 +224,12 @@ After the cluster is created, you can update attributes of the cluster, includin - IAM authentication - Cloud service provider subscription IDs - Service account IDs +- Tags For example, to set the public allowed IP range list, use the `--cidr-blocks` flag: ```shell -./biganimal cluster update --name "my-biganimal-cluster" --provider "azure" \ +./biganimal cluster update --name "my-cloud-service-cluster" --provider "azure" \ --region "eastus" \ --cidr-blocks "9.9.9.9/28=Traffic from App A" ``` @@ -219,7 +237,7 @@ For example, to set the public allowed IP range list, use the `--cidr-blocks` fl To check whether the setting took effect, use the `cluster show` command, and view the detailed cluster information output in JSON format. For example: ```shell -biganimal cluster show --name "my-biganimal-cluster" --provider "azure" \ +biganimal cluster show --name "my-cloud-service-cluster" --provider "azure" \ --region "eastus" \ --output "json" \ | jq '.[0].allowIpRangeMap' @@ -255,7 +273,7 @@ To delete a cluster you no longer need, use the `cluster delete` command. For ex ```shell biganimal cluster delete \ - --name "my-biganimal-cluster" \ + --name "my-cloud-service-cluster" \ --provider "azure" \ --region "eastus" ``` @@ -268,11 +286,11 @@ Cloud Service continuously backs up your PostgreSQL clusters. Using the CLI, you ```shell biganimal cluster restore\ - --name "my-biganimal-cluster" \ + --name "my-cloud-service-cluster" \ --provider "azure" \ --region "eastus" \ --password "mypassword@123" \ - --new-name "my-biganimal-cluster-restored" \ + --new-name "my-cloud-service-cluster-restored" \ --new-region="eastus2" \ --cluster-architecture "single" \ --instance-type "azure:Standard_E2s_v3" \ @@ -284,6 +302,7 @@ biganimal cluster restore\ --backup-retention-period "2w" \ --postgis=true --pgvector=true + --tags "tag1, tag2=blue" --read-only-workloads: "true" --csp-subscription-ids "123123123,456456456" --service-account-ids "service-account-1234567b@development-data-123456.iam.gserviceaccount.com, @@ -411,66 +430,86 @@ Where `` is a valid path to a YAML configuration file. 
#### Azure example ``` -clusterName: biganimal-azure-pgd-cluster -password: Meredith Palmer Memorial -postgresType: epas -postgresVersion: "14" -provider: azure -dataNodes: 2 -dataGroups: - - iamAuthentication: false - region: westus2 - instanceType: azure:Standard_E2s_v3 - volumeType: azurepremiumstorage - volumeProperties: P2 - customMaintenanceWindow: - maintenanceStartTime: 18:00 - maintenanceStartDay: wednesday - networking: public - allowIpRangeMap: - - cidr: 9.9.9.9/28 - description: Allow traffic from App A - - cidr: 10.10.10.10/27 - description: Allow traffic from App B - pgConfigMap: - application_name: test - array_nulls: true - backupRetentionPeriod: 30d - - iamAuthentication: false - region: canadacentral - instanceType: azure:Standard_E2s_v3 - volumeType: azurepremiumstorage - volumeProperties: P2 - customMaintenanceWindow: - maintenanceStartTime: 18:00 - maintenanceStartDay: tuesday - networking: public - allowIpRangeMap: - - cidr: 9.9.9.9/28 - description: Allow traffic from App A - - cidr: 10.10.10.10/27 - description: Allow traffic from App B - pgConfigMap: - application_name: test1 - array_nulls: true - backupRetentionPeriod: 30d -witnessGroups: - - provider: azure - region: uksouth - customMaintenanceWindow: - maintenanceStartTime: 18:00 - maintenanceStartDay: monday +clusterName: cloud_service_cluster # +password: ************ # +postgresType: epas # (only epas is supported in pgd preview) +postgresVersion: 14 # +provider: azure # +dataNodes: 3 # +tags: # + - name: tag1 # + color: blue # + - name: tag2 # + color: "#FF0000" # +dataGroups: # + region: westus2 # + instanceType: azure:Standard_E2s_v3 # + volumeType: azurepremiumstorage # + volumeProperties: P1 # + volumePropertySize: 32Gi # + volumePropertyIOPS: 1000 # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> + customMaintenanceWindow: # + maintenanceStartTime: 15:00 # + maintenanceStartDay: Monday # + networking: public # + allowIpRangeMap: # + - cidr: 9.9.9.9/28 # + description: Allow traffic from App A # + - cidr: 10.10.10.10/27 # + description: Allow traffic from App B # + pgConfigMap: # + application_name: test_app # + array_nulls: true # + backupRetentionPeriod: 30d # + - iamAuthentication: false # + region: canadacentral # + instanceType: azure:Standard_E2s_v3 # + volumeType: azurepremiumstorage # + volumeProperties: P1 # + volumePropertySize: 32Gi # + volumePropertyIOPS: 1000 # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> + customMaintenanceWindow: # + maintenanceStartTime: 17:00 # + maintenanceStartDay: Tuesday # + networking: public # + allowIpRangeMap: # + - cidr: 9.9.9.9/28 # + description: Allow traffic from App A # + - cidr: 10.10.10.10/27 # + description: Allow traffic from App B # + pgConfigMap: # + application_name: test_app # + array_nulls: true # + backupRetentionPeriod: 30d # +# cspSubscriptionIds: # +# - 123123123 # +# - 456456456 # +# serviceAccountIds: # +# - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# +# - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# +witnessGroups: # + region: uksouth # + customMaintenanceWindow: # + maintenanceStartTime: 18:00 # + maintenanceStartDay: Monday # ``` #### AWS example ``` -clusterName: biganimal-aws-pgd-cluster # +clusterName: cloud-service-aws-pgd-cluster # password: Meredith Palmer Memorial # postgresType: pgextended # (only epas is supported in pgd preview) postgresVersion: 16 # provider:
aws # dataNodes: 2 # +tags: # + - name: tag1 # + color: blue # + - name: tag2 # + color: "#FF0000" # dataGroups: # region: ap-south-1 # @@ -481,7 +520,7 @@ dataGroups: # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> customMaintenanceWindow: # maintenanceStartTime: 15:00 # - maintenanceStartDay: monday # + maintenanceStartDay: Monday # networking: public # allowIpRangeMap: # - cidr: 9.9.9.9/28 # @@ -502,7 +541,7 @@ dataGroups: # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> customMaintenanceWindow: # maintenanceStartTime: 17:00 # - maintenanceStartDay: tuesday # + maintenanceStartDay: Tuesday # networking: public # allowIpRangeMap: # - cidr: 9.9.9.9/28 # @@ -525,18 +564,23 @@ witnessGroups: # customMaintenanceWindow: # maintenanceStartTime: 18:00 # - maintenanceStartDay: monday # + maintenanceStartDay: Monday # ``` #### Google Cloud example ``` -clusterName: biganimal-gcp-pgd-cluster # +clusterName: cloud-service-gcp-pgd-cluster # password: Meredith Palmer Memorial # postgresType: epas # (only epas is supported in pgd preview) postgresVersion: 16 # provider: gcp # dataNodes: 3 # +tags: # + - name: tag1 # + color: blue # + - name: tag2 # + color: "#FF0000" # dataGroups: # region: europe-west1 # @@ -547,7 +591,7 @@ dataGroups: # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> customMaintenanceWindow: # maintenanceStartTime: 15:00 # - maintenanceStartDay: monday # + maintenanceStartDay: Monday # networking: public # allowIpRangeMap: # - cidr: 9.9.9.9/28 # @@ -568,7 +612,7 @@ dataGroups: # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> customMaintenanceWindow: # maintenanceStartTime: 18:00 # - maintenanceStartDay: monday # + maintenanceStartDay: Monday # networking: public # allowIpRangeMap: # - cidr: 9.9.9.9/28 # @@ -587,11 +631,11 @@ dataGroups: # witnessGroups: # + - provider: azure # region: australiaeast # customMaintenanceWindow: # maintenanceStartTime: 21:00 # - maintenanceStartDay: monday # + maintenanceStartDay: Monday # ``` ### Add a data group @@ -617,7 +661,7 @@ dataGroups: volumeProperties: P2 customMaintenanceWindow: maintenanceStartTime: 18:00 - maintenanceStartDay: monday + maintenanceStartDay: Monday networking: public allowIpRangeMap: - cidr: 9.9.9.9/28 @@ -646,7 +690,51 @@ The syntax of the command is: pgd update [--config-file] ``` -Where `` is a valid path to a YAML configuration file with the same format as a configuration file for creating a distributed high-availability cluster. See [Create a distributed high-availability cluster](#create-a-distributed-high-availability-cluster). +Where `` is a valid path to a YAML configuration file. For updating a distributed high-availability cluster, clusterId and groupId are mandatory fields. All other fields are optional; removing a field or leaving it unspecified means no change is intended for it. See the sample config file below for updating a distributed high-availability cluster.
+ + ``` +clusterId: p-***** # +clusterName: cloud_service_cluster # +password: ************ # +tags: # + - name: tag1 # + color: blue # + - name: tag2 # + color: "#FF0000" # +dataNodes: 3 # +dataGroups: # + iamAuthentication: false # + instanceType: azure:Standard_E2s_v3 # + volumeType: azurepremiumstorage # + volumeProperties: P1 # + volumePropertySize: 32Gi # + volumePropertyIOPS: 1000 # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> + customMaintenanceWindow: # + maintenanceStartTime: 15:00 # + maintenanceStartDay: Monday # + networking: public # + allowIpRangeMap: # + - cidr: 9.9.9.9/28 # + description: Allow traffic from App A # + - cidr: 10.10.10.10/27 # + description: Allow traffic from App B # + pgConfigMap: # + application_name: test_app # + array_nulls: true # + backupRetentionPeriod: 30d # +# cspSubscriptionIds: # +# - 123123123 # +# - 456456456 # +# serviceAccountIds: # +# - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# +# - service-account-1234567b@development-data-123456.iam.gserviceaccount.com# +witnessGroups: # + customMaintenanceWindow: # + maintenanceStartTime: 18:00 # + maintenanceStartDay: Monday # +``` ### Show distributed high-availability clusters @@ -670,27 +758,41 @@ pgd restore [--config-file] Where `` is a valid path to a YAML configuration file. For example: +You can restore either an active or a deleted distributed high-availability cluster within its retention period. You can restore only one data group from the available data groups in the source cluster. By default, the new cluster inherits all settings of the source cluster; you can change cluster settings and database configurations by specifying new values in the restore command. + +To restore a distributed high-availability cluster, clusterId, sourceGroupId, and password are mandatory fields. All other fields are optional. If you remove an optional field, the corresponding source group value is used instead.
+ ``` -clusterName: pgd-restore-name -password: Meredith Palmer Memorial -dataNodes: 2 -clusterId: p-9fdkl5ju29 -dataGroups: - - iamAuthentication: false - region: uksouth - instanceType: azure:Standard_E2s_v3 - volumeType: azurepremiumstorage - volumeProperties: P2 - allowIpRangeMap: - - cidr: 9.9.9.9/28 - description: Allow traffic from App A - - cidr: 10.10.10.10/27 - description: Allow traffic from App B - pgConfigMap: - application_name: test - array_nulls: true - backupRetentionPeriod: 30d - sourceGroupId: p-9fdkl5ju29-a +clusterName: cloud_service_cluster # +password: ************ # +dataNodes: 3 # +clusterId: p-***** # +tags: # + - name: tag1 # + color: blue # + - name: tag2 # + color: "#FF0000" # +dataGroups: # + iamAuthentication: false # + region: westus2 # + instanceType: azure:Standard_E2s_v3 # + volumeType: azurepremiumstorage # + volumeProperties: P1 # + volumePropertySize: 32Gi # + volumePropertyIOPS: 1000 # : Not Applicable to Azure Premium Storage and GCP:[pd-ssd], volume Input/Output Operations Per Second> + customMaintenanceWindow: # + maintenanceStartTime: 15:00 # + maintenanceStartDay: Monday # + allowIpRangeMap: # + - cidr: 9.9.9.9/28 # + description: Allow traffic from App A # + - cidr: 10.10.10.10/27 # + description: Allow traffic from App B # + pgConfigMap: # + application_name: test_app # + array_nulls: true # + backupRetentionPeriod: 30d # ``` ### Get distributed high-availability cluster connection information @@ -749,4 +851,110 @@ For example: ``` biganimal pgd resume --id p-c5fh47nf +``` + +## Creating and managing tags + +Tagging is a powerful way to organize, manage, and track your resources and clusters, especially in large-scale environments. You can use tags to group your clusters and other resources effectively. + +Here are the available commands for creating and managing tags: + +``` +biganimal tag -h +Manage tags for grouping your clusters and other resources. + +Usage: + biganimal tag [flags] + biganimal tag [command] + +Available Commands: + create Create a tag + update Update a tag with specified ID. + delete Delete a tag + show Show all available tags + +Flags: + -h, --help help for tag +``` + +Here are the options with the tag create command: + +``` +biganimal tag create -h +Create a tag with specified name and color. + +Usage: + biganimal tag create [flags] + +Examples: +biganimal tag create --name my-cyan-tag --color "#30f8ef" +biganimal tag create --name my-red-tag --color red + +Flags: + -n, --name string Tag Name + -r, --color string Tag color hex code or name (e.g. #FF0000 or red) + -y, --yes auto-confirm all confirmations + -c, --credential string The credential which you created via 'credential create' command, the default is fetched from 'context_credential' + -P, --project string The project that groups your clusters and other resources, the default is taken from 'context_project' (default "DummyProject") + -I, --interactive[=NoOpt] Execute command interactively + -h, --help help for create +``` + +Here are the options with the tag update command: + +``` +biganimal tag update -h +Update a tag color, name for specified tag ID. + +Usage: + biganimal tag update [flags] + +Examples: +biganimal tag update --id "" --name "" --color "" + +Flags: + -i, --id string Tag ID + -n, --name string Updated Tag Name + -r, --color string Updated Tag color hex code or name (e.g.
#FF0000 or red) + -y, --yes auto-confirm all confirmations + -c, --credential string The credential which you created via 'credential create' command, the default is fetched from 'context_credential' + -P, --project string The project that groups your clusters and other resources, the default is taken from 'context_project' (default "DummyProject") + -I, --interactive[=NoOpt] Execute command interactively + -h, --help help for update +``` + +Here are the options with the tag show command: + +``` +Usage: + biganimal tag show [flags] + +Examples: +biganimal tag show + +Flags: + -c, --credential string The credential which you created via 'credential create' command, the default is fetched from 'context_credential' + -h, --help help for show + -i, --id string Tag ID + -o, --output string [table json yaml xml] (default "table") +``` + +Here are the options with the tag delete command: + +``` +Delete a tag with specified ID. + +Usage: + biganimal tag delete [flags] + +Examples: +biganimal tag delete --id "TagID_123" + +Flags: + -i, --id string Tag ID + -y, --yes auto-confirm all confirmations + -c, --credential string The credential which you created via 'credential create' command, the default is fetched from 'context_credential' + -P, --project string The project that groups your clusters and other resources, the default is taken from 'context_project' (default "DummyProject") + -I, --interactive[=NoOpt] Execute command interactively + -h, --help help for delete ``` \ No newline at end of file diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/using_features.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/using_features.mdx index 63534e42efe..0189dc40807 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/using_features.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/cli/using_features.mdx @@ -3,6 +3,8 @@ title: Using Cloud Service features with the CLI navTitle: Using Cloud Service features redirects: - /biganimal/latest/reference/cli/using_features/ #generated for BigAnimal URL path removal branch +indexdepth: 3 +deepToC: true --- ## Faraway replicas CLI commands diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connect_from_a_client/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connect_from_a_client/index.mdx index 88b746a5356..f334108bfc7 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connect_from_a_client/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connect_from_a_client/index.mdx @@ -8,6 +8,7 @@ navigation: - connect_using_dbeaver - connecting_from_a_client redirects: + - /purl/upm/ssl-production-recommendation/ - /biganimal/latest/free_trial/detail/connect_to_a_cluster/ #generated for BigAnimal URL path removal branch - /biganimal/latest/using_cluster/02_connecting_your_cluster/connecting_from_a_client/ --- @@ -28,6 +29,8 @@ You can connect to your cluster using the client of your choice including: ## Recommended settings for SSL mode + + Different clients can have different default TLS/SSL modes (sslmode). For example, `psql` defaults to `prefer`, which means the client attempts to establish a TLS connection but falls back to non-TLS if the server doesn't support it. In the `psql` example provided by EDB in the **Quick Connect** field, `sslmode` is explicitly set to `require`, which means the client attempts a TLS connection and fails if the connection to the server can't be encrypted.
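+
+As an illustrative `psql` invocation with an explicit `sslmode` (the user, host, port, and database are placeholders):
+
+```
+psql "postgresql://<user>@<host>:<port>/<dbname>?sslmode=require"
+```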
For public connections and in most environments, EDB recommends setting `sslmode` to `verify-full`. This setting ensures that you connect to the server you specified and that the connection is encrypted. diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connecting_your_cluster/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connecting_your_cluster/index.mdx index 9d205515cf0..ffd32b9851f 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connecting_your_cluster/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/connecting_your_cluster/index.mdx @@ -3,7 +3,6 @@ title: "Connecting to your cluster" description: Connect to your cluster from your applications, client apps, and EDB's tools. redirects: - /purl/upm/connect-to-cluster/ - - /purl/upm/ssl-production-recommendation/ - /biganimal/latest/using_cluster/02_connect_to_cluster/ - connecting_your_cluster - /biganimal/latest/using_cluster/02_connecting_your_cluster/ #generated for BigAnimal URL path removal branch diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/faraway_replicas.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/faraway_replicas.mdx index cab5230eaca..fe281ee18cb 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/faraway_replicas.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/faraway_replicas.mdx @@ -75,6 +75,8 @@ You can create faraway replicas in any active regions in your cloud. There's no ## Modify a replica + + 1. Sign in to the [Console](https://portal.biganimal.com/). 2. Go to the [Clusters](https://portal.biganimal.com/clusters) page. A list of previously created clusters appears. diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/monitoring_and_logging/other_monitoring/index.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/monitoring_and_logging/other_monitoring/index.mdx index 5a033e635f7..8e5e6c3576e 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/monitoring_and_logging/other_monitoring/index.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/monitoring_and_logging/other_monitoring/index.mdx @@ -13,6 +13,8 @@ redirects: ## Metrics + + You can access metrics in a [Prometheus format](https://prometheus.io/docs/concepts/data_model/) if you request this feature from Cloud Service Support. You can retrieve the hostname and port for your clusters by using the Prometheus URL available on the **Monitoring and logging** tab on each cluster's detail page in the Console. These [example metrics](example_metrics/) can help you get started. @@ -31,6 +33,8 @@ For more information on some common monitoring services, see: ## Logs + + You can view your logs in your cloud provider's blob storage solution if you request this feature from Cloud Service Support. You can retrieve the location of your object storage on the **Monitoring and logging** tab on your cluster's detail page in the Console. The general pattern for getting logs from blob storage into the cloud provider's solution is to write a custom serverless function that watches the blob storage and uploads to the desired solution. 
diff --git a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication.mdx b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication.mdx index f5034349942..bce8dd86d0f 100644 --- a/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication.mdx +++ b/advocacy_docs/edb-postgres-ai/cloud-service/using_cluster/postgres_access/database_authentication.mdx @@ -4,6 +4,8 @@ description: Setting up the database authentication for the Postgres clusters. redirects: - /purl/upm/csp-auth-use/ - /purl/upm/iam-auth-postgres/ + - /purl/upm/csp-aws-ad-usermanagement/ + - /purl/upm/csp-azure-ad-usermanagement/ - /biganimal/latest/using_cluster/01_postgres_access/ #generated for BigAnimal URL path removal branch --- @@ -75,6 +77,10 @@ If you use a single database to host multiple schemas, create a database owner a ## IAM authentication for Postgres + + + + Any user with a supported cloud account connected to a BigAnimal subscription who has the Postgres IAM role iam_aws, iam_azure, or iam_gcp can authenticate to the database using their IAM credentials. ### Configuring IAM for Postgres diff --git a/advocacy_docs/edb-postgres-ai/console/estate/agent/create-machine-user.mdx b/advocacy_docs/edb-postgres-ai/console/estate/agent/create-machine-user.mdx index b55927b03f3..5220601264e 100644 --- a/advocacy_docs/edb-postgres-ai/console/estate/agent/create-machine-user.mdx +++ b/advocacy_docs/edb-postgres-ai/console/estate/agent/create-machine-user.mdx @@ -14,13 +14,13 @@ Select the **Add New User** button. On the **Add New User** page, for the "User Type" select **Machine User*. -The form changes when you select make that selection so it's asking for a name and optional email. Enter a name for the machine user. +The form changes when you make that selection, asking for a name and an optional email. Enter a name for the machine user. ## Creating an access key Check the **Create Access Key** checkbox in **Add New User** page. The form will expand to ask for an Access Key Name and an Expiry time. -Give the key a name in the Access Key Name field and enter a value from 1 to 365 for the number of days from now that you want this key to be valid in the Expires In field. The date on which the key will expire is shown in the underneath the field. +Give the key a name in the Access Key Name field and enter a value from 1 to 365 for the number of days from now that you want this key to be valid in the Expires In field. The date on which the key will expire is shown underneath the field. Select the **Add User** button. @@ -34,7 +34,7 @@ Once you have securely stored your access key, select the **Key Stored Safely** Select the **Projects** tab to view all your projects. -Select the project from which you want to monitor the a database. +Select the project from which you want to monitor the database. This will take you to the project's overview. Select the **Users** tab, and locate the machine user you just created. @@ -46,5 +46,5 @@ An **Assign Project Roles** dialog will appear with a selection of roles that ca Select the "estate ingester" role and then select the **Submit** button. -Your new machine user is ready for to ingest data from the Beacon Agent. +Your new machine user is now ready to ingest data from the Beacon Agent.
diff --git a/advocacy_docs/edb-postgres-ai/console/estate/agent/install-agent.mdx b/advocacy_docs/edb-postgres-ai/console/estate/agent/install-agent.mdx index fa6d7c26ca0..6b9c1ecd041 100644 --- a/advocacy_docs/edb-postgres-ai/console/estate/agent/install-agent.mdx +++ b/advocacy_docs/edb-postgres-ai/console/estate/agent/install-agent.mdx @@ -82,7 +82,7 @@ export BEACON_AGENT_ACCESS_KEY= export BEACON_AGENT_PROJECT_ID= ``` -Running the `beacon-agent setup` command creates a configuration file in the Beacon configuration directory. using those environment variables. +These environment variables are used when you run the `beacon-agent setup` command to create a configuration file in the Beacon configuration directory. You also need to specify the Beacon configuration directory for storing the configuration file and the name of the configuration file to generate there. The `$HOME/.beacon/` file is one of the default locations which `beacon_agent` searches for `beacon_agent.yaml` when it starts. Using the `-file` flag tells the agent setup process to create its configuration file in a specific location. @@ -153,7 +153,7 @@ provider: poll_interval: 5m0s ``` -## Test Beacon Agent locally. +## Test Beacon Agent locally For an initial test of the agent, you can get it to send the data that it would normally send to the EDB Enterprise AI control plane to standard output, your terminal session, instead. This allows you to quickly confirm if the agent is successfully able to gather data and what that data looks like. @@ -210,7 +210,7 @@ agent: ``` -## Run Beacon Agent. +## Run Beacon Agent Run the agent using the following command: diff --git a/advocacy_docs/edb-postgres-ai/console/using/notifications.mdx b/advocacy_docs/edb-postgres-ai/console/using/notifications.mdx index cf270341853..a2176ea8bb4 100644 --- a/advocacy_docs/edb-postgres-ai/console/using/notifications.mdx +++ b/advocacy_docs/edb-postgres-ai/console/using/notifications.mdx @@ -4,15 +4,20 @@ navTitle: Notifications description: "Use notifications to get an alert for the different types of events occurring in your EDB Postgres AI account, organizations, and projects." redirects: - /biganimal/latest/administering_cluster/notifications/ #generated for BigAnimal URL path removal branch +deepToC: true +indexdepth: 3 --- -With EDB Postgres AI, you can opt to get specific types of notifications and receive both in-app and email notifications. +With EDB Postgres AI, you can customize the types of notifications you want to receive. There are two types of notification channels: -Different types of events are sent as notifications. Users with different roles can configure the preferences to receive these notifications in the in-app inbox, by email, or both. +- User notification channels - you can receive notifications by email, in the in-app inbox, or both. These notifications go to all users who have enabled them and are eligible to receive the notifications for that event. +- System notification channels - you can receive notifications through Slack, a webhook, PagerDuty, or all of them. These notifications are designed to trigger once per event, primarily for automating tasks within the user's organization. + +Different types of events are sent as notifications. Users with different roles can configure the preferences to receive these notifications.
The notifications are categorized into the following preference sections: -- Account -- Organizations -- Projects +- **Account** - only for user notification channels +- **Organizations** - for both user and system notification channels +- **Projects** - for both user and system notification channels The notifications under the **Account** preference section are: @@ -44,12 +49,83 @@ The notifications under the **Projects** preference section are: ## Configuring notifications -The project owners/editors and organization owners/admins can configure the notifications for the events visible to them. They can choose if they want to receive notifications in the in-app inbox, by email, or both. They can also configure email notifications for their teams in their organization. - -Project-level notifications are configured in the project. +The project owners/editors and organization owners/admins can enable/disable the notifications for the events visible to them. They can choose whether to receive notifications in the in-app inbox, by email, or both. Notification settings made by a user apply only to that user. If an email notification is enabled, the email is sent to the email address associated with the user's login. +System notification channels can be configured at the Organization-level by an organization owner or at the Project-level by a project owner. By default, the Organization-level system notification channels are inherited by each project of that organization. However, you can enable/disable or reconfigure the system notification channels at the Project-level to override the default settings. + +These steps show how to configure the system notification channels at the Organization-level: + +### Configure Slack notification channel + +To configure the Slack notification channel: + +1. Log in to the EDB Postgres AI console. +1. From the menu under your name in the top-right panel, select **Settings**. +1. Select the **Notifications** tab. Options available are Webhook, Slack, and Pager Duty. +1. Select **Slack**. In the window that pops up, provide the following details: + 1. Access token - provide a Slack bot access token. + 1. The Slack channel to which notifications are to be sent, for example, #general, #alerts, or #notifications. + 1. Select the **Save** button. + +!!!note +Before configuring the Slack notification channel, you need to create a Slack app and grant it the following permissions: +- `chat:write` +- `im:write` +- `users:read.email` +- `users:read` + +Once the Slack app is created and installed in the Slack workspace, you can use the app's bot user OAuth token to set up the Slack channel. + +Also make sure to add the Slack app to the Slack channel of your choice. +!!! + + +### Configure Webhook notification channel + +To configure the Webhook notification channel: + +1. Log in to the EDB Postgres AI console. +1. From the menu under your name in the top-right panel, select **Settings**. +1. Select the **Notifications** tab. Options available are Webhook, Slack, and Pager Duty. +1. Select **Webhook** and provide the details in the pop-up window: + 1. Host URL of the webhook + 1. Method of the webhook: POST or PUT + 1. Optionally, provide either of these authentication parameters: + 1. For Basic Auth: + 1. Username + 2. Password + 2. For Bearer Token: + 1. Bearer Token + 2. Select the **Save** button. + +### Configure Pager Duty notification channel + +To configure the Pager Duty notification channel: + +1. Log in to the EDB Postgres AI console. +1. 
From the menu under your name in the top-right panel, select **Settings**. +1. Select the **Notifications** tab. Options available are Webhook, Slack, and Pager Duty. +1. Select **Pager Duty** and provide the details in the pop-up window: + 1. Set the Routing Key (integration key). + 2. Select the **Save** button. + +!!!note +Before configuring the Pager Duty notification channel: +- You need access to a Pager Duty account with elevated privileges. +- You need a Pager Duty service created and configured with an Events API v2 integration: +- In your PagerDuty account, navigate to the desired service and access its "Integrations" settings. +- Add a new "Events API v2" integration and configure it according to your requirements. +- Copy the Integration Key (routing key) provided by PagerDuty for the newly created integration. +!!! + +To configure the system notification channels at the Project-level: +- Go to the **Project Overview** page. +- Select **Settings** on the left-side menu. +- Select **Notifications**. +- For each system notification channel, follow the same steps as described above for the Organization-level. + ## Viewing notifications Users in the following roles can view the notifications: diff --git a/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx b/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx index 67e996473d1..0c79ecee86a 100644 --- a/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx +++ b/advocacy_docs/edb-postgres-ai/console/using/organizations/identity_provider/index.mdx @@ -92,6 +92,8 @@ Step-by-step instructions for setting up specific identity providers are availab ### Add a domain + + You need a verified domain so your users can have a streamlined login experience with their email address. 1. On the **Domains** tab, enter the domain name and select **Next: Verify Domain**. @@ -160,6 +162,8 @@ You add users through your identity provider. A user who you add in the identity ### Add a tile + + Once you establish the identity provider, you can create a EDB Postgres AI tile for users to access the organization's EDB Postgres AI application. To do so, copy the quick sign-in URL from the **Settings > Identity Provider** page of the EDB Postgres AI portal. For details on how to add a tile, refer to your identify provider documentation for instructions on setting up SSO access to your application. ## Next steps diff --git a/advocacy_docs/edb-postgres-ai/console/using/projects/index.mdx b/advocacy_docs/edb-postgres-ai/console/using/projects/index.mdx index 97a58b773aa..8ad75ca0288 100644 --- a/advocacy_docs/edb-postgres-ai/console/using/projects/index.mdx +++ b/advocacy_docs/edb-postgres-ai/console/using/projects/index.mdx @@ -16,7 +16,6 @@ navigation: - settings - migrate redirects: -- /purl/upm/project-manage/ - /biganimal/latest/administering_cluster/projects/ --- diff --git a/advocacy_docs/edb-postgres-ai/console/using/projects/users.mdx b/advocacy_docs/edb-postgres-ai/console/using/projects/users.mdx index 1cc11369198..ac377d83a4b 100644 --- a/advocacy_docs/edb-postgres-ai/console/using/projects/users.mdx +++ b/advocacy_docs/edb-postgres-ai/console/using/projects/users.mdx @@ -3,6 +3,8 @@ title: Managing project users navTitle: Users description: Add users to projects and assign roles to control access to projects deepToC: true +redirects: +- /purl/upm/project-manage/ --- The **Users** page displays all the users in the organisation in a table.
Each users full name, email, project roles, identity provider, and on the right hand side, a pen icon. Selecting the pen icon on a user allows you to assign or remove roles from that user. diff --git a/advocacy_docs/edb-postgres-ai/index.mdx b/advocacy_docs/edb-postgres-ai/index.mdx index 4ee4c49ef4e..5b4ab79fffe 100644 --- a/advocacy_docs/edb-postgres-ai/index.mdx +++ b/advocacy_docs/edb-postgres-ai/index.mdx @@ -22,3 +22,6 @@ navigation: The home of all EDB Postgres® AI documentation. +!!!note "Deprecation" + The EDB Hosted Cloud Service has been deprecated. Support is available for current customers. However, the related documentation topics will be removed shortly. Further updates will be provided as the removal progresses. + diff --git a/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/config_reader.mdx b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/config_reader.mdx index db3a3149fa0..611f32c0f97 100644 --- a/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/config_reader.mdx +++ b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/config_reader.mdx @@ -21,51 +21,70 @@ redirects: 1. Open the EDB DMS reader located in `/opt/cdcreader/run-cdcreader.sh` and ensure you have write permissions. -1. Set the variables according to your environment and uncomment the edited lines. See [parameters](#parameters) for further guidance. +1. Set the variables according to your environment and uncomment the edited lines. See [parameters](#parameters) for further guidance. The script is reproduced below. ```shell -### set the following environment variables: +#!/bin/bash -e +# run_cdcreader.sh +# +# This script provides a convenient place to specify +# environment variables used to configure the +# EDB Data Migration Service Reader. +# +# After env exports, `java` is called to start the +# software. -############################################## -# Data Migration Service Cloud Configuration # -############################################## +########################################## +# DMS Reader General Configuration # +########################################## -# This ID is used to identify the cdcreader. +# This ID is used to identify DMS Reader +# and is specified by the user. 
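+# For example (an illustrative, user-chosen value):
+#export DBZ_ID=my-dms-reader-01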
#export DBZ_ID= -# Now we only support aws +# Supported options include: appliance (the hybrid PG AI platform), aws #export CLOUD_PROVIDER= -# No need to change about this field +# This is the DMS backend service used by the Reader +# If your CLOUD_PROVIDER is `appliance`, consult your system administrators +# The default value supports the `aws` CLOUD_PROVIDER #export RW_SERVICE_HOST=https://transporter-rw-service.biganimal.com -# You need to create migration credentials in EDB postgresAI platform and set these fields with the path of credential files -#export TLS_PRIVATE_KEY_PATH=$MY_CREDENTIALS_PATH/client-key.pem -#export TLS_CERTIFICATE_PATH=$MY_CREDENTIALS_PATH/client-cert.pem -#export TLS_CA_PATH=$MY_CREDENTIALS_PATH/int.crt -#export APICURIOREQUEST_CLIENT_KEYSTORE_LOCATION=$MY_CREDENTIALS_PATH/client.keystore.p12 -#export APICURIOREQUEST_TRUSTSTORE_LOCATION=$MY_CREDENTIALS_PATH/int.truststore.p12 -#export KAFKASECURITY_CLIENT_KEYSTORE_LOCATION=$MY_CREDENTIALS_PATH/client.keystore.p12 -#export KAFKASECURITY_TRUSTSTORE_LOCATION=$MY_CREDENTIALS_PATH/int.truststore.p12 +# You need to create migration credentials in EDB Postgres AI platform +# and set these fields with the path of the credential files +#export TLS_PRIVATE_KEY_PATH=$HOME/credentials/client-key.pem +#export TLS_CERTIFICATE_PATH=$HOME/credentials/client-cert.pem +#export TLS_CA_PATH=$HOME/credentials/int.crt +#export APICURIOREQUEST_CLIENT_KEYSTORE_LOCATION=$HOME/credentials/client.keystore.p12 +#export APICURIOREQUEST_TRUSTSTORE_LOCATION=$HOME/credentials/int.truststore.p12 +#export KAFKASECURITY_CLIENT_KEYSTORE_LOCATION=$HOME/credentials/client.keystore.p12 +#export KAFKASECURITY_TRUSTSTORE_LOCATION=$HOME/credentials/int.truststore.p12 -################################################## -# Data Migration Service Source DB Configuration # -################################################## +########################################## +# DMS Reader Source DB Configuration # +########################################## # A sample configuration to create a single postgres database connection: #export DBZ_DATABASES_0__TYPE=POSTGRES #export DBZ_DATABASES_0__HOSTNAME=localhost #export DBZ_DATABASES_0__PORT=5432 +# The CATALOG is the database name #export DBZ_DATABASES_0__CATALOG=source #export DBZ_DATABASES_0__USERNAME=postgres +# The password env can be set without specifying it here +# but the env structure looks like this #export DBZ_DATABASES_0__PASSWORD=password -# You can increase the index to config more database for the reader +# You can increase the index to configure more than +# one database for the DMS Reader #export DBZ_DATABASES_1__TYPE=ORACLE #export DBZ_DATABASES_1__HOSTNAME=localhost #export DBZ_DATABASES_1__PORT=1521 +# The CATALOG is the database name #export DBZ_DATABASES_1__CATALOG=ORCLCDB/ORCLPDB1 #export DBZ_DATABASES_1__USERNAME=oracle +# The password env can be set without specifying it here +# but the env structure looks like this #export DBZ_DATABASES_1__PASSWORD=password ########################################## @@ -77,6 +96,9 @@ redirects: #export QUARKUS_LOG_LEVEL=DEBUG # Loglevel for a single package #export QUARKUS_LOG_CATEGORY__COM_ENTERPRISEDB__LEVEL=DEBUG + +cd $(dirname $0) +java ${JAVA_OPTS} -jar quarkus-run.jar ``` ## Parameters diff --git a/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/index.mdx b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/index.mdx index 26a099eaa35..717a84e2bfa 100644 ---
a/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/index.mdx +++ b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/index.mdx @@ -30,3 +30,11 @@ Select a link to access the applicable installation instructions: - [Ubuntu 22.04](linux_x86_64/edb-dms-reader_ubuntu_22), [Ubuntu 20.04](linux_x86_64/edb-dms-reader_ubuntu_20) - [Debian 12](linux_x86_64/edb-dms-reader_debian_12), [Debian 11](linux_x86_64/edb-dms-reader_debian_11) + +## Linux [AArch64 (ARM64)](linux_arm64) + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/edb-dms-reader_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/edb-dms-reader_rhel_9) diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_20.mdx b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/edb-dms-reader_rhel_9.mdx similarity index 58% rename from advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_20.mdx rename to advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/edb-dms-reader_rhel_9.mdx index e742ba3fbcd..a576829d9e1 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_20.mdx +++ b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/edb-dms-reader_rhel_9.mdx @@ -1,6 +1,6 @@ --- -navTitle: Ubuntu 20.04 -title: Installing PostgreSQL on Ubuntu 20.04 x86_64 +navTitle: RHEL 9 or OL 9 +title: Installing EDB Data Migration Service Reader on RHEL 9 or OL 9 arm64 --- ## Prerequisites @@ -9,15 +9,11 @@ Before you begin the installation process: - Set up the EDB repository. - !!! Note - Rather than use the EDB repository, you can obtain PostgreSQL installers and installation packages from the [PostgreSQL community downloads page](https://www.postgresql.org/download/). - !!! - Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. To determine if your repository exists, enter this command: - `apt-cache search enterprisedb` + `dnf repolist | grep enterprisedb` If no output is generated, the repository isn't installed. @@ -33,8 +29,8 @@ Before you begin the installation process: ## Install the package +Install the EDB DMS Reader (packaged as `cdcreader`): + ```shell -sudo apt-get -y install postgresql- +sudo dnf install cdcreader ``` - -Where `` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql-16`. 
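+
+To confirm the package is installed (a hedged check; `cdcreader` is the package name used in the install step):
+
+```shell
+rpm -q cdcreader
+```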
diff --git a/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/index.mdx b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/index.mdx new file mode 100644 index 00000000000..2c54c64a4e5 --- /dev/null +++ b/advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/index.mdx @@ -0,0 +1,16 @@ +--- +title: "Installing EDB Data Migration Service Reader on Linux AArch64 (ARM64)" +navTitle: "On Linux ARM64" +indexCards: none + +navigation: + - edb-dms-reader_rhel_9 +--- + +Operating system-specific install instructions are described in the corresponding documentation: + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](edb-dms-reader_rhel_9) + +- [Oracle Linux (OL) 9](edb-dms-reader_rhel_9) diff --git a/advocacy_docs/partner_docs/HashicorpVault/04-ConfiguringHashicorpVault.mdx b/advocacy_docs/partner_docs/HashicorpVault/04-ConfiguringHashicorpVault.mdx index 88222b36acf..fc1848ef4be 100644 --- a/advocacy_docs/partner_docs/HashicorpVault/04-ConfiguringHashicorpVault.mdx +++ b/advocacy_docs/partner_docs/HashicorpVault/04-ConfiguringHashicorpVault.mdx @@ -36,7 +36,7 @@ If you run a check and find that your system doesn't have Python installed, you ## Install Pykmip Once your EDB Repository is installed on your server, you can then install the PyKMIP utility. -- As root user, issue the `install python3-pykmip` command. This example uses a RHEL8 server, so the command is `dnf install python3-pymkip`. +- As root user, issue the `install python3-pykmip` command. This example uses a RHEL8 server, so the command is `dnf install python3-pykmip`. The output looks something like: @@ -230,4 +230,4 @@ IgIhAMb3y3xRXwddt2ejaow1GytysRz4LoxC3B5dLn1LoCpI -----END CERTIFICATE----- ``` -Once you have all of the required certificates, you're ready to use the Hashicorp Vault secrets engine with your EDB Postgres distribution with TDE. \ No newline at end of file +Once you have all of the required certificates, you're ready to use the Hashicorp Vault secrets engine with your EDB Postgres distribution and TDE. diff --git a/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx index 4d6373b08ca..4c7f7239c36 100644 --- a/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx +++ b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx @@ -2,6 +2,9 @@ title: PG Failover Slots release notes navTitle: "Release notes" indexCards: none +navigation: +- pg_failover_slots_1.1.0_rel_notes +- pg_failover_slots_1.0.0_rel_notes --- The PG Failover Slots documentation describes the latest version of PG Failover Slots, including minor releases and patches. These release notes @@ -11,8 +14,8 @@ about the release that introduced the feature. 
| Version | Release Date | | --------------------------- | ------------ | +| [1.1.0](pg_failover_slots_1.1.0_rel_notes) | 27 Aug 2024 | | [1.0.0](pg_failover_slots_1.0.0_rel_notes) | 31 Mar 2023 | - diff --git a/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/pg_failover_slots_1.1.0_rel_notes.mdx b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/pg_failover_slots_1.1.0_rel_notes.mdx new file mode 100644 index 00000000000..0540d0bc280 --- /dev/null +++ b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/pg_failover_slots_1.1.0_rel_notes.mdx @@ -0,0 +1,13 @@ +--- +title: Release notes for PG Failover Slots version 1.1.0 +navTitle: "Version 1.1.0" +--- + +This release of PG Failover Slots includes: + +| Type | Description | +| ------- | --------------------------------------- | +| Feature | Add support for PostgreSQL 17. | +| Bug fix | Do not drop physical slots on standby. Previously, physical replication slots were dropped on the standby if they didn't exist on the primary. This fix ensures only logical replication slots are acted upon. | +| Enhancement | New configuration setting: `pg_failover_slots.maintenance_db`. This value was previously hardcoded. | +| Enhancement | New configuration setting: `pg_failover_slots.worker_nap_time`. This value was previously hardcoded. | \ No newline at end of file diff --git a/advocacy_docs/pg_extensions/spl_check/using/index.mdx b/advocacy_docs/pg_extensions/spl_check/using/index.mdx index 5997bd6941a..9f3c407ae7e 100644 --- a/advocacy_docs/pg_extensions/spl_check/using/index.mdx +++ b/advocacy_docs/pg_extensions/spl_check/using/index.mdx @@ -138,7 +138,7 @@ $$ LANGUAGE edbspl; ## Checking all your code -Use the `spl_check_function()` to check all of your functions/procedures and to check all your triggers. +Use `spl_check_function()` to check all of your functions/procedures and to check all your triggers. By default `spl_check_function()` checks the validity of parameters and return types for a specified function. However, a GUC allows you to use `spl_check_function()` to [validate the function calls](#validating-function-calls) of all functions called by that function (except system functions and system package functions). To check all nontrigger EDB SPL Check functions: @@ -186,6 +186,15 @@ OFFSET 0 ) ss ORDER BY (pcf).functionid::regprocedure::text, (pcf).lineno; ``` +## Validating function calls + +When the GUC `spl_check.validate_function_calls` is set to true, `spl_check` validates a function and any function calls encountered while validating the initial function. + +For example, if function `f1()` calls function `f2()` and `spl_check.validate_function_calls` is set to true, `spl_check` validates function `f1()` and function `f2()`. If `spl_check.validate_function_calls` is set to false, `spl_check` validates only function `f1()` and checks the parameters and return type of `f2()`. + +Use the `SET` command to set the value of `spl_check.validate_function_calls`. By default, `spl_check.validate_function_calls` is set to false. + +`spl_check` doesn't check any functions with an OID less than `FirstNormalObjectId`, that is, functions defined in the `sys` or `pg_catalog` schema. They are assumed to be system functions. Also, `spl_check` skips system packages and functions written in languages other than EDB SPL.
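+
+As a minimal sketch, assume a database named `mydb` and the function `f1()` from the example above (the `spl_check_function()` call shape follows the earlier examples on this page):
+
+```
+psql -d mydb \
+  -c "SET spl_check.validate_function_calls = true;" \
+  -c "SELECT * FROM spl_check_function('f1()');"
+```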
## Limitations diff --git a/advocacy_docs/pg_extensions/wait_states/configuring.mdx b/advocacy_docs/pg_extensions/wait_states/configuring.mdx new file mode 100644 index 00000000000..fbd5b58031c --- /dev/null +++ b/advocacy_docs/pg_extensions/wait_states/configuring.mdx @@ -0,0 +1,15 @@ +--- +title: Configuring EDB Wait States +navTitle: Configuring +--- + +## Parameters + +The following parameters in `postgresql.conf` control the EDB Wait States extension behavior. If you modify these parameters, reload or restart Postgres to apply the changes. + +| Parameter | Description | Default | Reload or Restart | +|---------------------------------|-------------|---------|----------------------| +| `edb_wait_states.sampling_interval`| The interval between two EDB Wait States sampling cycles.| 1 second | Reload| +| `edb_wait_states.retention_period`| Deletes EDB Wait States log files after a defined retention period. | 604800 seconds (7 days)| Reload| +| `edb_wait_states.enable_collection`| Enable or disable EDB Wait States data collection. | true | Reload| +| `edb_wait_states.directory` | Stores the EDB Wait States logs in this directory. The path must be a full, absolute path. It can't be a relative path. | $PGDATA/edb_wait_states |Restart| diff --git a/advocacy_docs/pg_extensions/wait_states/index.mdx b/advocacy_docs/pg_extensions/wait_states/index.mdx index 21e78717259..acbcfcf8f1d 100644 --- a/advocacy_docs/pg_extensions/wait_states/index.mdx +++ b/advocacy_docs/pg_extensions/wait_states/index.mdx @@ -6,6 +6,7 @@ directoryDefaults: navigation: - rel_notes - installing + - configuring - using --- diff --git a/advocacy_docs/repos/migration/index.mdx b/advocacy_docs/repos/migration/index.mdx index 3eda6f4229e..16214450e86 100644 --- a/advocacy_docs/repos/migration/index.mdx +++ b/advocacy_docs/repos/migration/index.mdx @@ -6,7 +6,7 @@ deepToC: true --- !!! Warning Repos 1.0 sunset -The Repos 1.0 repositories are due to be sunset by the end of October 2024. After that, you will no longer have access to them. We recommend that you migrate to the Repos 2.0 repositories as soon as possible. +The Repos 1.0 repositories are due to be sunset by the end of 2024. After that, you will no longer have access to them. We recommend that you migrate to the Repos 2.0 repositories as soon as possible. !!! We recommend that you perform the two steps to this process in order. The first step is to remove the old Repos 1.0 repositories from your system. The second step is to add the new Repos 2.0 repositories to your system. diff --git a/advocacy_docs/security/assessments/cve-2020-10531.mdx b/advocacy_docs/security/assessments/cve-2020-10531.mdx new file mode 100644 index 00000000000..48c38f5c116 --- /dev/null +++ b/advocacy_docs/security/assessments/cve-2020-10531.mdx @@ -0,0 +1,82 @@ +--- +title: CVE-2020-10531 - Integer overflow in ICU doAppend() +navTitle: CVE-2020-10531 +affectedProducts: All versions of EDB Postgres Advanced Server from 13 through 16 +--- + +First Published: 2024/11/14 + +Last Updated: 2024/11/14 + +## Important + +This is an assessment of the impact of CVE-2020-10531 on EDB products and services. It links to and details the CVE and supplements that information with EDB's own assessment. + +## Summary + +The original vulnerability was an integer overflow leading to a heap-based buffer overflow in `UnicodeString::doAppend()` in ICU (International Components for Unicode) for C/C++ which existed up to (and including) version 66.1.
+ +In the process of validating EDB Postgres Advanced Server 17, a release check identified the presence of a pre-67.1 version of the library being used. + +It had been believed that existing versions of EPAS had been built with a later library version. + +While most EDB software builds against the operating system provided version of ICU, EDB Postgres Advanced Server can include EDB’s own build of ICU 66.1, specifically on RHEL7 on x86\_64/PPCLE, RHEL8 on x86\_64/PPCLE, SLES 12 x86\_64/PPCLE/s390x, and SLES 15 x86\_64/PPCLE/s390x. On RHEL9, the OS supplied library (version 67.1) is used. + +EDB Postgres Extended and Postgres use the OS supplied ICU libraries, and are not affected by this vulnerability. + +It has been established that EDB’s code does not use the vulnerable function. + +Assessment and mitigation: In the interests of ensuring that the vulnerable library doesn’t provide any surface for an attacker, we are releasing updated versions of all affected products. + +## Vulnerability Details + +CVE-ID: [CVE-2020-10531](https://nvd.nist.gov/vuln/detail/CVE-2020-10531) + +CVSS Base Score: 8.8 + +CVSS Temporal Score: Undefined + +CVSS Environmental Score: Undefined + +CVSS Vector: CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H + +## Affected Products and Versions + +### EnterpriseDB Postgres Advanced Server + +On RHEL 7/8 (x86_64/PPCLE) and SLES 12/15 (x86_64/PPCLE/s390x) + +* All versions of EPAS prior to 16.5.0 +* All versions of EPAS prior to 15.9.0 +* All versions of EPAS prior to 14.14.0 +* All versions of EPAS prior to 13.17.0 + +## Remediation + +### EDB Postgres Advanced Server Version Information + +| Product | VRMF | Remediation/First Fix | +|:--------|:--------|:----------------------| +| EPAS 16 | 16.4.0 | Upgrade to 16.5.0 | +| EPAS 15 | 15.8.0 | Upgrade to 15.9.0 | +| EPAS 14 | 14.13.0 | Upgrade to 14.14.0 | +| EPAS 13 | 13.16.0 | Upgrade to 13.17.0 | + +If you are unable to upgrade your EPAS installation, upgrade the installed edb-icu package. + +## Reference + +* [CVSS #3.1 Calculator](https://www.first.org/cvss/calculator/3.1) + +## Related Information + +* [EnterpriseDB](https://www.enterprisedb.com/) +* [PostgreSQL](https://www.postgresql.org/) +* [EDB Postgres Advanced Server](https://www.enterprisedb.com/products/edb-postgres-advanced-server) + +## Change History + +14 November 2024: Original Copy Published + +## Disclaimer + +This document is provided on an "as is" basis and does not imply any kind of guarantee or warranty, including the warranties of merchantability or fitness for a particular use. Your use of the information on the document is at your own risk. EDB reserves the right to change or update this document at any time. Customers are therefore recommended to always view the latest version of this document. diff --git a/advocacy_docs/security/assessments/index.mdx b/advocacy_docs/security/assessments/index.mdx index b1339bdf46e..02810f44ac1 100644 --- a/advocacy_docs/security/assessments/index.mdx +++ b/advocacy_docs/security/assessments/index.mdx @@ -6,10 +6,11 @@ iconName: Security hideKBLink: true hideToC: false navigation: -- cve-2024-7348 -- cve-2024-4317 -- cve-2024-1597 +- cve-2020-10531 - cve-2024-0985 +- cve-2024-1597 +- cve-2024-4317 +- cve-2024-7348 --- The CVEs listed in this section are from PostgreSQL and other parties who have reported them and that may have an impact on EDB products. @@ -28,18 +29,18 @@ The CVEs listed in this section are from PostgreSQL and other parties who have r

CVE-2024-7348

+

CVE-2020-10531

-  Read Assessment -  Updated: 2024/08/15 -

PostgreSQL relation replacement during pg_dump executes arbitrary SQL

-
All versions of PostgreSQL, EPAS and PGE prior to 16.4, 15.8, and 14.13
+  Read Assessment +  Updated: 2024/11/14 +

Integer overflow in ICU doAppend()

+
All versions of EDB Postgres Advanced Server from 13 through 16

Summary:  -Time-of-check Time-of-use (TOCTOU) race condition in pg_dump in PostgreSQL allows an object creator to execute arbitrary SQL functions as the user running pg_dump, which is often a superuser. The attack involves replacing another relation type with a view or foreign table. The attack requires waiting for pg_dump to start, but winning the race condition is trivial if the attacker retains an open transaction. Versions before PostgreSQL 16.4, 15.8, 14.13, 13.16, and 12.20 are affected. +The original vulnerability was an integer overflow leading to a heap-based buffer overflow in UnicodeString::doAppend() in ICU (International Components for Unicode) for C/C++ which existed up to (and including) version 66.1.
-Read More... +Read More...
@@ -47,18 +48,18 @@ Time-of-check Time-of-use (TOCTOU) race condition in pg_dump in PostgreSQL allow -

CVE-2024-4317

+

CVE-2024-0985

-  Read Assessment -  Updated: 2024/05/09 -

Restrict visibility of "pg_stats_ext" and "pg_stats_ext_exprs" entries to the table owner

-
All versions of PostgreSQL, EPAS and PGE prior to 16.3, 15.7, and 14.12
+  Read Assessment +  Updated: 2024/02/26 +

PostgreSQL non-owner REFRESH MATERIALIZED VIEW CONCURRENTLY executes arbitrary SQL

+
PostgreSQL, EPAS all versions prior to 15.6.0,14.11.0,13.14.20 and 12.18.23, PGE all versions prior to 15.6.0

Summary:  -Missing authorization in PostgreSQL built-in views pg_stats_ext and pg_stats_ext_exprs allows an unprivileged database user to read most common values and other statistics from CREATE STATISTICS commands of other users. The most common values may reveal column values the eavesdropper could not otherwise read or results of functions they cannot execute. Installing an unaffected version only fixes fresh PostgreSQL installations, namely those that are created with the initdb utility after installing that version. Current PostgreSQL installations will remain vulnerable until they follow the instructions in the release notes, which are provided as a convenience in the below section. Within major versions 14-16, minor versions before PostgreSQL 16.3, 15.7, and 14.12 are affected. Versions before PostgreSQL 14 are unaffected. +Late privilege drop in REFRESH MATERIALIZED VIEW CONCURRENTLY in PostgreSQL allows an object creator to execute arbitrary SQL functions as the command issuer. The command intends to run SQL functions as the owner of the materialized view, enabling safe refresh of untrusted materialized views. The victim is a superuser or member of one of the attacker's roles. The attack requires luring the victim into running REFRESH MATERIALIZED VIEW CONCURRENTLY on the attacker's materialized view. As part of exploiting this vulnerability, the attacker creates functions that use CREATE RULE to convert the internally-built temporary table to a view. Versions before PostgreSQL 15.6, 14.11, 13.14, and 12.18 are affected. The only known exploit does not work in PostgreSQL 16 and later. For defense in depth, PostgreSQL 16.2 adds the protections that older branches are using to fix their vulnerability.
-Read More... +Read More...
@@ -85,18 +86,37 @@ pgjdbc, the PostgreSQL JDBC Driver, allows attacker to inject SQL if using Prefe -

CVE-2024-0985

+

CVE-2024-4317

-  Read Assessment -  Updated: 2024/02/26 -

PostgreSQL non-owner REFRESH MATERIALIZED VIEW CONCURRENTLY executes arbitrary SQL

-
PostgreSQL, EPAS all versions prior to 15.6.0,14.11.0,13.14.20 and 12.18.23, PGE all versions prior to 15.6.0
+  Read Assessment +  Updated: 2024/05/09 +

Restrict visibility of "pg_stats_ext" and "pg_stats_ext_exprs" entries to the table owner

+
All versions of PostgreSQL, EPAS and PGE prior to 16.3, 15.7, and 14.12

Summary:  -Late privilege drop in REFRESH MATERIALIZED VIEW CONCURRENTLY in PostgreSQL allows an object creator to execute arbitrary SQL functions as the command issuer. The command intends to run SQL functions as the owner of the materialized view, enabling safe refresh of untrusted materialized views. The victim is a superuser or member of one of the attacker's roles. The attack requires luring the victim into running REFRESH MATERIALIZED VIEW CONCURRENTLY on the attacker's materialized view. As part of exploiting this vulnerability, the attacker creates functions that use CREATE RULE to convert the internally-built temporary table to a view. Versions before PostgreSQL 15.6, 14.11, 13.14, and 12.18 are affected. The only known exploit does not work in PostgreSQL 16 and later. For defense in depth, PostgreSQL 16.2 adds the protections that older branches are using to fix their vulnerability. +Missing authorization in PostgreSQL built-in views pg_stats_ext and pg_stats_ext_exprs allows an unprivileged database user to read most common values and other statistics from CREATE STATISTICS commands of other users. The most common values may reveal column values the eavesdropper could not otherwise read or results of functions they cannot execute. Installing an unaffected version only fixes fresh PostgreSQL installations, namely those that are created with the initdb utility after installing that version. Current PostgreSQL installations will remain vulnerable until they follow the instructions in the release notes, which are provided as a convenience in the below section. Within major versions 14-16, minor versions before PostgreSQL 16.3, 15.7, and 14.12 are affected. Versions before PostgreSQL 14 are unaffected.
-Read More... +Read More... +
+ + + + + + +

CVE-2024-7348

+ +  Read Assessment +  Updated: 2024/08/15 +

PostgreSQL relation replacement during pg_dump executes arbitrary SQL

+
All versions of PostgreSQL, EPAS and PGE prior to 16.4, 15.8, and 14.13
+
+
+Summary:  +Time-of-check Time-of-use (TOCTOU) race condition in pg_dump in PostgreSQL allows an object creator to execute arbitrary SQL functions as the user running pg_dump, which is often a superuser. The attack involves replacing another relation type with a view or foreign table. The attack requires waiting for pg_dump to start, but winning the race condition is trivial if the attacker retains an open transaction. Versions before PostgreSQL 16.4, 15.8, 14.13, 13.16, and 12.20 are affected. +
+Read More...
diff --git a/advocacy_docs/security/index.mdx b/advocacy_docs/security/index.mdx index 003cb05203d..1fd3d0f0954 100644 --- a/advocacy_docs/security/index.mdx +++ b/advocacy_docs/security/index.mdx @@ -55,34 +55,34 @@ All versions of EnterpriseDB Postgres Advanced Server (EPAS) from 15.0 and prior -

CVE-2024-7348

+

CVE-2020-10531

-  Read Assessment -  Updated: 2024/08/15 -

PostgreSQL relation replacement during pg_dump executes arbitrary SQL

-
All versions of PostgreSQL, EPAS and PGE prior to 16.4, 15.8, and 14.13
+  Read Assessment +  Updated: 2024/11/14 +

Integer overflow in ICU doAppend()

+
All versions of EDB Postgres Advanced Server from 13 through 16

Summary:  -Time-of-check Time-of-use (TOCTOU) race condition in pg_dump in PostgreSQL allows an object creator to execute arbitrary SQL functions as the user running pg_dump, which is often a superuser. The attack involves replacing another relation type with a view or foreign table. The attack requires waiting for pg_dump to start, but winning the race condition is trivial if the attacker retains an open transaction. Versions before PostgreSQL 16.4, 15.8, 14.13, 13.16, and 12.20 are affected. +The original vulnerability was an integer overflow leading to a heap-based buffer overflow in UnicodeString::doAppend() in ICU (International Components for Unicode) for C/C++ which existed up to (and including) version 66.1.
-Read More... +Read More...
-

CVE-2024-4317

+

CVE-2024-0985

-  Read Assessment -  Updated: 2024/05/09 -

Restrict visibility of "pg_stats_ext" and "pg_stats_ext_exprs" entries to the table owner

-
All versions of PostgreSQL, EPAS and PGE prior to 16.3, 15.7, and 14.12
+  Read Assessment +  Updated: 2024/02/26 +

PostgreSQL non-owner REFRESH MATERIALIZED VIEW CONCURRENTLY executes arbitrary SQL

+
PostgreSQL, EPAS all versions prior to 15.6.0,14.11.0,13.14.20 and 12.18.23, PGE all versions prior to 15.6.0

Summary:  -Missing authorization in PostgreSQL built-in views pg_stats_ext and pg_stats_ext_exprs allows an unprivileged database user to read most common values and other statistics from CREATE STATISTICS commands of other users. The most common values may reveal column values the eavesdropper could not otherwise read or results of functions they cannot execute. Installing an unaffected version only fixes fresh PostgreSQL installations, namely those that are created with the initdb utility after installing that version. Current PostgreSQL installations will remain vulnerable until they follow the instructions in the release notes, which are provided as a convenience in the below section. Within major versions 14-16, minor versions before PostgreSQL 16.3, 15.7, and 14.12 are affected. Versions before PostgreSQL 14 are unaffected. +Late privilege drop in REFRESH MATERIALIZED VIEW CONCURRENTLY in PostgreSQL allows an object creator to execute arbitrary SQL functions as the command issuer. The command intends to run SQL functions as the owner of the materialized view, enabling safe refresh of untrusted materialized views. The victim is a superuser or member of one of the attacker's roles. The attack requires luring the victim into running REFRESH MATERIALIZED VIEW CONCURRENTLY on the attacker's materialized view. As part of exploiting this vulnerability, the attacker creates functions that use CREATE RULE to convert the internally-built temporary table to a view. Versions before PostgreSQL 15.6, 14.11, 13.14, and 12.18 are affected. The only known exploit does not work in PostgreSQL 16 and later. For defense in depth, PostgreSQL 16.2 adds the protections that older branches are using to fix their vulnerability.
-Read More... +Read More...
@@ -103,18 +103,34 @@ pgjdbc, the PostgreSQL JDBC Driver, allows attacker to inject SQL if using Prefe -

CVE-2024-0985

+

CVE-2024-4317

-  Read Assessment -  Updated: 2024/02/26 -

PostgreSQL non-owner REFRESH MATERIALIZED VIEW CONCURRENTLY executes arbitrary SQL

-
PostgreSQL, EPAS all versions prior to 15.6.0,14.11.0,13.14.20 and 12.18.23, PGE all versions prior to 15.6.0
+  Read Assessment +  Updated: 2024/05/09 +

Restrict visibility of "pg_stats_ext" and "pg_stats_ext_exprs" entries to the table owner

+
All versions of PostgreSQL, EPAS and PGE prior to 16.3, 15.7, and 14.12

Summary:  -Late privilege drop in REFRESH MATERIALIZED VIEW CONCURRENTLY in PostgreSQL allows an object creator to execute arbitrary SQL functions as the command issuer. The command intends to run SQL functions as the owner of the materialized view, enabling safe refresh of untrusted materialized views. The victim is a superuser or member of one of the attacker's roles. The attack requires luring the victim into running REFRESH MATERIALIZED VIEW CONCURRENTLY on the attacker's materialized view. As part of exploiting this vulnerability, the attacker creates functions that use CREATE RULE to convert the internally-built temporary table to a view. Versions before PostgreSQL 15.6, 14.11, 13.14, and 12.18 are affected. The only known exploit does not work in PostgreSQL 16 and later. For defense in depth, PostgreSQL 16.2 adds the protections that older branches are using to fix their vulnerability. +Missing authorization in PostgreSQL built-in views pg_stats_ext and pg_stats_ext_exprs allows an unprivileged database user to read most common values and other statistics from CREATE STATISTICS commands of other users. The most common values may reveal column values the eavesdropper could not otherwise read or results of functions they cannot execute. Installing an unaffected version only fixes fresh PostgreSQL installations, namely those that are created with the initdb utility after installing that version. Current PostgreSQL installations will remain vulnerable until they follow the instructions in the release notes, which are provided as a convenience in the below section. Within major versions 14-16, minor versions before PostgreSQL 16.3, 15.7, and 14.12 are affected. Versions before PostgreSQL 14 are unaffected.
-Read More... +Read More... +
+ + + +
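The practical impact of this fix is visible from an unprivileged session. A minimal sketch for spot-checking an installation, assuming a scratch database and an unprivileged role named `app_reader` (both names are placeholders for this example):

```shell
# As a privileged user, build extended statistics on a throwaway table
# (the scratch database and app_reader role are assumptions for this sketch).
psql -U postgres -d scratch <<'SQL'
CREATE TABLE t (a int, b int);
INSERT INTO t SELECT g, g % 10 FROM generate_series(1, 1000) g;
CREATE STATISTICS t_stats ON a, b FROM t;
ANALYZE t;
SQL

# On an installation that has applied the release-note instructions, a role
# that doesn't own the table should see no statistics rows for it:
psql -U app_reader -d scratch -c \
  "SELECT statistics_name, most_common_vals FROM pg_stats_ext WHERE tablename = 't';"
```

As the summary notes, upgrading the binaries alone isn't enough for existing clusters; the catalog views must also be updated as described in the release notes.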

CVE-2024-7348

+ +  Read Assessment +  Updated: 2024/08/15 +

PostgreSQL relation replacement during pg_dump executes arbitrary SQL

+
All versions of PostgreSQL, EPAS and PGE prior to 16.4, 15.8, and 14.13
+
+
+Summary:  +Time-of-check Time-of-use (TOCTOU) race condition in pg_dump in PostgreSQL allows an object creator to execute arbitrary SQL functions as the user running pg_dump, which is often a superuser. The attack involves replacing another relation type with a view or foreign table. The attack requires waiting for pg_dump to start, but winning the race condition is trivial if the attacker retains an open transaction. Versions before PostgreSQL 16.4, 15.8, 14.13, 13.16, and 12.20 are affected. +
+Read More...
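Because the fix for this issue ships in both the server and the client tools, a reasonable precaution before taking dumps is to confirm that the `pg_dump` binary and the target server are both at or beyond the fixed minor releases listed above. A minimal sketch (the database name and connection details are placeholders):

```shell
# Client side: expect 16.4, 15.8, 14.13, 13.16, or 12.20 and later
pg_dump --version

# Server side: report the running minor version of the instance being dumped
psql -At -c "SHOW server_version;"

# Then take the dump as usual
pg_dump -Fc -f mydb.dump mydb
```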
\ No newline at end of file diff --git a/advocacy_docs/supported-open-source/postgresql/installing/index.mdx b/advocacy_docs/supported-open-source/postgresql/installing/index.mdx index f9c41467155..37d9eacbb08 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/index.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/index.mdx @@ -41,7 +41,7 @@ Select a link to access the applicable installation instructions: ### Debian and derivatives -- [Ubuntu 22.04](linux_x86_64/postgresql_ubuntu_22), [Ubuntu 20.04](linux_x86_64/postgresql_ubuntu_20) +- [Ubuntu 22.04](linux_x86_64/postgresql_ubuntu_22) - [Debian 12](linux_x86_64/postgresql_debian_12), [Debian 11](linux_x86_64/postgresql_debian_11) @@ -57,6 +57,12 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/postgresql_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/postgresql_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/postgresql_debian_12) diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/index.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/index.mdx index a5b44702762..0b8f1e9e273 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/index.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/index.mdx @@ -4,11 +4,18 @@ navTitle: "On Linux ARM64" indexCards: none navigation: + - postgresql_rhel_9 - postgresql_debian_12 --- Operating system-specific install instructions are described in the corresponding documentation: +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](postgresql_rhel_9) + +- [Oracle Linux (OL) 9](postgresql_rhel_9) + ### Debian and derivatives - [Debian 12](postgresql_debian_12) diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_debian_12.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_debian_12.mdx index 26187dbef09..dfb3a38b00c 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_debian_12.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_debian_12.mdx @@ -37,4 +37,4 @@ Before you begin the installation process: sudo apt-get -y install postgresql-<xx> ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql-16`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql-17`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_rhel_9.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_rhel_9.mdx new file mode 100644 index 00000000000..55b89740a2a --- /dev/null +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_arm64/postgresql_rhel_9.mdx @@ -0,0 +1,55 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing PostgreSQL on RHEL 9 or OL 9 arm64 +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + !!! Note + Rather than use the EDB repository, you can obtain PostgreSQL installers and installation packages from the [PostgreSQL community downloads page](https://www.postgresql.org/download/). + !!!
+ + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +- Enable additional repositories to resolve dependencies: + ```shell + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms" + ``` +- Disable the built-in PostgreSQL module: + ```shell + dnf -qy module disable postgresql + ``` + +## Install the package + +```shell +sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib +``` + +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_8.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_8.mdx index 4103e5c54da..bbc29c34980 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_8.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_8.mdx @@ -54,4 +54,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_9.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_9.mdx index 842d74bb296..c33af01fcd8 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_9.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_rhel_9.mdx @@ -54,4 +54,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`.
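After the `dnf` install on RHEL-family systems, the cluster still needs to be initialized and started. A minimal post-install sketch for version 17, assuming the PGDG-style layout these packages use (binaries under `/usr/pgsql-17`; adjust the paths if your packages differ):

```shell
# Initialize the data directory for the freshly installed server
sudo /usr/pgsql-17/bin/postgresql-17-setup initdb

# Start the service now and on every boot
sudo systemctl enable --now postgresql-17

# Confirm the server answers
sudo -u postgres psql -c "SELECT version();"
```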
diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_sles_15.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_sles_15.mdx index a31953cafa4..aef206de738 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_sles_15.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_ppc64le/postgresql_sles_15.mdx @@ -47,4 +47,4 @@ Before you begin the installation process: sudo zypper -n install postgresql<xx>-server ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/index.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/index.mdx index ed79f869eb4..00942f6a71f 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/index.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/index.mdx @@ -10,7 +10,6 @@ navigation: - postgresql_other_linux_8 - postgresql_sles_15 - postgresql_ubuntu_22 - - postgresql_ubuntu_20 - postgresql_debian_12 - postgresql_debian_11 --- @@ -43,8 +42,6 @@ Operating system-specific install instructions are described in the correspondin - [Ubuntu 22.04](postgresql_ubuntu_22) -- [Ubuntu 20.04](postgresql_ubuntu_20) - - [Debian 12](postgresql_debian_12) - [Debian 11](postgresql_debian_11) diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_11.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_11.mdx index a5b22f8e183..0a1eec48c7d 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_11.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_11.mdx @@ -37,4 +37,4 @@ Before you begin the installation process: sudo apt-get -y install postgresql-<xx> ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql-16`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql-17`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_12.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_12.mdx index 11ee583726e..0a8b97b683b 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_12.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_debian_12.mdx @@ -37,4 +37,4 @@ Before you begin the installation process: sudo apt-get -y install postgresql-<xx> ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql-16`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql-17`.
diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_8.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_8.mdx index 7c05941a38e..3b7e24ed004 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_8.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_8.mdx @@ -54,4 +54,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_9.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_9.mdx index 92d4c6f4c52..ccdd36e0733 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_9.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_other_linux_9.mdx @@ -54,4 +54,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_8.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_8.mdx index fe2c8b6c87b..0e8b62b9fc9 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_8.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_8.mdx @@ -52,4 +52,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_9.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_9.mdx index 85f8c1cf8d8..9a76d8ed03d 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_9.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_rhel_9.mdx @@ -52,4 +52,4 @@ Before you begin the installation process: sudo dnf -y install postgresql<xx>-server postgresql<xx>-contrib ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server postgresql16-contrib`.
+Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server postgresql17-contrib`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_sles_15.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_sles_15.mdx index 76fe2c2192d..413cf1cad96 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_sles_15.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_sles_15.mdx @@ -47,4 +47,4 @@ Before you begin the installation process: sudo zypper -n install postgresql<xx>-server ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql16-server`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql17-server`. diff --git a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_22.mdx b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_22.mdx index 25129c010d1..d2fd1bc0bac 100644 --- a/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_22.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installing/linux_x86_64/postgresql_ubuntu_22.mdx @@ -37,4 +37,4 @@ Before you begin the installation process: sudo apt-get -y install postgresql-<xx> ``` -Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 16, the package name would be `postgresql-16`. +Where `<xx>` is the version of PostgreSQL you are installing. For example, if you are installing version 17, the package name would be `postgresql-17`.
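On Debian and Ubuntu, by contrast, the packaging initializes and starts a default cluster during `apt-get install`, so a short verification is usually all that's needed afterward (a sketch, assuming the `postgresql-common` tools that ship with these packages):

```shell
# List the clusters created by the Debian/Ubuntu packaging
pg_lsclusters

# Confirm the expected version is running
sudo -u postgres psql -At -c "SHOW server_version;"
```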
diff --git a/docker/docker-compose.build-pdf.yaml b/docker/docker-compose.build-pdf.yaml index c44cf98122f..ba05aea1b93 100644 --- a/docker/docker-compose.build-pdf.yaml +++ b/docker/docker-compose.build-pdf.yaml @@ -1,5 +1,3 @@ -version: "3.8" - services: docs-pdf-builder: build: diff --git a/gatsby-browser.js b/gatsby-browser.js index feb9f696645..a91c72c31f3 100644 --- a/gatsby-browser.js +++ b/gatsby-browser.js @@ -19,3 +19,26 @@ function scrollToAnchor(location, mainNavHeight = 0) { return true; } + +exports.onInitialClientRender = () => { + // h/t https://stackoverflow.com/questions/19646684/force-open-the-details-summary-tag-for-print-in-chrome/75260733#75260733 + window.matchMedia("print").addEventListener("change", (evt) => { + if (evt.matches) { + let detailsElements = document.body.querySelectorAll( + "details:not([open])", + ); + for (let e of detailsElements) { + e.toggleAttribute("open", true); + e.dataset.wasclosed = ""; + } + } else { + let detailsElements = document.body.querySelectorAll( + "details[data-wasclosed]", + ); + for (let e of detailsElements) { + e.removeAttribute("open"); + delete e.dataset.wasclosed; + } + } + }); +}; diff --git a/gatsby-config.js b/gatsby-config.js index 3903e418d30..d2ce185befe 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -327,9 +327,12 @@ module.exports = { noInlineHighlight: true, aliases: { postgresql: "sql", + psql: "sql", sh: "shell", "c++": "cpp", console: "shell-session", + output: "none", + terminal: "none", }, }, }, diff --git a/gatsby-node.js b/gatsby-node.js index 2c8337ebd1c..d93cbae8452 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -635,6 +635,26 @@ exports.onPostBuild = async ({ graphql, reporter, pathPrefix }) => { // // additional headers // + await addHeaders(graphql, reporter, pathPrefix); + + // + // redirects cleanup + // + await rewriteRedirects(pathPrefix, reporter); +}; + +/** + * Adds content type headers for raw files + * @param {function} graphql + * @param {GatsbyReporter} reporter + * @param {string} pathPrefix + */ +async function addHeaders(graphql, reporter, pathPrefix) { + const contentHeaderTimer = reporter.activityTimer( + "adding content type headers", + ); + contentHeaderTimer.start(); + const publicFileData = await graphql(` query { allPublicFile { @@ -699,17 +719,38 @@ exports.onPostBuild = async ({ graphql, reporter, pathPrefix }) => { "public/_headers", (await readFile("public/_headers")) + "\n" + newHeaders.join("\n"), ); + contentHeaderTimer.end(); +} + +/** + * Rewrites generated redirects: + * - fix up unnecessary path prefix for legacy redirects + * - add hash for perma-URLs + * @param {string} pathPrefix + * @param {GatsbyReporter} reporter + */ +async function rewriteRedirects(pathPrefix, reporter) { + const redirectTimer = reporter.activityTimer("rewriting redirects"); + redirectTimer.start(); - // - // redirects cleanup - // const originalRedirects = await readFile("public/_redirects"); // rewrite legacy redirects to exclude the /docs prefix + // rewrite perma-URL redirects to include hash const prefixRE = new RegExp(`^${pathPrefix}/edb-docs/`); + const purlRE = new RegExp( + `^/docs/purl/(?<product>[^/]+)/(?<target>[^/]+)/?\\s+(?<destination>\\S+)\\s+\\d+`, + ); let rewrittenRedirects = originalRedirects .split("\n") .map((line) => line.replace(prefixRE, "/edb-docs/")) + .map((line) => + line.replace( + purlRE, + pathPrefix + + "/purl/$<product>/$<target>/ $<destination>#$<product>_$<target> 302", + ), + ) .join("\n"); if (rewrittenRedirects.length === originalRedirects.length) { @@ -764,17 +805,18 @@ exports.onPostBuild = async ({ graphql, reporter,
pathPrefix }) => { # Netlify pathPrefix path rewrite ${pathPrefix}/* /:splat 200`, ); -}; + redirectTimer.end(); +} /** * Strip compilation hashes from generated HTML * this speeds up Netlify deploys, as (otherwise unchanged) files don't change every build * there probably should be a faster / more elegant way to do this, possibly by overriding one of the * default webpack configs... But I've had no luck doing so up to now. - * @param {*} reporter Gatsby reporter + * @param {GatsbyReporter} reporter Gatsby reporter */ async function removeCompilationHashes(reporter) { - const hashTimer = reporter.createProgress("Removing compilation hashes"); + const hashTimer = reporter.createProgress("removing compilation hashes"); hashTimer.start(); const { globby } = await import("globby"); diff --git a/install_template/config.yaml b/install_template/config.yaml index 2f44b5a13b4..229a781deec 100644 --- a/install_template/config.yaml +++ b/install_template/config.yaml @@ -10,6 +10,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [2] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [2] - name: Debian 11 arch: x86_64 supported versions: [2] @@ -29,43 +32,46 @@ products: platforms: - name: RHEL 8 arch: ppc64le - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: AlmaLinux 8 or Rocky Linux 8 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: RHEL 8 or OL 8 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: RHEL 9 arch: ppc64le - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [42.7.3.2] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Debian 11 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Debian 12 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Debian 12 arch: arm64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Ubuntu 20.04 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Ubuntu 22.04 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: SLES 15 arch: x86_64 - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: SLES 15 arch: ppc64le - supported versions: [42.7.3.1] + supported versions: [42.7.3.2] - name: Migration Toolkit platforms: - name: RHEL 8 or OL 8 @@ -77,6 +83,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [55] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [55] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 supported versions: [55] @@ -111,43 +120,46 @@ products: platforms: - name: RHEL 8 arch: ppc64le - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: RHEL 9 arch: ppc64le - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: AlmaLinux 8 or Rocky Linux 8 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: RHEL 8 or OL 8 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: 
[14, 15, 16] + supported versions: [14, 15, 16, 17] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [14, 15, 16, 17] - name: Debian 11 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: Debian 12 arch: x86_64 - supported versions: [16] + supported versions: [16, 17] - name: Debian 12 arch: arm64 - supported versions: [16] + supported versions: [16, 17] - name: Ubuntu 20.04 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: Ubuntu 22.04 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: SLES 15 arch: x86_64 - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: SLES 15 arch: ppc64le - supported versions: [14, 15, 16] + supported versions: [14, 15, 16, 17] - name: EDB ODBC Connector platforms: - name: RHEL 8 @@ -168,6 +180,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [13, 16] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [13, 16] - name: Debian 11 arch: x86_64 supported versions: [13, 16] @@ -208,7 +223,10 @@ products: supported versions: [1] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: [1] + supported versions: [1] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [1] - name: Debian 11 arch: x86_64 supported versions: [1] @@ -250,6 +268,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [4] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [4] - name: Debian 11 arch: x86_64 supported versions: [4] @@ -291,6 +312,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [4] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [4] - name: Debian 11 arch: x86_64 supported versions: [4] @@ -316,69 +340,75 @@ products: platforms: - name: AlmaLinux 8 or Rocky Linux 8 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: RHEL 8 or OL 8 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [13, 14, 15, 16, 17] - name: RHEL 9 arch: ppc64le - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: RHEL 8 arch: ppc64le - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: Debian 12 arch: x86_64 - supported versions: [16] + supported versions: [16, 17] - name: Debian 12 arch: arm64 - supported versions: [16] + supported versions: [16, 17] - name: Debian 11 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: Ubuntu 22.04 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: Ubuntu 20.04 arch: x86_64 supported versions: [11, 12, 13, 14, 15, 16] - name: SLES 15 arch: x86_64 - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: SLES 15 arch: ppc64le - supported versions: [11, 12, 13, 14, 15, 16] + supported versions: [11, 12, 13, 14, 15, 16, 17] - name: EDB Postgres Extended Server platforms: - 
name: AlmaLinux 8 or Rocky Linux 8 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: RHEL 8 or OL 8 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [15, 16, 17] - name: Debian 12 arch: x86_64 - supported versions: [16] + supported versions: [16, 17] - name: Debian 12 arch: arm64 - supported versions: [16] + supported versions: [16, 17] - name: Debian 11 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: Ubuntu 22.04 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: Ubuntu 20.04 arch: x86_64 supported versions: [15, 16] @@ -396,6 +426,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [41] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [41] - name: RHEL 9 arch: ppc64le supported versions: [41] @@ -440,6 +473,9 @@ products: - name: RHEL 9 arch: ppc64le supported versions: [4] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [4] - name: RHEL 8 arch: ppc64le supported versions: [4] @@ -478,6 +514,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [2] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [2] - name: RHEL 9 arch: ppc64le supported versions: [2] @@ -519,6 +558,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [5] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [5] - name: RHEL 9 arch: ppc64le supported versions: [5] @@ -560,6 +602,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [2] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [2] - name: Ubuntu 22.04 arch: x86_64 supported versions: [2] @@ -601,6 +646,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [3.4.2] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [3.4.2] - name: Ubuntu 22.04 arch: x86_64 supported versions: [3.4.2] @@ -642,6 +690,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [9] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [9] - name: RHEL 9 arch: ppc64le supported versions: [9] @@ -683,6 +734,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [9] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [9] - name: RHEL 9 arch: ppc64le supported versions: [9] @@ -714,43 +768,46 @@ products: platforms: - name: RHEL 8 arch: ppc64le - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: RHEL 9 arch: ppc64le - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: AlmaLinux 8 or Rocky Linux 8 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: AlmaLinux 9 or Rocky Linux 9 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: RHEL 8 or OL 8 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: RHEL 9 or OL 9 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [15, 16, 17] - name: Debian 12 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: Debian 12 arch: arm64 - supported versions: [15, 16] + supported versions: [15, 
16, 17] - name: Debian 11 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: Ubuntu 20.04 arch: x86_64 supported versions: [15, 16] - name: Ubuntu 22.04 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: SLES 15 arch: x86_64 - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: SLES 15 arch: ppc64le - supported versions: [15, 16] + supported versions: [15, 16, 17] - name: Replication Server platforms: - name: AlmaLinux 8 or Rocky Linux 8 @@ -765,6 +822,9 @@ products: - name: RHEL 9 or OL 9 arch: x86_64 supported versions: [7] + - name: RHEL 9 or OL 9 + arch: arm64 + supported versions: [7] - name: Debian 11 arch: x86_64 supported versions: [7] diff --git a/install_template/templates/platformBase/index.njk b/install_template/templates/platformBase/index.njk index d2adae158ae..6aba8f97103 100644 --- a/install_template/templates/platformBase/index.njk +++ b/install_template/templates/platformBase/index.njk @@ -79,7 +79,7 @@ Select a link to access the applicable installation instructions: {{archInstall("IBM Power (ppc64le)", "ppc64le", ["RHEL", "SLES"])}} -{{archInstall("AArch64 (ARM64)", "arm64", ["Debian"])}} +{{archInstall("AArch64 (ARM64)", "arm64", ["RHEL", "OL", "Debian"])}} {% endblock linuxinstall %} {% block otherosinstall %} diff --git a/install_template/templates/products/edb*plus/rhel-9_arm64.njk b/install_template/templates/products/edb*plus/rhel-9_arm64.njk new file mode 100644 index 00000000000..99fac101b08 --- /dev/null +++ b/install_template/templates/products/edb*plus/rhel-9_arm64.njk @@ -0,0 +1,3 @@ +{% extends "products/edb*plus/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %}{% endblock prerequisites %} \ No newline at end of file diff --git a/install_template/templates/products/edb-data-migration-service-reader/arm64_index.njk b/install_template/templates/products/edb-data-migration-service-reader/arm64_index.njk new file mode 100644 index 00000000000..c538681325a --- /dev/null +++ b/install_template/templates/products/edb-data-migration-service-reader/arm64_index.njk @@ -0,0 +1,7 @@ +{% extends "platformBase/arm64_index.njk" %} +{% set productShortname="edb-dms-reader" %} + +{% block frontmatter %} +deployPath: advocacy_docs/edb-postgres-ai/migration-etl/data-migration-service/getting_started/installing/linux_arm64/index.mdx +indexCards: none +{% endblock frontmatter %} \ No newline at end of file diff --git a/install_template/templates/products/edb-data-migration-service-reader/rhel-9_arm64.njk b/install_template/templates/products/edb-data-migration-service-reader/rhel-9_arm64.njk new file mode 100644 index 00000000000..0599dba7d89 --- /dev/null +++ b/install_template/templates/products/edb-data-migration-service-reader/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-data-migration-service-reader/rhel-9-or-ol-9.njk" %} + diff --git a/install_template/templates/products/edb-jdbc-connector/arm64_index.njk b/install_template/templates/products/edb-jdbc-connector/arm64_index.njk index 0a8c6d32c3d..587a9816227 100644 --- a/install_template/templates/products/edb-jdbc-connector/arm64_index.njk +++ b/install_template/templates/products/edb-jdbc-connector/arm64_index.njk @@ -4,3 +4,4 @@ {% block frontmatter %} deployPath: jdbc_connector/{{ product.version }}/installing/linux_arm64/index.mdx {% endblock frontmatter %} + diff --git a/install_template/templates/products/edb-jdbc-connector/rhel-9_arm64.njk 
b/install_template/templates/products/edb-jdbc-connector/rhel-9_arm64.njk new file mode 100644 index 00000000000..223963c714d --- /dev/null +++ b/install_template/templates/products/edb-jdbc-connector/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-jdbc-connector/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-ocl-connector/rhel-9_arm64.njk b/install_template/templates/products/edb-ocl-connector/rhel-9_arm64.njk new file mode 100644 index 00000000000..9f0b856e325 --- /dev/null +++ b/install_template/templates/products/edb-ocl-connector/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-ocl-connector/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-odbc-connector/rhel-9_arm64.njk b/install_template/templates/products/edb-odbc-connector/rhel-9_arm64.njk new file mode 100644 index 00000000000..11f119351c1 --- /dev/null +++ b/install_template/templates/products/edb-odbc-connector/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-odbc-connector/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-pgbouncer/rhel-9_arm64.njk b/install_template/templates/products/edb-pgbouncer/rhel-9_arm64.njk new file mode 100644 index 00000000000..38d001007d7 --- /dev/null +++ b/install_template/templates/products/edb-pgbouncer/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-pgbouncer/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-pgpool-ii-extensions/rhel-9_arm64.njk b/install_template/templates/products/edb-pgpool-ii-extensions/rhel-9_arm64.njk new file mode 100644 index 00000000000..1e82154321e --- /dev/null +++ b/install_template/templates/products/edb-pgpool-ii-extensions/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-pgpool-ii-extensions/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-pgpool-ii/rhel-9_arm64.njk b/install_template/templates/products/edb-pgpool-ii/rhel-9_arm64.njk new file mode 100644 index 00000000000..83f9e29b663 --- /dev/null +++ b/install_template/templates/products/edb-pgpool-ii/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-pgpool-ii/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/edb-postgres-advanced-server/index.njk b/install_template/templates/products/edb-postgres-advanced-server/index.njk index b559ed87f89..9750d8f1b0b 100644 --- a/install_template/templates/products/edb-postgres-advanced-server/index.njk +++ b/install_template/templates/products/edb-postgres-advanced-server/index.njk @@ -22,5 +22,5 @@ redirects: {% block otherosinstall %} ## Windows -- [Windows Server 2019](windows) +- [Windows Server 2022](windows) {% endblock otherosinstall %} diff --git a/install_template/templates/products/edb-postgres-advanced-server/rhel-9_arm64.njk b/install_template/templates/products/edb-postgres-advanced-server/rhel-9_arm64.njk new file mode 100644 index 00000000000..0c0b0deb008 --- /dev/null +++ b/install_template/templates/products/edb-postgres-advanced-server/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends 
"products/edb-postgres-advanced-server/rhel-9-or-ol-9.njk" %} + diff --git a/install_template/templates/products/edb-postgres-extended-server/rhel-9_arm64.njk b/install_template/templates/products/edb-postgres-extended-server/rhel-9_arm64.njk new file mode 100644 index 00000000000..668e6b32a40 --- /dev/null +++ b/install_template/templates/products/edb-postgres-extended-server/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/edb-postgres-extended-server/rhel-9-or-ol-9.njk" %} + diff --git a/install_template/templates/products/failover-manager/rhel-9_arm64.njk b/install_template/templates/products/failover-manager/rhel-9_arm64.njk new file mode 100644 index 00000000000..ea56c076990 --- /dev/null +++ b/install_template/templates/products/failover-manager/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/failover-manager/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/hadoop-foreign-data-wrapper/rhel-9_arm64.njk b/install_template/templates/products/hadoop-foreign-data-wrapper/rhel-9_arm64.njk new file mode 100644 index 00000000000..11d9017285f --- /dev/null +++ b/install_template/templates/products/hadoop-foreign-data-wrapper/rhel-9_arm64.njk @@ -0,0 +1,9 @@ +{% extends "products/hadoop-foreign-data-wrapper/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %} +{{ super() }} +- Enable additional repositories to resolve dependencies: + ```shell + sudo dnf config-manager --set-enabled PowerTools + ``` +{% endblock prerequisites %} \ No newline at end of file diff --git a/install_template/templates/products/migration-toolkit/rhel-9_arm64.njk b/install_template/templates/products/migration-toolkit/rhel-9_arm64.njk new file mode 100644 index 00000000000..8a0caab90f9 --- /dev/null +++ b/install_template/templates/products/migration-toolkit/rhel-9_arm64.njk @@ -0,0 +1,3 @@ +{% extends "products/migration-toolkit/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %}{% endblock prerequisites %} \ No newline at end of file diff --git a/install_template/templates/products/mongodb-foreign-data-wrapper/rhel-9_arm64.njk b/install_template/templates/products/mongodb-foreign-data-wrapper/rhel-9_arm64.njk new file mode 100644 index 00000000000..5d9c95280c5 --- /dev/null +++ b/install_template/templates/products/mongodb-foreign-data-wrapper/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/mongodb-foreign-data-wrapper/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/mysql-foreign-data-wrapper/rhel-9_arm64.njk b/install_template/templates/products/mysql-foreign-data-wrapper/rhel-9_arm64.njk new file mode 100644 index 00000000000..f6086814d16 --- /dev/null +++ b/install_template/templates/products/mysql-foreign-data-wrapper/rhel-9_arm64.njk @@ -0,0 +1,2 @@ +{% extends "products/mysql-foreign-data-wrapper/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} \ No newline at end of file diff --git a/install_template/templates/products/postgis/rhel-9_arm64.njk b/install_template/templates/products/postgis/rhel-9_arm64.njk new file mode 100644 index 00000000000..781d890ee87 --- /dev/null +++ b/install_template/templates/products/postgis/rhel-9_arm64.njk @@ -0,0 +1,29 @@ +{% extends "products/postgis/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %} +{{ super() }} +- Enable additional 
repositories to resolve dependencies: + ```shell + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms" + ``` + !!!note + + If you are using a public cloud RHEL image, `subscription-manager` may not be enabled and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`. + + !!! +{% endblock prerequisites %} +{% block installCommand %} +```shell +# To install PostGIS 3.4: +sudo dnf -y install edb-as<xx>-postgis34 + +# To install PostGIS 3.1 using EDB Postgres Advanced Server 13-15: +sudo dnf -y install edb-as<xx>-postgis3 + +# To install PostGIS 3.1 using EDB Postgres Advanced Server 11-12: +sudo dnf -y install edb-as<xx>-postgis +``` +{% include "./_epasVersionInPostGISPackageName.njk" %} +{% endblock installCommand %} + + diff --git a/install_template/templates/products/postgres-enterprise-manager-agent/rhel-9_arm64.njk b/install_template/templates/products/postgres-enterprise-manager-agent/rhel-9_arm64.njk new file mode 100644 index 00000000000..86cb86a3880 --- /dev/null +++ b/install_template/templates/products/postgres-enterprise-manager-agent/rhel-9_arm64.njk @@ -0,0 +1,3 @@ +{% extends "products/postgres-enterprise-manager-agent/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %}{% endblock prerequisites %} \ No newline at end of file diff --git a/install_template/templates/products/postgres-enterprise-manager-server/rhel-9_arm64.njk b/install_template/templates/products/postgres-enterprise-manager-server/rhel-9_arm64.njk new file mode 100644 index 00000000000..61856ff934e --- /dev/null +++ b/install_template/templates/products/postgres-enterprise-manager-server/rhel-9_arm64.njk @@ -0,0 +1,11 @@ +{% extends "products/postgres-enterprise-manager-server/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% set ssutilsName %}sslutils_<xx> postgresql<xx>-contrib{% endset %} +{% set ssutilsExtendedName %}edb-postgresextended<xx>-contrib{% endset %} +{% set ssutilsExtendedFirstName %}edb-postgresextended<xx>-sslutils{% endset %} +{% block prerequisites %}{% endblock prerequisites %} +{% block firewallCommand %}```shell + firewall-cmd --permanent --zone=public --add-port=8443/tcp + + firewall-cmd --reload + ```{% endblock firewallCommand %} diff --git a/install_template/templates/products/postgresql/rhel-9_arm64.njk b/install_template/templates/products/postgresql/rhel-9_arm64.njk new file mode 100644 index 00000000000..70b8b9d5529 --- /dev/null +++ b/install_template/templates/products/postgresql/rhel-9_arm64.njk @@ -0,0 +1 @@ +{% extends "products/postgresql/rhel-9-or-ol-9.njk" %} \ No newline at end of file diff --git a/install_template/templates/products/replication-server/arm64_index.njk b/install_template/templates/products/replication-server/arm64_index.njk index d12e583ba05..3d0ff9d34b6 100644 --- a/install_template/templates/products/replication-server/arm64_index.njk +++ b/install_template/templates/products/replication-server/arm64_index.njk @@ -5,5 +5,5 @@ {% block frontmatter %} {{super()}} redirects: - - /eprs/latest/03_installation/03_installing_rpm_package/x86_amd64/ + - /eprs/latest/03_installation/03_installing_rpm_package/arm64/ {% endblock frontmatter %} diff --git a/install_template/templates/products/replication-server/rhel-9_arm64.njk
b/install_template/templates/products/replication-server/rhel-9_arm64.njk new file mode 100644 index 00000000000..62ec8c125f5 --- /dev/null +++ b/install_template/templates/products/replication-server/rhel-9_arm64.njk @@ -0,0 +1,3 @@ +{% extends "products/replication-server/base.njk" %} +{% set platformBaseTemplate = "rhel-9-or-ol-9" %} +{% block prerequisites %}{% endblock prerequisites %} \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 2af06ad76f7..bf1b2fddf1d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -53,7 +53,7 @@ "react-instantsearch": "^7.8.1", "rehype-parse": "^7.0.1", "remark-admonitions": "github:josh-heyer/remark-admonitions", - "sass": "^1.77.2", + "sass": "^1.77.6", "truncate-utf8-bytes": "^1.0.2", "unist-util-visit-parents": "^3.1.1" }, @@ -2026,10 +2026,9 @@ "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" }, "node_modules/@babel/runtime": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.5.tgz", - "integrity": "sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g==", - "license": "MIT", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz", + "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -2720,19 +2719,14 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.18", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", - "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" - }, "node_modules/@lezer/common": { "version": "0.15.12", "resolved": "https://registry.npmjs.org/@lezer/common/-/common-0.15.12.tgz", @@ -3861,16 +3855,278 @@ } }, "node_modules/@parcel/watcher": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.1.0.tgz", - "integrity": "sha512-8s8yYjd19pDSsBpbkOHnT6Z2+UJSuLQx61pCFM0s5wSRvKCEMDjd/cHY3/GI1szHIWbpXpsJdg3V6ISGGx9xDw==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.0.tgz", + "integrity": "sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ==", "hasInstallScript": true, "dependencies": { + "detect-libc": "^1.0.3", "is-glob": "^4.0.3", "micromatch": "^4.0.5", - "node-addon-api": "^3.2.1", - "node-gyp-build": "^4.3.0" + "node-addon-api": "^7.0.0" + }, + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + 
"type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.0", + "@parcel/watcher-darwin-arm64": "2.5.0", + "@parcel/watcher-darwin-x64": "2.5.0", + "@parcel/watcher-freebsd-x64": "2.5.0", + "@parcel/watcher-linux-arm-glibc": "2.5.0", + "@parcel/watcher-linux-arm-musl": "2.5.0", + "@parcel/watcher-linux-arm64-glibc": "2.5.0", + "@parcel/watcher-linux-arm64-musl": "2.5.0", + "@parcel/watcher-linux-x64-glibc": "2.5.0", + "@parcel/watcher-linux-x64-musl": "2.5.0", + "@parcel/watcher-win32-arm64": "2.5.0", + "@parcel/watcher-win32-ia32": "2.5.0", + "@parcel/watcher-win32-x64": "2.5.0" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz", + "integrity": "sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz", + "integrity": "sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz", + "integrity": "sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz", + "integrity": "sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz", + "integrity": "sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz", + "integrity": "sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], 
+ "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz", + "integrity": "sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz", + "integrity": "sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz", + "integrity": "sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz", + "integrity": "sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz", + "integrity": "sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz", + "integrity": "sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz", + "integrity": "sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], "engines": { "node": ">= 
10.0.0" }, @@ -4224,18 +4480,18 @@ } }, "node_modules/@types/eslint-scope": { - "version": "3.7.4", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", - "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", "dependencies": { "@types/eslint": "*", "@types/estree": "*" } }, "node_modules/@types/estree": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.0.tgz", - "integrity": "sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==" }, "node_modules/@types/get-port": { "version": "3.2.0", @@ -4252,9 +4508,9 @@ } }, "node_modules/@types/google.maps": { - "version": "3.55.11", - "resolved": "https://registry.npmjs.org/@types/google.maps/-/google.maps-3.55.11.tgz", - "integrity": "sha512-F3VuPtjKj4UGuyym75pqmgPBOHbT/i7I6/D+4DdtSzbeu2aWZG1ENwpbZOd46uO+PSAz9flJEhxxi+b4MVb4gQ==" + "version": "3.58.1", + "resolved": "https://registry.npmjs.org/@types/google.maps/-/google.maps-3.58.1.tgz", + "integrity": "sha512-X9QTSvGJ0nCfMzYOnaVs/k6/4L+7F5uCS+4iUmkLEls6J9S/Phv+m/i3mDeyc49ZBgwab3EFO1HEoBY7k98EGQ==" }, "node_modules/@types/hast": { "version": "2.3.4", @@ -4356,9 +4612,9 @@ "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" }, "node_modules/@types/qs": { - "version": "6.9.15", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.15.tgz", - "integrity": "sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==" + "version": "6.9.17", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.17.tgz", + "integrity": "sha512-rX4/bPcfmvxHDv0XjfJELTTr+iB+tn032nPILqHm5wbthUUUuVtNGGqzhya9XUxjTP8Fpr0qYgSZZKxGY++svQ==" }, "node_modules/@types/reach__router": { "version": "1.3.11", @@ -4711,133 +4967,133 @@ } }, "node_modules/@webassemblyjs/ast": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", - "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1" + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" } }, "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", - "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": 
"sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==" }, "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", - "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==" }, "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", - "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==" }, "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", - "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", - "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==" }, "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", - "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" } }, "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", 
- "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", - "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", - "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==" }, "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", - "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/helper-wasm-section": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-opt": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "@webassemblyjs/wast-printer": "1.11.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", - "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wasm-opt": { - 
"version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", - "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-buffer": "1.11.1", - "@webassemblyjs/wasm-gen": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", - "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/helper-api-error": "1.11.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.1", - "@webassemblyjs/ieee754": "1.11.1", - "@webassemblyjs/leb128": "1.11.1", - "@webassemblyjs/utf8": "1.11.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", - "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", "dependencies": { - "@webassemblyjs/ast": "1.11.1", + "@webassemblyjs/ast": "1.14.1", "@xtuc/long": "4.2.2" } }, @@ -4874,9 +5130,9 @@ } }, "node_modules/acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "bin": { "acorn": "bin/acorn" }, @@ -4884,14 +5140,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", - "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -4985,9 +5233,9 @@ } }, "node_modules/algoliasearch-helper": { - "version": "3.22.1", - "resolved": 
"https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.1.tgz", - "integrity": "sha512-fSxJ4YreH4kOME9CnKazbAn2tK/rvBoV37ETd6nTt4j7QfkcnW+c+F22WfuE9Q/sRpvOMnUwU/BXAVEiwW7p/w==", + "version": "3.22.5", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.5.tgz", + "integrity": "sha512-lWvhdnc+aKOKx8jyA3bsdEgHzm/sglC4cYdMG4xSQyRiPLJVJtH/IVYZG3Hp6PkTEhQqhyVYkeP9z2IlcHJsWw==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -5746,20 +5994,20 @@ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", - "raw-body": "2.5.1", + "qs": "6.13.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -5908,20 +6156,20 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.21.5", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.5.tgz", - "integrity": "sha512-tUkiguQGW7S3IhB7N+c2MV/HZPSCPAAiYBZXLsBhFB/PCy6ZKKsZrmBayHV9fdGV/ARIfJ14NkxKzRDjvp7L6w==", + "version": "4.24.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", + "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", "funding": [ { "type": "opencollective", @@ -5930,13 +6178,17 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "caniuse-lite": "^1.0.30001449", - "electron-to-chromium": "^1.4.284", - "node-releases": "^2.0.8", - "update-browserslist-db": "^1.0.10" + "caniuse-lite": "^1.0.30001669", + "electron-to-chromium": "^1.5.41", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.1" }, "bin": { "browserslist": "cli.js" @@ -6064,12 +6316,18 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dependencies": { - "function-bind": 
"^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -6123,9 +6381,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001651", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001651.tgz", - "integrity": "sha512-9Cf+Xv1jJNe1xPZLGuUXLNkE1BoDkqRqYyFJ9TDYSqhduqA4hu4oR9HluGoWYQC/aj8WHjsGVV+bwkh0+tegRg==", + "version": "1.0.30001683", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001683.tgz", + "integrity": "sha512-iqmNnThZ0n70mNwvxpEC2nBJ037ZHZUoBI5Gorh1Mw6IlEAZujEoU1tXA628iZfzm7R9FvFzxbfdgml82a3k8Q==", "funding": [ { "type": "opencollective", @@ -6479,9 +6737,9 @@ } }, "node_modules/clipboardy/node_modules/cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "version": "6.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz", + "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==", "dependencies": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -7067,9 +7325,9 @@ } }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -7495,6 +7753,22 @@ "node": ">=10" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/define-lazy-prop": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", @@ -7829,9 +8103,9 @@ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" }, "node_modules/electron-to-chromium": { - "version": "1.4.361", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.361.tgz", - "integrity": "sha512-VocVwjPp05HUXzf3xmL0boRn5b0iyqC7amtDww84Jb1QJNPBc7F69gJyEeXRoriLBC4a5pSyckdllrXAg4mmRA==" + "version": "1.5.64", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.64.tgz", + "integrity": "sha512-IXEuxU+5ClW2IGEYFC2T7szbyVgehupCWQe5GNh+H065CD6U6IFN0s4KeAMFGNmQolRU4IV7zGBWSYMmZ8uuqQ==" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -7847,9 +8121,9 @@ } }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": 
"sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", "engines": { "node": ">= 0.8" } @@ -7903,9 +8177,9 @@ } }, "node_modules/enhanced-resolve": { - "version": "5.12.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz", - "integrity": "sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ==", + "version": "5.17.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -8015,6 +8289,25 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/es-get-iterator": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", @@ -8077,13 +8370,14 @@ } }, "node_modules/es5-ext": { - "version": "0.10.62", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.62.tgz", - "integrity": "sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==", + "version": "0.10.64", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.64.tgz", + "integrity": "sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==", "hasInstallScript": true, "dependencies": { "es6-iterator": "^2.0.3", "es6-symbol": "^3.1.3", + "esniff": "^2.0.1", "next-tick": "^1.1.0" }, "engines": { @@ -8126,9 +8420,9 @@ } }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "engines": { "node": ">=6" } @@ -8726,6 +9020,25 @@ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, + "node_modules/esniff": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esniff/-/esniff-2.0.1.tgz", + "integrity": "sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==", + "dependencies": { + "d": "^1.0.1", + "es5-ext": "^0.10.62", + "event-emitter": "^0.3.5", + "type": "^2.7.2" + }, + "engines": { + "node": ">=0.10" + } + }, + 
"node_modules/esniff/node_modules/type": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/type/-/type-2.7.3.tgz", + "integrity": "sha512-8j+1QmAbPvLZow5Qpi6NCaN8FB60p/6x8/vfNqOk/hC+HuvFZhL4+WfekuhQLiqFZXOgQdrs3B+XxEmCc6b3FQ==" + }, "node_modules/espree": { "version": "7.3.1", "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz", @@ -8886,36 +9199,36 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.21.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", + "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -9005,9 +9318,9 @@ } }, "node_modules/express/node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", "engines": { "node": ">= 0.6" } @@ -9263,9 +9576,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -9282,12 +9595,12 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -9374,9 +9687,9 @@ "integrity": 
"sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==" }, "node_modules/follow-redirects": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", - "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", "funding": [ { "type": "individual", @@ -9683,9 +9996,12 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/function.prototype.name": { "version": "1.1.5", @@ -11578,13 +11894,18 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -11928,11 +12249,11 @@ } }, "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -12005,6 +12326,17 @@ "node": ">=8" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hast-to-hyperscript": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", @@ -12760,30 +13092,30 @@ } }, "node_modules/instantsearch-ui-components": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/instantsearch-ui-components/-/instantsearch-ui-components-0.7.0.tgz", - "integrity": 
"sha512-4dHmCFbKWap5iSR2VmF5pSBECGQbAa8sPMoazMihYY2/0ahVzdHrOOtT8/hferG1A/xTT8nzfU7PIg+8lomNsA==", + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/instantsearch-ui-components/-/instantsearch-ui-components-0.9.0.tgz", + "integrity": "sha512-ugQ+XdPx3i3Sxu+woRo6tPE0Fz/kWd4KblTUfZD1TZZBsm/8qFvcbg5dVBDvXX9v7ntoyugXCzC/XCZMzrSkig==", "dependencies": { "@babel/runtime": "^7.1.2" } }, "node_modules/instantsearch.js": { - "version": "4.72.2", - "resolved": "https://registry.npmjs.org/instantsearch.js/-/instantsearch.js-4.72.2.tgz", - "integrity": "sha512-6/4XbXk91f9RV9TNvy+yKxtwZoKDsk0xBXdlA7nKYo//DtFkA/jNoqzuUJIWzf/59wSLxs7/LN/H4FLbBp6UNw==", + "version": "4.75.5", + "resolved": "https://registry.npmjs.org/instantsearch.js/-/instantsearch.js-4.75.5.tgz", + "integrity": "sha512-XnplrpnSfFzVtoL7YBKWbf3FPwmjfSFM8BN+nnuRsfzwUwWgb7zQLxh2mRtohUFI9fnA7vAcJlDcmichfYgjmA==", "dependencies": { "@algolia/events": "^4.0.1", "@types/dom-speech-recognition": "^0.0.1", - "@types/google.maps": "^3.45.3", + "@types/google.maps": "^3.55.12", "@types/hogan.js": "^3.0.0", "@types/qs": "^6.5.3", - "algoliasearch-helper": "3.22.1", + "algoliasearch-helper": "3.22.5", "hogan.js": "^3.0.2", "htm": "^3.0.0", - "instantsearch-ui-components": "0.7.0", + "instantsearch-ui-components": "0.9.0", "preact": "^10.10.0", "qs": "^6.5.1 < 6.10", - "search-insights": "^2.13.0" + "search-insights": "^2.17.2" }, "peerDependencies": { "algoliasearch": ">= 3.1 < 6" @@ -14519,10 +14851,9 @@ } }, "node_modules/markdown-to-jsx": { - "version": "7.4.7", - "resolved": "https://registry.npmjs.org/markdown-to-jsx/-/markdown-to-jsx-7.4.7.tgz", - "integrity": "sha512-0+ls1IQZdU6cwM1yu0ZjjiVWYtkbExSyUIFU2ZeDIFuZM1W42Mh4OlJ4nb4apX4H8smxDHRdFaoIVJGwfv5hkg==", - "license": "MIT", + "version": "7.7.0", + "resolved": "https://registry.npmjs.org/markdown-to-jsx/-/markdown-to-jsx-7.7.0.tgz", + "integrity": "sha512-130nIMbJY+woOQJ11xTqEtYko60t6EpNkZuqjKMferL3udtob3nRfzXOdsiA26NPemiR7w/hR8M3/B9yiYPGZg==", "engines": { "node": ">= 10" }, @@ -14908,9 +15239,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/merge-stream": { "version": "2.0.0", @@ -15170,11 +15504,11 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "node_modules/msgpackr": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.8.5.tgz", - "integrity": "sha512-mpPs3qqTug6ahbblkThoUY2DQdNXcm4IapwOS3Vm/87vmpzLVelvp9h3It1y9l1VPpiFLV11vfOXnmeEwiIXwg==", + "version": "1.11.2", + "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.2.tgz", + "integrity": "sha512-F9UngXRlPyWCDEASDpTf6c9uNhGPTqnTeLVt7bN+bU1eajoR/8V9ys2BRaV5C/e5ihE6sJ9uPIKaYt6bFuO32g==", "optionalDependencies": { - "msgpackr-extract": "^3.0.1" + "msgpackr-extract": "^3.0.2" } }, "node_modules/msgpackr-extract": { @@ -15232,9 +15566,9 @@ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==" }, "node_modules/nanoid": { - 
"version": "3.3.6", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", - "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", "funding": [ { "type": "github", @@ -15365,9 +15699,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/node-addon-api": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", - "integrity": "sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==" + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==" }, "node_modules/node-fetch": { "version": "2.6.9", @@ -15388,16 +15722,6 @@ } } }, - "node_modules/node-gyp-build": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.0.tgz", - "integrity": "sha512-NTZVKn9IylLwUzaKjkas1e4u2DLNcV4rdYagA4PWdPwW87Bi7z+BznyKSRwS/761tV/lzCGXplWsiaMjLqP2zQ==", - "bin": { - "node-gyp-build": "bin.js", - "node-gyp-build-optional": "optional.js", - "node-gyp-build-test": "build-test.js" - } - }, "node_modules/node-gyp-build-optional-packages": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.0.3.tgz", @@ -15494,9 +15818,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.10.tgz", - "integrity": "sha512-5GFldHPXVG/YZmFzJvKK2zDSzPKhEp0+ZR5SVaoSag9fsL5YgHbUHDfnG5494ISANDcK4KwPXAx2xqVEydmd7w==" + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" }, "node_modules/nopt": { "version": "1.0.10", @@ -15599,9 +15923,12 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -16227,9 +16554,9 @@ } }, "node_modules/password-prompt/node_modules/cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "version": "6.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz", + "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==", "dependencies": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -16337,9 +16664,9 @@ } }, "node_modules/path-to-regexp": { - "version": "0.1.7", - 
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "node_modules/path-type": { "version": "4.0.0", @@ -16367,9 +16694,9 @@ "integrity": "sha512-rxJOljMuWtYlvREBmd6TZYanfcPhNUKtGDZBjBBS8WG1dpN2iwPsRJZgQqN/OtJuiQckdRFOfzogqJClTrsi7g==" }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -16526,9 +16853,9 @@ "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==" }, "node_modules/postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.49", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", + "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", "funding": [ { "type": "opencollective", @@ -16537,12 +16864,16 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "nanoid": "^3.3.7", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -17058,9 +17389,9 @@ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" }, "node_modules/preact": { - "version": "10.22.1", - "resolved": "https://registry.npmjs.org/preact/-/preact-10.22.1.tgz", - "integrity": "sha512-jRYbDDgMpIb5LHq3hkI0bbl+l/TQ9UnkdQ0ww+lp+4MMOdqaUYdFc5qeyP+IV8FAd/2Em7drVPeKdQxsiWCf/A==", + "version": "10.25.0", + "resolved": "https://registry.npmjs.org/preact/-/preact-10.25.0.tgz", + "integrity": "sha512-6bYnzlLxXV3OSpUxLdaxBmE7PMOu0aR3pG6lryK/0jmvcDFPlcXGQAt5DpK3RITWiDrfYZRI0druyaK/S9kYLg==", "funding": { "type": "opencollective", "url": "https://opencollective.com/preact" @@ -17116,11 +17447,10 @@ } }, "node_modules/prettier": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.2.5.tgz", - "integrity": "sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", + "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", "dev": true, - "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, @@ -17297,11 +17627,11 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": 
"sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -17388,9 +17718,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -17463,14 +17793,13 @@ } }, "node_modules/react-bootstrap": { - "version": "2.10.2", - "resolved": "https://registry.npmjs.org/react-bootstrap/-/react-bootstrap-2.10.2.tgz", - "integrity": "sha512-UvB7mRqQjivdZNxJNEA2yOQRB7L9N43nBnKc33K47+cH90/ujmnMwatTCwQLu83gLhrzAl8fsa6Lqig/KLghaA==", - "license": "MIT", + "version": "2.10.5", + "resolved": "https://registry.npmjs.org/react-bootstrap/-/react-bootstrap-2.10.5.tgz", + "integrity": "sha512-XueAOEn64RRkZ0s6yzUTdpFtdUXs5L5491QU//8ZcODKJNDLt/r01tNyriZccjgRImH1REynUc9pqjiRMpDLWQ==", "dependencies": { - "@babel/runtime": "^7.22.5", + "@babel/runtime": "^7.24.7", "@restart/hooks": "^0.4.9", - "@restart/ui": "^1.6.8", + "@restart/ui": "^1.6.9", "@types/react-transition-group": "^4.4.6", "classnames": "^2.3.2", "dom-helpers": "^5.2.1", @@ -17690,33 +18019,33 @@ } }, "node_modules/react-instantsearch": { - "version": "7.11.4", - "resolved": "https://registry.npmjs.org/react-instantsearch/-/react-instantsearch-7.11.4.tgz", - "integrity": "sha512-yzGxA9SxN+BR/ZruISJYXdZJV3siZu+qBO5glHtfJG/V597CwCzcWRSJQf0z/lSxiYlbfc6eouBpPglwCik8Bw==", + "version": "7.13.8", + "resolved": "https://registry.npmjs.org/react-instantsearch/-/react-instantsearch-7.13.8.tgz", + "integrity": "sha512-14t7qAlp5H2dD+ljQLPhMZt40JX7YGKXdAooGZBo8IB+0M7NHAhsn/SVRdxSObb9bx1hg4uaBIZ51Wajgw8Xbg==", "dependencies": { "@babel/runtime": "^7.1.2", - "instantsearch-ui-components": "0.7.0", - "instantsearch.js": "4.72.2", - "react-instantsearch-core": "7.11.4" + "instantsearch-ui-components": "0.9.0", + "instantsearch.js": "4.75.5", + "react-instantsearch-core": "7.13.8" }, "peerDependencies": { - "algoliasearch": ">= 3.1 < 5", + "algoliasearch": ">= 3.1 < 6", "react": ">= 16.8.0 < 19", "react-dom": ">= 16.8.0 < 19" } }, "node_modules/react-instantsearch-core": { - "version": "7.11.4", - "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-7.11.4.tgz", - "integrity": "sha512-LGlioZu3vfk5ODQDwUO0cZrOA60aieEx/P6RZhstwSpjmmQW+2P14lOk38OIQ8MA23fEqwCmLJn9kBKpgqfHjg==", + "version": "7.13.8", + "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-7.13.8.tgz", + "integrity": "sha512-1bcHCFJ3Qw9HWFkeM5MNMqzw0BI4LIR61qI1lPAcJREih9NXy3R3inPeOk2T2FKkgAp5PJbjNQ3DOTxErV88jg==", "dependencies": { "@babel/runtime": "^7.1.2", - "algoliasearch-helper": "3.22.1", - "instantsearch.js": "4.72.2", + "algoliasearch-helper": "3.22.5", + "instantsearch.js": "4.75.5", "use-sync-external-store": "^1.0.0" }, "peerDependencies": { - "algoliasearch": ">= 3.1 < 5", + "algoliasearch": ">= 3.1 < 6", "react": ">= 16.8.0 < 
19" } }, @@ -19082,10 +19411,9 @@ } }, "node_modules/sass": { - "version": "1.77.2", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.2.tgz", - "integrity": "sha512-eb4GZt1C3avsX3heBNlrc7I09nyT00IUuo4eFhAbeXWU2fvA7oXI53SxODVAA+zgZCk9aunAZgO+losjR3fAwA==", - "license": "MIT", + "version": "1.77.6", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz", + "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -19165,9 +19493,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/sass/node_modules/immutable": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.0.tgz", - "integrity": "sha512-0AOCmOip+xgJwEVTQj1EfiDDOkPmuyllDuTuEX+DDXUgapLAsBIfkg3sxCYyCEA8mQqZrrxPUGjcOQ2JS3WLkg==" + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.7.tgz", + "integrity": "sha512-1hqclzwYwjRDFLjcFxOM5AYkkG0rpFPpr1RLPMEuGczoS7YA8gLhy8SWXYRAA/XwfEHpfo3cw5JGioS32fnMRw==" }, "node_modules/sax": { "version": "1.2.4", @@ -19184,9 +19512,9 @@ } }, "node_modules/schema-utils": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", - "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", "dependencies": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", @@ -19201,9 +19529,9 @@ } }, "node_modules/search-insights": { - "version": "2.14.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.14.0.tgz", - "integrity": "sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==" + "version": "2.17.3", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.3.tgz", + "integrity": "sha512-RQPdCYTa8A68uM2jwxoY842xDhvx3E5LFL1LxvxCNMev4o5mLuokczhzjAgGwUZBAmOKZknArSxLKmXtIi2AxQ==" }, "node_modules/section-matter": { "version": "1.0.0", @@ -19251,9 +19579,9 @@ } }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -19286,6 +19614,14 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/send/node_modules/mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", @@ -19321,14 +19657,14 @@ } }, "node_modules/serve-static": { - 
"version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" @@ -19339,6 +19675,22 @@ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", @@ -19458,13 +19810,17 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -19699,9 +20055,9 @@ } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "engines": { "node": ">=0.10.0" } @@ -20521,12 +20877,12 @@ } }, "node_modules/terser": { - "version": "5.16.9", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.16.9.tgz", - "integrity": "sha512-HPa/FdTB9XGI2H1/keLFZHxl6WNvAI4YalHGtDQTlMnJcoqSab1UwL4l1hGEhs6/GmLHBZIg/YgB++jcbzoOEg==", + "version": "5.36.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.36.0.tgz", + "integrity": "sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w==", "dependencies": { - "@jridgewell/source-map": "^0.3.2", - "acorn": "^8.5.0", + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, @@ -20538,15 +20894,15 @@ } }, 
"node_modules/terser-webpack-plugin": { - "version": "5.3.7", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.7.tgz", - "integrity": "sha512-AfKwIktyP7Cu50xNjXF/6Qb5lBNzYaWpU6YfoX3uZicTx0zTy0stDDCsvjDapKsSDvOeWo5MEq4TmdBy2cNoHw==", + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", + "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", "dependencies": { - "@jridgewell/trace-mapping": "^0.3.17", + "@jridgewell/trace-mapping": "^0.3.20", "jest-worker": "^27.4.5", "schema-utils": "^3.1.1", "serialize-javascript": "^6.0.1", - "terser": "^5.16.5" + "terser": "^5.26.0" }, "engines": { "node": ">= 10.13.0" @@ -21281,9 +21637,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz", - "integrity": "sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", + "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", "funding": [ { "type": "opencollective", @@ -21292,14 +21648,18 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + "picocolors": "^1.1.0" }, "bin": { - "browserslist-lint": "cli.js" + "update-browserslist-db": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" @@ -21626,9 +21986,9 @@ } }, "node_modules/watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz", + "integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==", "dependencies": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" @@ -21662,33 +22022,32 @@ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" }, "node_modules/webpack": { - "version": "5.79.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.79.0.tgz", - "integrity": "sha512-3mN4rR2Xq+INd6NnYuL9RC9GAmc1ROPKJoHhrZ4pAjdMFEkJJWrsPw8o2JjCIyQyTu7rTXYn4VG6OpyB3CobZg==", - "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^1.0.0", - "@webassemblyjs/ast": "1.11.1", - "@webassemblyjs/wasm-edit": "1.11.1", - "@webassemblyjs/wasm-parser": "1.11.1", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.7.6", - "browserslist": "^4.14.5", + "version": "5.96.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.96.1.tgz", + "integrity": "sha512-l2LlBSvVZGhL4ZrPwyr8+37AunkcYj5qh8o6u2/2rzoPc8gxFJkLj1WxNgooi9pnoc06jh0BjuXnamM4qlujZA==", + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + "@webassemblyjs/ast": "^1.12.1", + "@webassemblyjs/wasm-edit": "^1.12.1", + "@webassemblyjs/wasm-parser": "^1.12.1", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.10.0", + 
"enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", + "graceful-fs": "^4.2.11", "json-parse-even-better-errors": "^2.3.1", "loader-runner": "^4.2.0", "mime-types": "^2.1.27", "neo-async": "^2.6.2", - "schema-utils": "^3.1.0", + "schema-utils": "^3.2.0", "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.7", - "watchpack": "^2.4.0", + "terser-webpack-plugin": "^5.3.10", + "watchpack": "^2.4.1", "webpack-sources": "^3.2.3" }, "bin": { diff --git a/package.json b/package.json index 401456e5d9a..b7fd88f98fd 100644 --- a/package.json +++ b/package.json @@ -81,7 +81,7 @@ "react-instantsearch": "^7.8.1", "rehype-parse": "^7.0.1", "remark-admonitions": "github:josh-heyer/remark-admonitions", - "sass": "^1.77.2", + "sass": "^1.77.6", "truncate-utf8-bytes": "^1.0.2", "unist-util-visit-parents": "^3.1.1" }, diff --git a/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.3_rel_notes.mdx b/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.3_rel_notes.mdx new file mode 100644 index 00000000000..39e42dccc34 --- /dev/null +++ b/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.3_rel_notes.mdx @@ -0,0 +1,17 @@ +--- +title: "EDB*Plus 41.3.0 release notes" +navTitle: Version 41.3.0 +--- + +Released: 27 Nov 2024 + +New features, enhancements, bug fixes, and other changes in EDB\*Plus 41.3.0 include: + +| Type | Description | Addresses | +|--------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------| +| Enhancement | EDB\*Plus has been certified for use with EDB Postgres Advanced Server version 17. | | +| Enhancement | Enhanced the behavior where the `SET LINESIZE` command doesn't behave as expected when set to a value greater than 10. This enhancement mainly applies to the constants used in the select statement. | #103591
#35673 |
+| Bug fix      | Corrected the behavior where `SPOOL` introduced redundant blank lines after each output. | #37846 |
+| Bug fix      | Fixed the issue where EDB\*Plus was unable to process a command when `--` appeared inside a `/* */` comment. | #100496 |
+| Bug fix      | Fixed the issue that caused even trivial procedural scripts to execute slowly in EDB\*Plus. | #37747 |
+| Bug fix      | Fixed an issue with configuring EDB\*Plus for the SSL certificate authentication method when the password isn't specified. | #37970 |
diff --git a/product_docs/docs/edb_plus/41/02_release_notes/index.mdx b/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
index 5269d7c8505..9c0d7714452 100644
--- a/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
+++ b/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
@@ -2,6 +2,7 @@
 title: "Release notes"
 navigation:
+- edbplus_41.3_rel_notes
 - edbplus_41.2_rel_notes
 - edbplus_41.1_rel_notes
 - edbplus_41.0_rel_notes
@@ -11,8 +12,9 @@ EDB\*Plus is a utility program that provides a command line interface to EDB Pos
 The EDB\*Plus documentation describes the latest version of EDB\*Plus Version 41. The release notes provide information on what was new in each release.
 
-| Version                              | Release Date |
-| ------------------------------------ | ------------ |
+| Version                              | Release Date |
+|--------------------------------------|--------------|
+| [41.3.0](edbplus_41.3_rel_notes.mdx) | 27 Nov 2024  |
 | [41.2.0](edbplus_41.2_rel_notes.mdx) | 23 Aug 2023  |
 | [41.1.0](edbplus_41.1_rel_notes.mdx) | 20 Apr 2023  |
 | [41.0.0](edbplus_41.0_rel_notes.mdx) | 14 Feb 2023  |
diff --git a/product_docs/docs/edb_plus/41/02a_supported_platforms.mdx b/product_docs/docs/edb_plus/41/02a_supported_platforms.mdx
index 40a4469527a..fe018b791a1 100644
--- a/product_docs/docs/edb_plus/41/02a_supported_platforms.mdx
+++ b/product_docs/docs/edb_plus/41/02a_supported_platforms.mdx
@@ -7,8 +7,8 @@ EDB\*Plus is supported on the same platforms as EDB Postgres Advanced Server. To
 
 ## Supported database versions
 
 The following list of EDB Postgres Advanced Server (EPAS) versions are currently supported for use with EDB\*Plus:
+- EPAS 17
+- EPAS 16
 - EPAS 15
 - EPAS 14
 - EPAS 13
-- EPAS 12
-- EPAS 11
diff --git a/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx b/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
index 561c258e9f3..a69ead27205 100644
--- a/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
+++ b/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
@@ -78,9 +78,9 @@ The following example shows user `enterprisedb` with password `password` connect
 ```text
 C:\Program Files\edb\edbplus>edbplus enterprisedb/password
-Connected to EnterpriseDB 14.1.0 (localhost:5444/edb) AS enterprisedb
+Connected to EnterpriseDB 16.4.1 (localhost:5444/edb) AS enterprisedb
 
-EDB*Plus: Release 14 (Build 40.0.0)
+EDB*Plus: (Build 41.3.0)
 Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved.
 
 SQL>
@@ -90,9 +90,9 @@ The following example shows user `enterprisedb` with password `password` connect
 ```text
 C:\Program Files\edb\edbplus>edbplus enterprisedb/password@localhost:5445/edb
-Connected to EnterpriseDB 14.1.0 (localhost:5445/edb) AS enterprisedb
+Connected to EnterpriseDB 16.4.1 (localhost:5445/edb) AS enterprisedb
 
-EDB*Plus: Release 14 (Build 40.0.0)
+EDB*Plus: (Build 41.3.0)
 Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved.
SQL> @@ -102,9 +102,9 @@ Using variable `hr_5445` in the `login.sql` file, the following shows how it is ```text C:\Program Files\edb\edbplus>edbplus enterprisedb/password@hr_5445 -Connected to EnterpriseDB 14.0.0 (localhost:5445/hr) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (localhost:5445/hr) AS enterprisedb -EDB*Plus: Release 14 (Build 40.1.0) +EDB*Plus: (Build 41.3.0) Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. SQL> @@ -127,7 +127,7 @@ The following example executes a script file, `dept_query.sql`, after connecting ```sql C:\Program Files\edb\edbplus>edbplus enterprisedb/password @dept_query -Connected to EnterpriseDB 14.1.0 (localhost:5444/edb) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (localhost:5444/edb) AS enterprisedb SQL> SELECT * FROM dept; diff --git a/product_docs/docs/edb_plus/41/05_using_edb_plus_with_ssl.mdx b/product_docs/docs/edb_plus/41/05_using_edb_plus_with_ssl.mdx index 337d345fef2..714c1de1d9c 100644 --- a/product_docs/docs/edb_plus/41/05_using_edb_plus_with_ssl.mdx +++ b/product_docs/docs/edb_plus/41/05_using_edb_plus_with_ssl.mdx @@ -284,9 +284,9 @@ $ export PGSSLCERTPASS=keypass $ export PGSSLKEYPASS=exppass $ cd /usr/edb/edbplus $ ./edbplus.sh enterprisedb/password@192.168.2.22:5444/edb?ssl=true -Connected to EnterpriseDB 14.0.0 (192.168.2.22:5444/edb) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (192.168.2.22:5444/edb) AS enterprisedb -EDB*Plus: Release 14 (Build 40.0.1) +EDB*Plus: (Build 41.3.0) Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. SQL> @@ -301,9 +301,9 @@ $ export PGSSLCERTPASS=keypass $ export PGSSLKEYPASS=exppass $ cd /usr/edb/edbplus $ ./edbplus.sh enterprisedb/password@192.168.2.22:5444/edb?ssl=true -Connected to EnterpriseDB 14.0.0 (192.168.2.22:5444/edb) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (192.168.2.22:5444/edb) AS enterprisedb -EDB*Plus: Release 14 (Build 40.0.1) +EDB*Plus: (Build 41.3.0) Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. SQL> diff --git a/product_docs/docs/edb_plus/41/06_command_summary.mdx b/product_docs/docs/edb_plus/41/06_command_summary.mdx index 10348cfde1c..e09121a27f8 100644 --- a/product_docs/docs/edb_plus/41/06_command_summary.mdx +++ b/product_docs/docs/edb_plus/41/06_command_summary.mdx @@ -320,7 +320,7 @@ In this example, the database connection is changed to database `edb` on the loc ```sql SQL> CONNECT smith/mypassword@localhost:5445/edb Disconnected from EnterpriseDB Database. -Connected to EnterpriseDB 14.0.0 (localhost:5445/edb) AS smith +Connected to EnterpriseDB 16.4.1 (localhost:5445/edb) AS smith ``` In this session, the connection is changed to the username `enterprisedb`. The host defaults to the localhost, the port defaults to `5444` (which isn't the same as the port previously used), and the database defaults to `edb`. @@ -328,7 +328,7 @@ In this session, the connection is changed to the username `enterprisedb`. The h ```sql SQL> CONNECT enterprisedb/password Disconnected from EnterpriseDB Database. -Connected to EnterpriseDB 14.0.0 (localhost:5444/edb) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (localhost:5444/edb) AS enterprisedb ``` This example shows connectivity for a multi-node cluster (one primary node and two secondary nodes) setup. The given multi-host `connectstring` syntax is used to establish a connection with the active primary database server. 
In this case, using `CONNECT` command, the connection is established with the primary database node on host `192.168.22.24` at port `5444`. @@ -336,7 +336,7 @@ This example shows connectivity for a multi-node cluster (one primary node and t ```sql SQL> CONNECT enterprisedb/edb@192.168.22.24:5444,192.168.22.25:5445,192.168.22.26:5446/edb?targetServerType=primary Disconnected from EnterpriseDB Database. -Connected to EnterpriseDB 15.3.0 (192.168.22.24:5444/edb) AS enterprisedb +Connected to EnterpriseDB 16.4.1 (192.168.22.24:5444/edb) AS enterprisedb ``` ## DEFINE diff --git a/product_docs/docs/edb_plus/41/installing/index.mdx b/product_docs/docs/edb_plus/41/installing/index.mdx index e39e4d016a8..8d3ba5149f9 100644 --- a/product_docs/docs/edb_plus/41/installing/index.mdx +++ b/product_docs/docs/edb_plus/41/installing/index.mdx @@ -12,8 +12,8 @@ redirects: navigation: - linux_x86_64 - - linux_ppc64le - linux_arm64 + - linux_ppc64le - windows - configuring_linux_installation --- @@ -54,6 +54,12 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/edbplus_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/edbplus_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/edbplus_debian_12) diff --git a/product_docs/docs/edb_plus/41/installing/linux_arm64/edbplus_rhel_9.mdx b/product_docs/docs/edb_plus/41/installing/linux_arm64/edbplus_rhel_9.mdx new file mode 100644 index 00000000000..ac035a6544a --- /dev/null +++ b/product_docs/docs/edb_plus/41/installing/linux_arm64/edbplus_rhel_9.mdx @@ -0,0 +1,49 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB*Plus on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: + - /edb_plus/41/03_installing_edb_plus/install_on_linux/arm64/edbplus_rhel9_arm +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. 
+
+## Install the package
+
+```shell
+sudo dnf -y install edb-edbplus
+```
+
+## Initial configuration
+
+After performing a Linux installation of EDB\*Plus, you must set the values of environment variables that allow EDB\*Plus to locate your Java installation:
+
+```shell
+export JAVA_HOME=<path_to_java>
+export PATH=<path_to_java>/bin:$PATH
+```
diff --git a/product_docs/docs/edb_plus/41/installing/linux_arm64/index.mdx b/product_docs/docs/edb_plus/41/installing/linux_arm64/index.mdx
index b59092fa855..57ef6887bac 100644
--- a/product_docs/docs/edb_plus/41/installing/linux_arm64/index.mdx
+++ b/product_docs/docs/edb_plus/41/installing/linux_arm64/index.mdx
@@ -3,11 +3,18 @@ title: "Installing EDB*Plus on Linux AArch64 (ARM64)"
 navTitle: "On Linux ARM64"
 
 navigation:
+  - edbplus_rhel_9
   - edbplus_debian_12
 ---
 
 Operating system-specific install instructions are described in the corresponding documentation:
 
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](edbplus_rhel_9)
+
+- [Oracle Linux (OL) 9](edbplus_rhel_9)
+
 ### Debian and derivatives
 
 - [Debian 12](edbplus_debian_12)
diff --git a/product_docs/docs/efm/4/06_monitoring_efm_cluster.mdx b/product_docs/docs/efm/4/06_monitoring_efm_cluster.mdx
index 94c111c78d9..07e4873073a 100644
--- a/product_docs/docs/efm/4/06_monitoring_efm_cluster.mdx
+++ b/product_docs/docs/efm/4/06_monitoring_efm_cluster.mdx
@@ -76,7 +76,9 @@ Standby priority host list:
 	172.19.12.163 172.19.10.2
 ```
 
-The `Promote Status` section of the report is the result of a direct query from the node on which you are invoking the `cluster-status` command to each database in the cluster. The query also returns the transaction log location of each database. Because the queries to each database return at different times, the LSNs might not match even if streaming replication is working normally for the cluster. To get the latest view of replication, connect to the primary database, and execute SQL command `SELECT * FROM pg_stat_replication;`.
+The `Promote Status` section of the report includes information related to promotion in the cluster. The LSN information is used, along with the `Standby priority host list`, when choosing a standby to promote. If there is a mismatch in replay LSNs, Failover Manager doesn't allow a switchover (though the promotion of a standby is always allowed).
+
+The LSN information is the result of a direct query, from the node on which you invoke the `cluster-status` command, to each database in the cluster. The query also returns the transaction log location of each database. Because the queries to each database return at different times, the LSNs might not match even if streaming replication is working normally for the cluster. To get the latest view of replication, connect to the primary database and execute the SQL command `SELECT * FROM pg_stat_replication;`.
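+
+For example, to compare each standby's replay progress from the primary, you can run a query like the following (a sketch; the `sent_lsn`/`replay_lsn` column names and the `pg_wal_lsn_diff` function assume PostgreSQL 10 or later):
+
+```sql
+-- Run on the primary: shows how far each standby's replay lags behind what was sent
+SELECT application_name,
+       state,
+       sent_lsn,
+       replay_lsn,
+       pg_wal_lsn_diff(sent_lsn, replay_lsn) AS replay_lag_bytes
+FROM pg_stat_replication;
+```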
```text Promote Status: diff --git a/product_docs/docs/efm/4/installing/index.mdx b/product_docs/docs/efm/4/installing/index.mdx index 8cac86cdc53..2bb0829289d 100644 --- a/product_docs/docs/efm/4/installing/index.mdx +++ b/product_docs/docs/efm/4/installing/index.mdx @@ -72,6 +72,12 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/efm_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/efm_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/efm_debian_12) diff --git a/product_docs/docs/efm/4/installing/linux_arm64/efm_rhel_9.mdx b/product_docs/docs/efm/4/installing/linux_arm64/efm_rhel_9.mdx new file mode 100644 index 00000000000..dfd8881b0a2 --- /dev/null +++ b/product_docs/docs/efm/4/installing/linux_arm64/efm_rhel_9.mdx @@ -0,0 +1,66 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing Failover Manager on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: + - /efm/4/03_installing_efm/arm64/efm_rhel9_arm +--- + +## Prerequisites + +Before you begin the installation process: + +- Install Postgres on the same host (not needed for witness nodes). + + - See [Installing EDB Postgres Advanced Server](/epas/latest/installing/) + + - See [PostgreSQL Downloads](https://www.postgresql.org/download/) + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-efm<4x> +``` + +Where `<4x>` is the version of Failover Manager that you're installing. For example, if you're installing version 4.10, the package name is `edb-efm410`. + +The installation process creates a user named efm that has privileges to invoke scripts that control the Failover Manager service for clusters owned by enterprisedb or postgres. + +## Initial configuration + +If you're using Failover Manager to monitor a cluster owned by a user other than enterprisedb or postgres, see [Extending Failover Manager permissions](../../04_configuring_efm/04_extending_efm_permissions/#extending_efm_permissions). + +After installing on each node of the cluster: + +1. Modify the [cluster properties file](../../04_configuring_efm/01_cluster_properties/#cluster_properties) on each node. +2. Modify the [cluster members file](../../04_configuring_efm/03_cluster_members/#cluster_members) on each node. +3. If applicable, configure and test virtual IP address settings and any scripts that are identified in the cluster properties file. +4. Start the agent on each node of the cluster. 
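+
+For example, on a systemd host you might start the agent like this (a sketch; the `edb-efm-4.<x>` unit name pattern is an assumption — substitute the version you installed):
+
+```shell
+# Enable and start the Failover Manager 4.10 agent (assumed unit name)
+sudo systemctl enable edb-efm-4.10
+sudo systemctl start edb-efm-4.10
+```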
+For more information, see [Controlling the Failover Manager service](../../08_controlling_efm_service/).
diff --git a/product_docs/docs/efm/4/installing/linux_arm64/index.mdx b/product_docs/docs/efm/4/installing/linux_arm64/index.mdx
index caa4e6ab3f9..ecb0c136441 100644
--- a/product_docs/docs/efm/4/installing/linux_arm64/index.mdx
+++ b/product_docs/docs/efm/4/installing/linux_arm64/index.mdx
@@ -10,11 +10,18 @@ navTitle: "On Linux ARM64"
 redirects:
 
 navigation:
+  - efm_rhel_9
   - efm_debian_12
 ---
 
 Operating system-specific install instructions are described in the corresponding documentation:
 
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](efm_rhel_9)
+
+- [Oracle Linux (OL) 9](efm_rhel_9)
+
 ### Debian and derivatives
 
 - [Debian 12](efm_debian_12)
diff --git a/product_docs/docs/epas/11/installing/index.mdx b/product_docs/docs/epas/11/installing/index.mdx
index 7a87927f339..61bbb80e188 100644
--- a/product_docs/docs/epas/11/installing/index.mdx
+++ b/product_docs/docs/epas/11/installing/index.mdx
@@ -56,4 +56,4 @@ Select a link to access the applicable installation instructions:
 ## Windows
 
-- [Windows Server 2019](windows)
+- [Windows Server 2022](windows)
diff --git a/product_docs/docs/epas/12/epas_rel_notes/epas12_22_00_rel_notes.mdx b/product_docs/docs/epas/12/epas_rel_notes/epas12_22_00_rel_notes.mdx
new file mode 100644
index 00000000000..e1eaf057251
--- /dev/null
+++ b/product_docs/docs/epas/12/epas_rel_notes/epas12_22_00_rel_notes.mdx
@@ -0,0 +1,16 @@
+---
+title: EDB Postgres Advanced Server 12.22.00 release notes
+navTitle: "Version 12.22.00"
+---
+
+Released: 21 Nov 2024
+
+EDB Postgres Advanced Server 12.22.00 includes the following enhancements and bug fixes:
+
+| Type           | Description | Addresses |
+|----------------|-------------|-----------|
+| Upstream merge | Merged with community PostgreSQL 12.22. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 12.22 Release Notes](https://www.postgresql.org/docs/release/12.22/) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) |
+| Bug fix        | Fixed an auditing issue. With `edb_audit`, you can now also audit the initial connection process and authentication messages. | #39540 |
+| Bug fix        | Fixed an `EDB*Loader` issue that caused the `negative bitmapset member not allowed` error for partitioned tables. | #39562 |
+| Bug fix        | Fixed an issue for Oracle `Pro*C` and ecpg with `PROC`. Corrected the macro definition in `sqlda-proc.h` to fix the compilation errors. | #40573 |
+| Bug fix        | Fixed a replication issue. Logging in to a locked account on a physical replica is no longer allowed. | |
diff --git a/product_docs/docs/epas/12/epas_rel_notes/index.mdx b/product_docs/docs/epas/12/epas_rel_notes/index.mdx
index 8b330d5279d..0b9f44f1d17 100644
--- a/product_docs/docs/epas/12/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/12/epas_rel_notes/index.mdx
@@ -2,6 +2,7 @@
 navTitle: Release Notes
 title: "EDB Postgres Advanced Server Release Notes"
 navigation:
+- epas12_22_00_rel_notes
 - epas12_20_25_rel_notes
 - epas12_19_24_rel_notes
 - epas12_18_23_rel_notes
@@ -32,6 +33,7 @@ The EDB Postgres Advanced Server (Advanced Server) documentation describes the l
 | Version | Release Date | Upstream Merges |
 | ----------------------------------------- | ------------ | -------------------------------------------------------------- |
+| [12.22.00](epas12_22_00_rel_notes.mdx)    | 21 Nov 2024  | [12.22](https://www.postgresql.org/docs/12/release-12-22.html) |
 | [12.20.25](epas12_20_25_rel_notes.mdx)    | 08 Aug 2024  | [12.20](https://www.postgresql.org/docs/12/release-12-20.html) |
 | [12.19.24](epas12_19_24_rel_notes.mdx)    | 09 May 2024  | [12.19](https://www.postgresql.org/docs/12/release-12-19.html) |
 | [12.18.23](epas12_18_23_rel_notes.mdx)    | 08 Feb 2023  | [12.18](https://www.postgresql.org/docs/12/release-12-18.html) |
diff --git a/product_docs/docs/epas/12/installing/index.mdx b/product_docs/docs/epas/12/installing/index.mdx
index ebd230fc881..820beddfc35 100644
--- a/product_docs/docs/epas/12/installing/index.mdx
+++ b/product_docs/docs/epas/12/installing/index.mdx
@@ -56,4 +56,4 @@ Select a link to access the applicable installation instructions:
 ## Windows
 
-- [Windows Server 2019](windows)
+- [Windows Server 2022](windows)
diff --git a/product_docs/docs/epas/13/epas_rel_notes/epas13_18_00_rel_notes.mdx b/product_docs/docs/epas/13/epas_rel_notes/epas13_18_00_rel_notes.mdx
new file mode 100644
index 00000000000..cb8c9ab8974
--- /dev/null
+++ b/product_docs/docs/epas/13/epas_rel_notes/epas13_18_00_rel_notes.mdx
@@ -0,0 +1,16 @@
+---
+title: EDB Postgres Advanced Server 13.18.00 release notes
+navTitle: "Version 13.18.00"
+---
+
+Released: 21 Nov 2024
+
+EDB Postgres Advanced Server 13.18.00 includes the following enhancements and bug fixes:
+
+| Type           | Description | Addresses |
+|----------------|-------------|-----------|
+| Upstream merge | Merged with community PostgreSQL 13.18. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 13.18 Release Notes](https://www.postgresql.org/docs/release/13.18/) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) |
+| Bug fix        | Fixed an auditing issue. With `edb_audit`, you can now also audit the initial connection process and authentication messages. | #39540 |
+| Bug fix        | Fixed an `EDB*Loader` issue that caused the `negative bitmapset member not allowed` error for partitioned tables. | #39562 |
+| Bug fix        | Fixed an issue for Oracle `Pro*C` and ecpg with `PROC`. Corrected the macro definition in `sqlda-proc.h` to fix the compilation errors. | #40573 |
+| Bug fix        | Fixed a replication issue. Logging in to a locked account on a physical replica is no longer allowed. | |
diff --git a/product_docs/docs/epas/13/epas_rel_notes/index.mdx b/product_docs/docs/epas/13/epas_rel_notes/index.mdx
index ff699006f7b..9b5cf247a4f 100644
--- a/product_docs/docs/epas/13/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/13/epas_rel_notes/index.mdx
@@ -2,6 +2,7 @@
 navTitle: Release Notes
 title: "EDB Postgres Advanced Server Release Notes"
 navigation:
+- epas13_18_00_rel_notes
 - epas13_16_22_rel_notes
 - epas13_15_21_rel_notes
 - epas13_14_20_rel_notes
@@ -25,6 +26,7 @@ The EDB Postgres Advanced Server (Advanced Server) documentation describes the l
 | Version | Release Date | Upstream Merges |
 |-------------------------------------|--------------|------------------------------------------------------------------------------------------------------------------------|
+| [13.18.00](epas13_18_00_rel_notes)  | 21 Nov 2024  | [13.18](https://www.postgresql.org/docs/release/13.18/) |
 | [13.16.22](epas13_16_22_rel_notes)  | 08 Aug 2024  | [13.16](https://www.postgresql.org/docs/release/13.16/) |
 | [13.15.21](epas13_15_21_rel_notes)  | 09 May 2024  | [13.15](https://www.postgresql.org/docs/release/13.15/) |
 | [13.14.20](epas13_14_20_rel_notes)  | 08 Feb 2024  | [13.14](https://www.postgresql.org/docs/release/13.14/) |
diff --git a/product_docs/docs/epas/13/installing/index.mdx b/product_docs/docs/epas/13/installing/index.mdx
index 674914f33be..61ab80fcbd0 100644
--- a/product_docs/docs/epas/13/installing/index.mdx
+++ b/product_docs/docs/epas/13/installing/index.mdx
@@ -14,6 +14,7 @@ redirects:
 navigation:
   - linux_x86_64
+  - linux_arm64
   - linux_ppc64le
   - windows
   - linux_install_details
@@ -54,6 +55,14 @@ Select a link to access the applicable installation instructions:
 
 - [SLES 15](linux_ppc64le/epas_sles_15)
 
+## Linux [AArch64 (ARM64)](linux_arm64)
+
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](linux_arm64/epas_rhel_9)
+
+- [Oracle Linux (OL) 9](linux_arm64/epas_rhel_9)
+
 ## Windows
 
-- [Windows Server 2019](windows)
+- [Windows Server 2022](windows)
diff --git a/product_docs/docs/epas/13/installing/linux_arm64/epas_rhel_9.mdx b/product_docs/docs/epas/13/installing/linux_arm64/epas_rhel_9.mdx
new file mode 100644
index 00000000000..337c7a30287
--- /dev/null
+++ b/product_docs/docs/epas/13/installing/linux_arm64/epas_rhel_9.mdx
@@ -0,0 +1,157 @@
+---
+navTitle: RHEL 9 or OL 9
+title: Installing EDB Postgres Advanced Server on RHEL 9 or OL 9 arm64
+# This topic is generated from templates. If you have feedback on it, instead of
+# editing the page and creating a pull request, please enter a GitHub issue and
+# the documentation team will update the templates accordingly.
+
+redirects:
+  - /epas/13/epas_inst_linux/installing_epas_using_edb_repository/arm64/epas_rhel9_arm
+  - /epas/13/epas_inst_linux/installing_epas_using_edb_repository/arm/epas_rhel9_arm
+---
+
+## Prerequisites
+
+Before you begin the installation process:
+
+- Set up the EDB repository.
+
+  Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step.
+
+  To determine if your repository exists, enter this command:
+
+  `dnf repolist | grep enterprisedb`
+
+  If no output is generated, the repository isn't installed.
+
+  To set up the EDB repository:
+
+  1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads).
+
+  1. Select the button that provides access to the EDB repository.
+
+  1. Select the platform and software that you want to download.
+
+  1. Follow the instructions for setting up the EDB repository.
+
+- Install the EPEL repository:
+
+  ```shell
+  sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+  ```
+
+- If you are also installing PostGIS, enable additional repositories to resolve dependencies:
+
+  ```shell
+  ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms"
+  ```
+
+  !!!note
+
+  If you are using a public cloud RHEL image, `subscription manager` may not be enabled and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`.
+
+  !!!
+
+## Install the package
+
+```shell
+sudo dnf -y install edb-as<xx>-server
+```
+
+Where `<xx>` is the version of the EDB Postgres Advanced Server you're installing. For example, if you're installing version 13, the package name is `edb-as13-server`.
+
+To install an individual component:
+
+```shell
+sudo dnf -y install <package_name>
+```
+
+Where `package_name` can be any of the available packages from the [available package list](/epas/13/installing/linux_install_details/rpm_packages/).
+
+Installing the server package creates an operating system user named enterprisedb. The user is assigned a user ID (UID) and a group ID (GID). The user has no default password. Use the `passwd` command to assign a password for the user. The default shell for the user is `bash`, and the user's home directory is `/var/lib/edb/as13`.
+
+## Initial configuration
+
+Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password.
+
+First, you need to initialize and start the database cluster. The `edb-as-13-setup` script creates a cluster in Oracle-compatible mode with the `edb` sample database in the cluster. To create a cluster in Postgres mode, see [Initializing the cluster in Postgres mode](../linux_install_details/managing_an_advanced_server_installation/#initializing-the-cluster-in-postgres-mode).
+
+```shell
+sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/as13/bin/edb-as-13-setup initdb
+
+sudo systemctl start edb-as-13
+```
+
+To work in your cluster, log in as the enterprisedb user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string.
+
+```shell
+sudo su - enterprisedb
+
+psql edb
+```
+
+The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file.
+
+Before changing the authentication method, assign a password to the database superuser, enterprisedb. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../epas_guide/03_database_administration/01_configuration_parameters/01_setting_new_parameters/#modifying-the-pg_hbaconf-file).
+
+```sql
+ALTER ROLE enterprisedb IDENTIFIED BY password;
+```
+
+## Experiment
+
+Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table.
+
+First, use psql to create a database named `hr` to hold human resource information.
+
+```sql
+# running in psql
+CREATE DATABASE hr;
+__OUTPUT__
+CREATE DATABASE
+```
+
+Connect to the `hr` database inside psql:
+
+```
+\c hr
+__OUTPUT__
+psql (13.0.0, server 13.0.0)
+You are now connected to database "hr" as user "enterprisedb".
+```
+
+Create columns to hold department numbers, unique department names, and locations:
+
+```
+CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk
+PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc
+varchar(13));
+__OUTPUT__
+CREATE TABLE
+```
+
+Insert values into the `dept` table:
+
+```
+INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK');
+__OUTPUT__
+INSERT 0 1
+```
+
+```
+INSERT INTO dept VALUES (20,'RESEARCH','DALLAS');
+__OUTPUT__
+INSERT 0 1
+```
+
+View the table data by selecting the values from the table:
+
+```
+SELECT * FROM dept;
+__OUTPUT__
+deptno | dname      | loc
+--------+------------+----------
+10     | ACCOUNTING | NEW YORK
+20     | RESEARCH   | DALLAS
+(2 rows)
+```
diff --git a/product_docs/docs/epas/13/installing/linux_arm64/index.mdx b/product_docs/docs/epas/13/installing/linux_arm64/index.mdx
new file mode 100644
index 00000000000..52ed78c6861
--- /dev/null
+++ b/product_docs/docs/epas/13/installing/linux_arm64/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Installing EDB Postgres Advanced Server on Linux AArch64 (ARM64)"
+navTitle: "On Linux ARM64"
+
+navigation:
+  - epas_rhel_9
+---
+
+Operating system-specific install instructions are described in the corresponding documentation:
+
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](epas_rhel_9)
+
+- [Oracle Linux (OL) 9](epas_rhel_9)
diff --git a/product_docs/docs/epas/14/epas_rel_notes/epas14_15_0_rel_notes.mdx b/product_docs/docs/epas/14/epas_rel_notes/epas14_15_0_rel_notes.mdx
new file mode 100644
index 00000000000..0e7a6ce14eb
--- /dev/null
+++ b/product_docs/docs/epas/14/epas_rel_notes/epas14_15_0_rel_notes.mdx
@@ -0,0 +1,16 @@
+---
+title: EDB Postgres Advanced Server 14.15.0 release notes
+navTitle: "Version 14.15.0"
+---
+
+Released: 21 Nov 2024
+
+EDB Postgres Advanced Server 14.15.0 includes the following enhancements and bug fixes:
+
+| Type           | Description | Addresses |
+|----------------|-------------|-----------|
+| Upstream merge | Merged with community PostgreSQL 14.15. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 14.15 Release Notes](https://www.postgresql.org/docs/release/14.15/) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) |
+| Bug fix        | Fixed an auditing issue. With `edb_audit`, you can now also audit the initial connection process and authentication messages. | #39540 |
+| Bug fix        | Fixed an `EDB*Loader` issue that caused the `negative bitmapset member not allowed` error for partitioned tables. | #39562 |
+| Bug fix        | Fixed an issue for Oracle `Pro*C` and ecpg with `PROC`. Corrected the macro definition in `sqlda-proc.h` to fix the compilation errors. | #40573 |
+| Bug fix        | Fixed a replication issue. Logging in to a locked account on a physical replica is no longer allowed. | |
diff --git a/product_docs/docs/epas/14/epas_rel_notes/index.mdx b/product_docs/docs/epas/14/epas_rel_notes/index.mdx
index f0fa139453c..4fa6b9b6922 100644
--- a/product_docs/docs/epas/14/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/14/epas_rel_notes/index.mdx
@@ -2,6 +2,7 @@
 navTitle: Release notes
 title: "EDB Postgres Advanced Server release notes"
 navigation:
+- epas14_15_0_rel_notes
 - epas14_13_1_rel_notes
 - epas14_13_0_rel_notes
 - epas14_12_0_rel_notes
@@ -25,6 +26,7 @@ The EDB Postgres Advanced Server (EDB Postgres Advanced Server) documentation de
 | Version | Release date | Upstream merges |
 |-----------------------------------|--------------|--------------------------------------------------------------------------------------------------------------------------|
+| [14.15.0](epas14_15_0_rel_notes)  | 21 Nov 2024  | [14.15](https://www.postgresql.org/docs/14/release-14-15.html) |
 | [14.13.1](epas14_13_1_rel_notes)  | 16 Aug 2024  | |
 | [14.13.0](epas14_13_0_rel_notes)  | 08 Aug 2024  | [14.13](https://www.postgresql.org/docs/14/release-14-13.html) |
 | [14.12.0](epas14_12_0_rel_notes)  | 09 May 2024  | [14.12](https://www.postgresql.org/docs/14/release-14-12.html) |
diff --git a/product_docs/docs/epas/14/installing/index.mdx b/product_docs/docs/epas/14/installing/index.mdx
index e11325dffa1..159288cf4fc 100644
--- a/product_docs/docs/epas/14/installing/index.mdx
+++ b/product_docs/docs/epas/14/installing/index.mdx
@@ -14,6 +14,7 @@ redirects:
 navigation:
   - linux_x86_64
+  - linux_arm64
   - linux_ppc64le
   - windows
   - linux_install_details
@@ -54,6 +55,14 @@ Select a link to access the applicable installation instructions:
 
 - [SLES 15](linux_ppc64le/epas_sles_15)
 
+## Linux [AArch64 (ARM64)](linux_arm64)
+
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](linux_arm64/epas_rhel_9)
+
+- [Oracle Linux (OL) 9](linux_arm64/epas_rhel_9)
+
 ## Windows
 
-- [Windows Server 2019](windows)
+- [Windows Server 2022](windows)
diff --git a/product_docs/docs/epas/14/installing/linux_arm64/epas_rhel_9.mdx b/product_docs/docs/epas/14/installing/linux_arm64/epas_rhel_9.mdx
new file mode 100644
index 00000000000..44c3edccb28
--- /dev/null
+++ b/product_docs/docs/epas/14/installing/linux_arm64/epas_rhel_9.mdx
@@ -0,0 +1,157 @@
+---
+navTitle: RHEL 9 or OL 9
+title: Installing EDB Postgres Advanced Server on RHEL 9 or OL 9 arm64
+# This topic is generated from templates. If you have feedback on it, instead of
+# editing the page and creating a pull request, please enter a GitHub issue and
+# the documentation team will update the templates accordingly.
+
+redirects:
+  - /epas/14/epas_inst_linux/installing_epas_using_edb_repository/arm64/epas_rhel9_arm
+  - /epas/14/epas_inst_linux/installing_epas_using_edb_repository/arm/epas_rhel9_arm
+---
+
+## Prerequisites
+
+Before you begin the installation process:
+
+- Set up the EDB repository.
+
+  Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step.
+
+  To determine if your repository exists, enter this command:
+
+  `dnf repolist | grep enterprisedb`
+
+  If no output is generated, the repository isn't installed.
+
+  To set up the EDB repository:
+
+  1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads).
+
+  1. Select the button that provides access to the EDB repository.
+
+  1. Select the platform and software that you want to download.
+
+  1. Follow the instructions for setting up the EDB repository.
+
+- Install the EPEL repository:
+
+  ```shell
+  sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+  ```
+
+- If you are also installing PostGIS, enable additional repositories to resolve dependencies:
+
+  ```shell
+  ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms"
+  ```
+
+  !!!note
+
+  If you are using a public cloud RHEL image, `subscription manager` may not be enabled and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`.
+
+  !!!
+
+## Install the package
+
+```shell
+sudo dnf -y install edb-as<xx>-server
+```
+
+Where `<xx>` is the version of the EDB Postgres Advanced Server you're installing. For example, if you're installing version 14, the package name is `edb-as14-server`.
+
+To install an individual component:
+
+```shell
+sudo dnf -y install <package_name>
+```
+
+Where `package_name` can be any of the available packages from the [available package list](/epas/14/installing/linux_install_details/rpm_packages/).
+
+Installing the server package creates an operating system user named enterprisedb. The user is assigned a user ID (UID) and a group ID (GID). The user has no default password. Use the `passwd` command to assign a password for the user. The default shell for the user is `bash`, and the user's home directory is `/var/lib/edb/as14`.
+
+## Initial configuration
+
+Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password.
+
+First, you need to initialize and start the database cluster. The `edb-as-14-setup` script creates a cluster in Oracle-compatible mode with the `edb` sample database in the cluster. To create a cluster in Postgres mode, see [Initializing the cluster in Postgres mode](../linux_install_details/managing_an_advanced_server_installation/#initializing-the-cluster-in-postgres-mode).
+
+```shell
+sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/as14/bin/edb-as-14-setup initdb
+
+sudo systemctl start edb-as-14
+```
+
+To work in your cluster, log in as the enterprisedb user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string.
+
+```shell
+sudo su - enterprisedb
+
+psql edb
+```
+
+The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file.
+
+Before changing the authentication method, assign a password to the database superuser, enterprisedb. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../epas_guide/03_database_administration/01_configuration_parameters/01_setting_new_parameters/#modifying-the-pg_hbaconf-file).
+
+```sql
+ALTER ROLE enterprisedb IDENTIFIED BY password;
+```
+
+## Experiment
+
+Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table.
+ +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +psql (14.0.0, server 14.0.0) +You are now connected to database "hr" as user "enterprisedb". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/epas/14/installing/linux_arm64/index.mdx b/product_docs/docs/epas/14/installing/linux_arm64/index.mdx new file mode 100644 index 00000000000..52ed78c6861 --- /dev/null +++ b/product_docs/docs/epas/14/installing/linux_arm64/index.mdx @@ -0,0 +1,15 @@ +--- +title: "Installing EDB Postgres Advanced Server on Linux AArch64 (ARM64)" +navTitle: "On Linux ARM64" + +navigation: + - epas_rhel_9 +--- + +Operating system-specific install instructions are described in the corresponding documentation: + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](epas_rhel_9) + +- [Oracle Linux (OL) 9](epas_rhel_9) diff --git a/product_docs/docs/epas/15/epas_rel_notes/epas15_10_0_rel_notes.mdx b/product_docs/docs/epas/15/epas_rel_notes/epas15_10_0_rel_notes.mdx new file mode 100644 index 00000000000..299ba80aac6 --- /dev/null +++ b/product_docs/docs/epas/15/epas_rel_notes/epas15_10_0_rel_notes.mdx @@ -0,0 +1,18 @@ +--- +title: EDB Postgres Advanced Server 15.10.0 release notes +navTitle: "Version 15.10.0 " +--- + +Released: 21 Nov 2024 + +EDB Postgres Advanced Server 15.10.0 includes the following enhancements and bug fixes: + +| Type | Description | Addresses | +|----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------| +| Upstream merge | Merged with community PostgreSQL 15.10. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 15.10 Release Notes](https://www.postgresql.org/docs/release/15.10/) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) | +| Bug fix | Fixed an issue for Auditing. With `edb_audit`, now you can audit the initial connection process or authentication messages as well. | #39540 | +| Bug fix | Fixed an issue for `EDB*Loader`. Now the `negative bitmapset member not allowed` error is resolved for partitioned tables. | #39562 | +| Bug fix | Fixed an issue for `Oracle Proc*c`and `ecpg with PROC` . Corrected the macro definition in `sqlda-proc.h` to fix the compilation errors when used. | #40573 | +| Bug fix | Fixed an issue for replication. Now the login of locked account on the physical replica is not allowed. 
+| Bug fix        | Fixed an issue for `COPY` and `EDB*Loader`. The server no longer crashes when using dynamic partitioning in `COPY` and `EDB*Loader`. | #38963 |
+| Bug fix        | Fixed an issue with tab completion for object names used in SQL statements. | |
diff --git a/product_docs/docs/epas/15/epas_rel_notes/index.mdx b/product_docs/docs/epas/15/epas_rel_notes/index.mdx
index 8f897876349..3c937115559 100644
--- a/product_docs/docs/epas/15/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/15/epas_rel_notes/index.mdx
@@ -2,6 +2,7 @@
 navTitle: Release notes
 title: "EDB Postgres Advanced Server release notes"
 navigation:
+- epas15_10_0_rel_notes
 - epas15_8_1_rel_notes
 - epas15_8_0_rel_notes
 - epas15_7_0_rel_notes
@@ -19,6 +20,7 @@ The EDB Postgres Advanced Server documentation describes the latest version of E
 | Version | Release date | Upstream merges |
 |--------------------------------|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [15.10.0](epas15_10_0_rel_notes) | 21 Nov 2024 | [15.10](https://www.postgresql.org/docs/release/15.10/) |
 | [15.8.1](epas15_8_1_rel_notes) | 16 Aug 2024  | |
 | [15.8.0](epas15_8_0_rel_notes) | 08 Aug 2024  | [15.8](https://www.postgresql.org/docs/release/15.8/) |
 | [15.7.0](epas15_7_0_rel_notes) | 09 May 2024  | [15.7](https://www.postgresql.org/docs/release/15.7/) |
diff --git a/product_docs/docs/epas/15/installing/index.mdx b/product_docs/docs/epas/15/installing/index.mdx
index d1ea85b3cf0..31ecf8ee083 100644
--- a/product_docs/docs/epas/15/installing/index.mdx
+++ b/product_docs/docs/epas/15/installing/index.mdx
@@ -18,6 +18,7 @@ redirects:
 navigation:
   - linux_x86_64
+  - linux_arm64
   - linux_ppc64le
   - windows
   - linux_install_details
@@ -58,6 +59,14 @@ Select a link to access the applicable installation instructions:
 
 - [SLES 15](linux_ppc64le/epas_sles_15)
 
+## Linux [AArch64 (ARM64)](linux_arm64)
+
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](linux_arm64/epas_rhel_9)
+
+- [Oracle Linux (OL) 9](linux_arm64/epas_rhel_9)
+
 ## Windows
 
-- [Windows Server 2019](windows)
+- [Windows Server 2022](windows)
diff --git a/product_docs/docs/epas/15/installing/linux_arm64/epas_rhel_9.mdx b/product_docs/docs/epas/15/installing/linux_arm64/epas_rhel_9.mdx
new file mode 100644
index 00000000000..abf73aba6cc
--- /dev/null
+++ b/product_docs/docs/epas/15/installing/linux_arm64/epas_rhel_9.mdx
@@ -0,0 +1,157 @@
+---
+navTitle: RHEL 9 or OL 9
+title: Installing EDB Postgres Advanced Server on RHEL 9 or OL 9 arm64
+# This topic is generated from templates. If you have feedback on it, instead of
+# editing the page and creating a pull request, please enter a GitHub issue and
+# the documentation team will update the templates accordingly.
+
+redirects:
+  - /epas/15/epas_inst_linux/installing_epas_using_edb_repository/arm64/epas_rhel9_arm
+  - /epas/15/epas_inst_linux/installing_epas_using_edb_repository/arm/epas_rhel9_arm
+---
+
+## Prerequisites
+
+Before you begin the installation process:
+
+- Set up the EDB repository.
+
+  Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step.
+
+  To determine if your repository exists, enter this command:
+
+  `dnf repolist | grep enterprisedb`
+
+  If no output is generated, the repository isn't installed.
+
+  To set up the EDB repository:
+
+  1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads).
+
+  1. Select the button that provides access to the EDB repository.
+
+  1. Select the platform and software that you want to download.
+
+  1. Follow the instructions for setting up the EDB repository.
+
+- Install the EPEL repository:
+
+  ```shell
+  sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+  ```
+
+- If you are also installing PostGIS, enable additional repositories to resolve dependencies:
+
+  ```shell
+  ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms"
+  ```
+
+  !!!note
+
+  If you are using a public cloud RHEL image, `subscription manager` may not be enabled and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`.
+
+  !!!
+
+## Install the package
+
+```shell
+sudo dnf -y install edb-as<xx>-server
+```
+
+Where `<xx>` is the version of the EDB Postgres Advanced Server you're installing. For example, if you're installing version 15, the package name is `edb-as15-server`.
+
+To install an individual component:
+
+```shell
+sudo dnf -y install <package_name>
+```
+
+Where `package_name` can be any of the available packages from the [available package list](/epas/15/installing/linux_install_details/rpm_packages/).
+
+Installing the server package creates an operating system user named enterprisedb. The user is assigned a user ID (UID) and a group ID (GID). The user has no default password. Use the `passwd` command to assign a password for the user. The default shell for the user is `bash`, and the user's home directory is `/var/lib/edb/as15`.
+
+## Initial configuration
+
+Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password.
+
+First, you need to initialize and start the database cluster. The `edb-as-15-setup` script creates a cluster in Oracle-compatible mode with the `edb` sample database in the cluster. To create a cluster in Postgres mode, see [Initializing the cluster in Postgres mode](../linux_install_details/managing_an_advanced_server_installation/specifying_cluster_options/#initializing-the-cluster-in-postgres-mode).
+
+```shell
+sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/as15/bin/edb-as-15-setup initdb
+
+sudo systemctl start edb-as-15
+```
+
+To work in your cluster, log in as the enterprisedb user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string.
+
+```shell
+sudo su - enterprisedb
+
+psql edb
+```
+
+The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file.
+
+Before changing the authentication method, assign a password to the database superuser, enterprisedb. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../database_administration/01_configuration_parameters/01_setting_new_parameters/#modifying-the-pg_hbaconf-file).
+
+```sql
+ALTER ROLE enterprisedb IDENTIFIED BY password;
+```
+
+## Experiment
+
+Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table.
+
+First, use psql to create a database named `hr` to hold human resource information.
+
+```sql
+# running in psql
+CREATE DATABASE hr;
+__OUTPUT__
+CREATE DATABASE
+```
+
+Connect to the `hr` database inside psql:
+
+```
+\c hr
+__OUTPUT__
+psql (15.0.0, server 15.0.0)
+You are now connected to database "hr" as user "enterprisedb".
+```
+
+Create columns to hold department numbers, unique department names, and locations:
+
+```
+CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk
+PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc
+varchar(13));
+__OUTPUT__
+CREATE TABLE
+```
+
+Insert values into the `dept` table:
+
+```
+INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK');
+__OUTPUT__
+INSERT 0 1
+```
+
+```
+INSERT INTO dept VALUES (20,'RESEARCH','DALLAS');
+__OUTPUT__
+INSERT 0 1
+```
+
+View the table data by selecting the values from the table:
+
+```
+SELECT * FROM dept;
+__OUTPUT__
+deptno | dname      | loc
+--------+------------+----------
+10     | ACCOUNTING | NEW YORK
+20     | RESEARCH   | DALLAS
+(2 rows)
+```
diff --git a/product_docs/docs/epas/15/installing/linux_arm64/index.mdx b/product_docs/docs/epas/15/installing/linux_arm64/index.mdx
new file mode 100644
index 00000000000..52ed78c6861
--- /dev/null
+++ b/product_docs/docs/epas/15/installing/linux_arm64/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Installing EDB Postgres Advanced Server on Linux AArch64 (ARM64)"
+navTitle: "On Linux ARM64"
+
+navigation:
+  - epas_rhel_9
+---
+
+Operating system-specific install instructions are described in the corresponding documentation:
+
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](epas_rhel_9)
+
+- [Oracle Linux (OL) 9](epas_rhel_9)
diff --git a/product_docs/docs/epas/16/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx b/product_docs/docs/epas/16/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx
index 52fa3b400fa..eb6b5564d4a 100644
--- a/product_docs/docs/epas/16/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx
+++ b/product_docs/docs/epas/16/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx
@@ -264,7 +264,7 @@ int main(int argc, char *argv[])
     EXEC SQL CLOSE employees;
     EXEC SQL DISCONNECT;
 }
-/************************************************************
+/************************************************************/
 ```
 
 The code sample begins by including the prototypes and type definitions for the C `stdio` library and then declares the `main` function:
diff --git a/product_docs/docs/epas/16/application_programming/ecpgplus_guide/04_using_descriptors.mdx b/product_docs/docs/epas/16/application_programming/ecpgplus_guide/04_using_descriptors.mdx
index 82f0396e7c4..d4fee0c21d4 100644
--- a/product_docs/docs/epas/16/application_programming/ecpgplus_guide/04_using_descriptors.mdx
+++ b/product_docs/docs/epas/16/application_programming/ecpgplus_guide/04_using_descriptors.mdx
@@ -67,14 +67,16 @@ When invoking the application, an end user must provide the name of the database
 
 For example, a user might invoke the sample with the following command:
 
-```c
+```
 ./exec_stmt edb "SELECT * FROM emp"
+```
-
+Sample program:
+```c
 /************************************************************
-/* exec_stmt.pgc
-*
-*/
+ * exec_stmt.pgc
+ *
+ */
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -102,7 +104,7 @@ int main( int argc, char *argv[] )
 
     EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
     EXEC SQL PREPARE query FROM :stmt;
     EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
-    EXEC SQL GET DESCRIPTOR 'parse_desc'
     :col_count = COUNT;
+    EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT;
 
     if( col_count == 0 )
     {
@@ -143,7 +145,7 @@ else
         varchar name[20+1];
     EXEC SQL END DECLARE SECTION;
 
-    EXEC SQL GET DESCRIPTOR 'row_desc'
+    EXEC SQL GET DESCRIPTOR row_desc
         VALUE :col :val = DATA, :ind = INDICATOR, :name = NAME;
@@ -228,7 +230,7 @@ for( col = 1; col <= col_count; col++ )
     printf( "\n" );
 }
-/************************************************************
+/************************************************************/
 ```
 
 The code sample begins by including the prototypes and type definitions for the C `stdio` and `stdlib` libraries, SQL data type symbols, and the `SQLCA` (SQL communications area) structure:
diff --git a/product_docs/docs/epas/16/epas_rel_notes/epas16_6_0_rel_notes.mdx b/product_docs/docs/epas/16/epas_rel_notes/epas16_6_0_rel_notes.mdx
new file mode 100644
index 00000000000..150677ac410
--- /dev/null
+++ b/product_docs/docs/epas/16/epas_rel_notes/epas16_6_0_rel_notes.mdx
@@ -0,0 +1,19 @@
+---
+title: EDB Postgres Advanced Server 16.6.0 release notes
+navTitle: "Version 16.6.0"
+---
+
+Released: 21 Nov 2024
+
+EDB Postgres Advanced Server 16.6.0 includes the following enhancements and bug fixes:
+
+| Type           | Description | Addresses |
+|----------------|-------------|-----------|
+| Upstream merge | Merged with community PostgreSQL 16.6. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 16.6 Release Notes](https://www.postgresql.org/docs/release/16.6/) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) |
+| Bug fix        | Fixed an auditing issue. With `edb_audit`, you can now also audit the initial connection process and authentication messages. | #39540 |
+| Bug fix        | Fixed an `EDB*Loader` issue that caused the `negative bitmapset member not allowed` error for partitioned tables. | #39562 |
+| Bug fix        | Fixed an issue for Oracle `Pro*C` and ecpg with `PROC`. Corrected the macro definition in `sqlda-proc.h` to fix the compilation errors. | #40573 |
+| Bug fix        | Fixed a replication issue. Logging in to a locked account on a physical replica is no longer allowed. | |
+| Bug fix        | Fixed an issue for `COPY` and `EDB*Loader`. The server no longer crashes when using dynamic partitioning in `COPY` and `EDB*Loader`. | #38963 |
+| Bug fix        | Fixed an issue with tab completion for object names used in SQL statements. | |
+| Bug fix        | Fixed an issue with `ecpg`. It now supports `EXEC SQL INCLUDE` in Pro\*C (`-C PROC`) mode. | #41438 |
| #41438 | \ No newline at end of file diff --git a/product_docs/docs/epas/16/epas_rel_notes/index.mdx b/product_docs/docs/epas/16/epas_rel_notes/index.mdx index b85890598b6..32afb7c9134 100644 --- a/product_docs/docs/epas/16/epas_rel_notes/index.mdx +++ b/product_docs/docs/epas/16/epas_rel_notes/index.mdx @@ -2,6 +2,7 @@ navTitle: Release notes title: "EDB Postgres Advanced Server release notes" navigation: +- epas16_6_0_rel_notes - epas16_4_1_rel_notes - epas16_4_0_rel_notes - epas16_3_0_rel_notes @@ -14,13 +15,14 @@ EDB Postgres Advanced Server 16 is built on open-source PostgreSQL 16, which int The EDB Postgres Advanced Server documentation describes the latest version of EDB Postgres Advanced Server 16 including minor releases and patches. These release notes provide information on what was new in each release. -| Version | Release date | Upstream merges | -| ------------------------ | ------------ | ---------------------------------------------------------- | -| [16.4.1](epas16_4_1_rel_notes) | 16 Aug 2024 | | -| [16.4.0](epas16_4_0_rel_notes) | 08 Aug 2024 | [16.4](https://www.postgresql.org/docs/16/release-16-4.html) | -| [16.3.0](epas16_3_0_rel_notes) | 09 May 2024 | [16.3](https://www.postgresql.org/docs/16/release-16-3.html) | -| [16.2](epas16_2_0_rel_notes)| 08 Feb 2024 | [16.2](https://www.postgresql.org/docs/16/release-16-2.html) -| [16.1](epas16_rel_notes) | 09 Nov 2023 | [16.0](https://www.postgresql.org/docs/16/release-16.html),[16.1](https://www.postgresql.org/docs/release/16.1/) | +| Version | Release date | Upstream merges | +|--------------------------------|--------------|------------------------------------------------------------------------------------------------------------------| +| [16.6.0](epas16_6_0_rel_notes) | 21 Nov 2024 | [16.6](https://www.postgresql.org/docs/16/release-16-6.html) | +| [16.4.1](epas16_4_1_rel_notes) | 16 Aug 2024 | | +| [16.4.0](epas16_4_0_rel_notes) | 08 Aug 2024 | [16.4](https://www.postgresql.org/docs/16/release-16-4.html) | +| [16.3.0](epas16_3_0_rel_notes) | 09 May 2024 | [16.3](https://www.postgresql.org/docs/16/release-16-3.html) | +| [16.2](epas16_2_0_rel_notes) | 08 Feb 2024 | [16.2](https://www.postgresql.org/docs/16/release-16-2.html) | +| [16.1](epas16_rel_notes) | 09 Nov 2023 | [16.0](https://www.postgresql.org/docs/16/release-16.html),[16.1](https://www.postgresql.org/docs/release/16.1/) | ## Component certification diff --git a/product_docs/docs/epas/16/installing/index.mdx b/product_docs/docs/epas/16/installing/index.mdx index 254c2681d1d..bc4f033a0f6 100644 --- a/product_docs/docs/epas/16/installing/index.mdx +++ b/product_docs/docs/epas/16/installing/index.mdx @@ -14,8 +14,8 @@ redirects: navigation: - linux_x86_64 - - linux_ppc64le - linux_arm64 + - linux_ppc64le - windows - linux_install_details - windows_install_details @@ -57,10 +57,16 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/epas_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/epas_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/epas_debian_12) ## Windows -- [Windows Server 2019](windows) +- [Windows Server 2022](windows) diff --git a/product_docs/docs/epas/16/installing/linux_arm64/epas_rhel_9.mdx b/product_docs/docs/epas/16/installing/linux_arm64/epas_rhel_9.mdx new file mode 100644 index 00000000000..6a5bba9ad44 --- /dev/null +++ b/product_docs/docs/epas/16/installing/linux_arm64/epas_rhel_9.mdx @@ -0,0 
+1,157 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Postgres Advanced Server on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: + - /epas/16/epas_inst_linux/installing_epas_using_edb_repository/arm64/epas_rhel9_arm + - /epas/16/epas_inst_linux/installing_epas_using_edb_repository/arm/epas_rhel9_arm +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +- If you are also installing PostGIS, enable additional repositories to resolve dependencies: + + ```shell + ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms" + ``` + + !!!note + + If you are using a public cloud RHEL image, `subscription-manager` may not be enabled, and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name, such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`. + + !!! + +## Install the package + +```shell +sudo dnf -y install edb-as<xx>-server +``` + +Where `<xx>` is the version of EDB Postgres Advanced Server you're installing. For example, if you're installing version 16, the package name is `edb-as16-server`. + +To install an individual component: + +```shell +sudo dnf -y install <package_name> +``` + +Where `package_name` can be any of the available packages from the [available package list](/epas/16/installing/linux_install_details/rpm_packages/). + +Installing the server package creates an operating system user named enterprisedb. The user is assigned a user ID (UID) and a group ID (GID). The user has no default password. Use the `passwd` command to assign a password for the user. The default shell for the user is `bash`, and the user's home directory is `/var/lib/edb/as16`. + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration were successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-as-16-setup` script creates a cluster in Oracle-compatible mode with the `edb` sample database in the cluster. To create a cluster in Postgres mode, see [Initializing the cluster in Postgres mode](../linux_install_details/managing_an_advanced_server_installation/specifying_cluster_options/#initializing-the-cluster-in-postgres-mode).
+ +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/as16/bin/edb-as-16-setup initdb + +sudo systemctl start edb-as-16 +``` + +To work in your cluster, log in as the enterprisedb user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo su - enterprisedb + +psql edb +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, enterprisedb. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../database_administration/01_configuration_parameters/01_setting_new_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE enterprisedb IDENTIFIED BY password; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +psql (16.0.0, server 16.0.0) +You are now connected to database "hr" as user "enterprisedb". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/epas/16/installing/linux_arm64/index.mdx b/product_docs/docs/epas/16/installing/linux_arm64/index.mdx index c1e50ddb610..10c64a6a576 100644 --- a/product_docs/docs/epas/16/installing/linux_arm64/index.mdx +++ b/product_docs/docs/epas/16/installing/linux_arm64/index.mdx @@ -3,11 +3,18 @@ title: "Installing EDB Postgres Advanced Server on Linux AArch64 (ARM64)" navTitle: "On Linux ARM64" navigation: + - epas_rhel_9 - epas_debian_12 --- Operating system-specific install instructions are described in the corresponding documentation: +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](epas_rhel_9) + +- [Oracle Linux (OL) 9](epas_rhel_9) + ### Debian and derivatives - [Debian 12](epas_debian_12) diff --git a/product_docs/docs/epas/17/application_programming/02_packages/01_package_components.mdx b/product_docs/docs/epas/17/application_programming/02_packages/01_package_components.mdx new file mode 100644 index 00000000000..118cc946633 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/01_package_components.mdx @@ -0,0 +1,385 @@ +--- +title: "Package components" +description: "Describes the two main components of packages" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.06.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.184.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/01_package_components/ #generated for docs/epas/reorg-role-use-case-mode +--- + +Packages consist of two main components: + +- The *package specification*, which is the public interface. You can reference these elements outside the package. Declare all database objects that are a part of a package in the specification. +- The *package body*, which contains the actual implementation of all the database objects declared in the package specification. + + The package body implements the specifications in the package specification. It contains implementation details and private declarations that are invisible to the application. You can debug, enhance, or replace a package body without changing the specifications. Similarly, you can change the body without recompiling the calling programs because the implementation details are invisible to the application. + +## Package specification syntax + +The package specification defines the user interface for a package (the API). The specification lists the functions, procedures, types, exceptions, and cursors that are visible to a user of the package. + +The syntax used to define the interface for a package is: + +```sql +CREATE [ OR REPLACE ] PACKAGE + [ ] + { IS | AS } + [ ; ] ... + [ ] ... +END [ ] ; +``` + +Where `authorization_clause` := + +```sql +{ AUTHID DEFINER } | { AUTHID CURRENT_USER } +``` + +Where `procedure_or_function_declaration` := + +```text +procedure_declaration | function_declaration +``` + +Where `procedure_declaration` := + +```sql +PROCEDURE proc_name [ argument_list ]; +[ restriction_pragma; ] +``` + +Where `function_declaration` := + +```sql +FUNCTION func_name [ argument_list ] + RETURN rettype [ DETERMINISTIC ]; +[ restriction_pragma; ] +``` + +Where `argument_list` := + +```text +( argument_declaration [, ...] ) +``` + +Where `argument_declaration` := + +```text +argname [ IN | IN OUT | OUT ] argtype [ DEFAULT value ] +``` + +Where `restriction_pragma` := + +```sql +PRAGMA RESTRICT_REFERENCES(name, restrictions) +``` + +Where `restrictions` := + +```text +restriction [, ... ] +``` + +### Parameters + +`package_name` + + `package_name` is an identifier assigned to the package. Each package must have a unique name in the schema. + +`AUTHID DEFINER` + + If you omit the `AUTHID` clause or specify `AUTHID DEFINER`, the privileges of the package owner are used to determine access privileges to database objects. + +`AUTHID CURRENT_USER` + + If you specify `AUTHID CURRENT_USER`, the privileges of the current user executing a program in the package are used to determine access privileges. + +`declaration` + + `declaration` is an identifier of a public variable. You can access a public variable from outside the package using the syntax `package_name.variable`. There can be zero, one, or more public variables. Public variable definitions must come before procedure or function declarations. 
+ + `declaration` can be any of the following: + +- Variable declaration +- Record declaration +- Collection declaration +- `REF CURSOR` and cursor variable declaration +- `TYPE` definitions for records, collections, and `REF CURSOR` +- Exception +- Object variable declaration + +`proc_name` + + The name of a public procedure. + +`argname` + + The name of an argument. The argument is referenced by this name in the function or procedure body. + +`IN` | `IN OUT` | `OUT` + + The argument mode. `IN` (the default) declares the argument for input only. `IN OUT` allows the argument to receive a value as well as return a value. `OUT` specifies the argument is for output only. + +`argtype` + + The data type of an argument. An argument type can be a base data type, a copy of the type of an existing column using `%TYPE`, or a user-defined type such as a nested table or an object type. Don't specify a length for any base type. For example, specify `VARCHAR2`, not `VARCHAR2(10)`. + + Reference the type of a column by writing `tablename.columnname%TYPE`. Using this nomenclature can sometimes help make a procedure independent from changes to the definition of a table. + +`DEFAULT value` + + The `DEFAULT` clause supplies a default value for an input argument if you don't supply one in the invocation. You can't specify `DEFAULT` for arguments with modes `IN OUT` or `OUT`. + +`func_name` + + The name of a public function. + +`rettype` + + The return data type. + +`DETERMINISTIC` + + `DETERMINISTIC` is a synonym for `IMMUTABLE`. A `DETERMINISTIC` function can't modify the database and always reaches the same result when given the same argument values. It doesn't do database lookups or otherwise use information not directly present in its argument list. If you include this clause, any call of the function with all-constant arguments can be immediately replaced with the function value. + +`restriction` + + The following keywords are accepted for compatibility and ignored: + + `RNDS` + + `RNPS` + + `TRUST` + + `WNDS` + + `WNPS` + +## Package body syntax + +Package implementation details reside in the package body. The package body can contain objects that aren't visible to the package user. EDB Postgres Advanced Server supports the following syntax for the package body: + +```sql +CREATE [ OR REPLACE ] PACKAGE BODY <package_name> + { IS | AS } + [ <private_declaration>; ] ... + [ <procedure_or_function_definition> ] ... + [ <package_initializer> ] +END [ <package_name> ] ; +``` + +Where `procedure_or_function_definition` := + +```text +procedure_definition | function_definition +``` + +Where `procedure_definition` := + +```sql +PROCEDURE proc_name [ argument_list ] + [ options_list ] + { IS | AS } + procedure_body + END [ proc_name ] ; +``` + +Where `procedure_body` := + +```sql +[ PRAGMA AUTONOMOUS_TRANSACTION; ] +[ declaration; ] [, ...] +BEGIN + statement; [...] +[ EXCEPTION + { WHEN exception [ OR exception ] [...] THEN statement; } + [...] +] +``` + +Where `function_definition` := + +```sql +FUNCTION func_name [ argument_list ] + RETURN rettype [ DETERMINISTIC ] + [ options_list ] + { IS | AS } + function_body + END [ func_name ] ; +``` + +Where `function_body` := + +```sql +[ PRAGMA AUTONOMOUS_TRANSACTION; ] +[ declaration; ] [, ...] +BEGIN + statement; [...] +[ EXCEPTION + { WHEN exception [ OR exception ] [...] THEN statement; } + [...] +] +``` + +Where `argument_list` := + +```text +( argument_declaration [, ...] ) +``` + +Where `argument_declaration` := + +```text +argname [ IN | IN OUT | OUT ] argtype [ DEFAULT value ] +``` + +Where `options_list` := + +```text +option [ ...
] +``` + +Where `option` := + +```sql +STRICT +LEAKPROOF +PARALLEL { UNSAFE | RESTRICTED | SAFE } +COST execution_cost +ROWS result_rows +SET config_param { TO value | = value | FROM CURRENT } +``` + +Where `package_initializer` := + +```sql +BEGIN + statement; [...] +END; +``` + +### Parameters + +`package_name` + + `package_name` is the name of the package for which this is the package body. A package specification with this name must already exist. + +`private_declaration` + + `private_declaration` is an identifier of a private variable that any procedure or function can access in the package. There can be zero, one, or more private variables. `private_declaration` can be any of the following: + +- Variable declaration +- Record declaration +- Collection declaration +- `REF CURSOR` and cursor variable declaration +- `TYPE` definitions for records, collections, and `REF CURSORs` +- Exception +- Object variable declaration + +`proc_name` + + The name of the procedure being created. + +`PRAGMA AUTONOMOUS_TRANSACTION` + + `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the procedure as an autonomous transaction. + +`declaration` + + A variable, type, `REF CURSOR`, or subprogram declaration. If you include subprogram declarations, declare them after all other variable, type, and `REF CURSOR` declarations. + +`statement` + + An SPL program statement. A `DECLARE - BEGIN - END` block is considered an SPL statement unto itself. Thus, the function body can contain nested blocks. + +`exception` + + An exception condition name such as `NO_DATA_FOUND, OTHERS`. + +`func_name` + + The name of the function being created. + +`rettype` + + The return data type, which can be any of the types listed for `argtype`. As for `argtype`, don't specify a length for `rettype`. + +`DETERMINISTIC` + + Include `DETERMINISTIC` to specify for the function to always return the same result when given the same argument values. A `DETERMINISTIC` function must not modify the database. + + !!! Note + The `DETERMINISTIC` keyword is equivalent to the PostgreSQL `IMMUTABLE` option. + + !!!Note + If `DETERMINISTIC` is specified for a public function in the package body, you must also specify it for the function declaration in the package specification. For private functions, there's no function declaration in the package specification. + +`PRAGMA AUTONOMOUS_TRANSACTION` + + `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the function as an autonomous transaction. + +`argname` + + The name of a formal argument. The argument is referenced by this name in the procedure body. + +`IN` | `IN OUT` | `OUT` + + The argument mode. `IN` (the default) declares the argument for input only. `IN OUT` allows the argument to receive a value as well as return a value. `OUT` specifies the argument is for output only. + +`argtype` + + The data type of an argument. An argument type can be a base data type, a copy of the type of an existing column using `%TYPE`, or a user-defined type such as a nested table or an object type. Don't specify a length for any base type. For example, specify `VARCHAR2`, not `VARCHAR2(10)`. + + Reference the type of a column by writing `tablename.columnname%TYPE`. Using this nomenclature can sometimes help make a procedure independent from changes to the definition of a table. + +`DEFAULT value` + + The `DEFAULT` clause supplies a default value for an input argument if you don't supply one in the procedure call. Don't specify `DEFAULT` for arguments with modes `IN OUT` or `OUT`.
+ +The following options aren't compatible with Oracle databases. They're extensions to Oracle package syntax provided only by EDB Postgres Advanced Server. + +`STRICT` + + The `STRICT` keyword specifies for the function not to execute if called with a `NULL` argument. Instead, the function returns `NULL`. + +`LEAKPROOF` + + The `LEAKPROOF` keyword specifies for the function not to reveal any information about arguments other than through a return value. + +`PARALLEL { UNSAFE | RESTRICTED | SAFE }` + + The `PARALLEL` clause enables the use of parallel sequential scans (parallel mode). A parallel sequential scan uses multiple workers to scan a relation in parallel during a query, in contrast to a serial sequential scan. + + When set to `UNSAFE`, the procedure or function can't be executed in parallel mode. The presence of such a procedure or function forces a serial execution plan. This is the default setting if you omit the `PARALLEL` clause. + + When set to `RESTRICTED`, the procedure or function can be executed in parallel mode, but the execution is restricted to the parallel group leader. If the qualification for any particular relation has anything that is parallel restricted, that relation won't be chosen for parallelism. + + When set to `SAFE`, the procedure or function can be executed in parallel mode without restriction. + +`execution_cost` + + `execution_cost` specifies a positive number giving the estimated execution cost for the function, in units of `cpu_operator_cost`. If the function returns a set, this is the cost per returned row. The default is `0.0025`. + +`result_rows` + + `result_rows` is the estimated number of rows for the query planner to expect the function to return. The default is `1000`. + +`SET` + + Use the `SET` clause to specify a parameter value for the duration of the function: + + `config_param` specifies the parameter name. + + `value` specifies the parameter value. + + `FROM CURRENT` guarantees that the parameter value is restored when the function ends. + +`package_initializer` + + The statements in the `package_initializer` are executed once per user session when the package is first referenced. + +!!! Note + The `STRICT`, `LEAKPROOF`, `PARALLEL`, `COST`, `ROWS`, and `SET` keywords provide extended functionality for EDB Postgres Advanced Server. Oracle doesn't support them. diff --git a/product_docs/docs/epas/17/application_programming/02_packages/01a_display_packages.mdx b/product_docs/docs/epas/17/application_programming/02_packages/01a_display_packages.mdx new file mode 100644 index 00000000000..4395b146967 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/01a_display_packages.mdx @@ -0,0 +1,141 @@ +--- +title: "Viewing packages and package body definition" +description: "Describes how to view the package specification and package body definition" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/01a_display_packages/ #generated for docs/epas/reorg-role-use-case-mode +--- + +You can view the package specification and package body definition using the psql meta-commands `\sps` and `\spb`, respectively. + +## Synopsis + +```sql +\sps[+] [<schema>].<package_name> +\spb[+] [<schema>].<package_name>
+``` + +## Creating and viewing a package and a package body + +Create a package and a package body `test_pkg` in the `public` schema: + +```sql +edb=# CREATE OR REPLACE PACKAGE public.test_pkg IS +edb$# emp_name character varying(10); +edb$# PROCEDURE get_name(IN p_empno numeric); +edb$# FUNCTION display_counter() RETURN integer; +edb$# END; +CREATE PACKAGE +edb=# +edb=# CREATE OR REPLACE PACKAGE BODY public.test_pkg IS +edb$# v_counter integer; +edb$# +edb$# PROCEDURE get_name(IN p_empno numeric) IS +edb$# BEGIN +edb$# SELECT ename INTO emp_name FROM emp WHERE empno = p_empno; +edb$# v_counter := v_counter + 1; +edb$# END; +edb$# +edb$# FUNCTION display_counter() RETURN integer IS +edb$# BEGIN +edb$# RETURN v_counter; +edb$# END; +edb$# BEGIN +edb$# v_counter := 0; +edb$# DBMS_OUTPUT.PUT_LINE('Initialized counter'); +edb$# END; +CREATE PACKAGE BODY +edb=# +``` + +Use the `\sps` and `\spb` commands to view the definitions of the package and package body: + +```sql +edb=# \sps test_pkg +CREATE OR REPLACE PACKAGE public.test_pkg IS +emp_name character varying(10); +PROCEDURE get_name(IN p_empno numeric); +FUNCTION display_counter() RETURN integer; +END +edb=# +edb=# \sps+ test_pkg +1 CREATE OR REPLACE PACKAGE public.test_pkg IS +2 emp_name character varying(10); +3 PROCEDURE get_name(IN p_empno numeric); +4 FUNCTION display_counter() RETURN integer; +5 END + +edb=# \sps public.test_pkg +CREATE OR REPLACE PACKAGE public.test_pkg IS +emp_name character varying(10); +PROCEDURE get_name(IN p_empno numeric); +FUNCTION display_counter() RETURN integer; +END + +edb=# \sps+ public.test_pkg +1 CREATE OR REPLACE PACKAGE public.test_pkg IS +2 emp_name character varying(10); +3 PROCEDURE get_name(IN p_empno numeric); +4 FUNCTION display_counter() RETURN integer; +5 END + +edb=# \spb test_pkg +CREATE OR REPLACE PACKAGE BODY public.test_pkg IS +v_counter integer; + +PROCEDURE get_name(IN p_empno numeric) IS +BEGIN +SELECT ename INTO emp_name FROM emp WHERE empno = p_empno; +v_counter := v_counter + 1; +END; + +FUNCTION display_counter() RETURN integer IS +BEGIN +RETURN v_counter; +END; +BEGIN +v_counter := 0; +DBMS_OUTPUT.PUT_LINE('Initialized counter'); +END +edb=# +``` + +## Viewing function and procedure definitions + +You can also view the definition of individual functions and procedures using the `\sf` command.
+ +Create the function and procedure: + +```sql +edb=# CREATE OR REPLACE FUNCTION public.func1() +edb-# RETURNS integer +edb-# LANGUAGE edbspl +edb-# SECURITY DEFINER +edb-# AS $function$ begin return 10; end$function$; +CREATE FUNCTION +edb=# +edb=# CREATE OR REPLACE PROCEDURE public.proc1() +edb-# SECURITY DEFINER +edb-# AS $procedure$ begin null; end$procedure$ +edb-# LANGUAGE edbspl; +CREATE PROCEDURE +edb=# +``` + +Use the `\sf <function_name>` command to view the definition: + +```sql +edb=# \sf func1 +CREATE OR REPLACE FUNCTION public.func1() + RETURNS integer + LANGUAGE edbspl + SECURITY DEFINER +AS $function$ begin return 10; end$function$ +edb=# +edb=# \sf proc1 +CREATE OR REPLACE PROCEDURE public.proc1() + SECURITY DEFINER +AS $procedure$ begin null; end$procedure$ + LANGUAGE edbspl +edb=# +``` + diff --git a/product_docs/docs/epas/17/application_programming/02_packages/02_creating_packages.mdx b/product_docs/docs/epas/17/application_programming/02_packages/02_creating_packages.mdx new file mode 100644 index 00000000000..8505522b14e --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/02_creating_packages.mdx @@ -0,0 +1,139 @@ +--- +title: "Creating packages" +description: "Describes how to create the package specification" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.07.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.185.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/02_creating_packages/ #generated for docs/epas/reorg-role-use-case-mode +--- + +A package isn't an executable piece of code but a repository of code. When you use a package, you execute or make reference to an element within a package. + +## Creating the package specification + +The package specification contains the definition of all the elements in the package that you can reference from outside of the package. These are called the *public elements* of the package, and they act as the package interface. The following code sample is a package specification: + +```sql +-- +-- Package specification for the 'emp_admin' package. +-- +CREATE OR REPLACE PACKAGE emp_admin +IS + FUNCTION get_dept_name ( + p_deptno NUMBER DEFAULT 10 + ) + RETURN VARCHAR2; + FUNCTION update_emp_sal ( + p_empno NUMBER, + p_raise NUMBER + ) + RETURN NUMBER; + PROCEDURE hire_emp ( + p_empno NUMBER, + p_ename VARCHAR2, + p_job VARCHAR2, + p_sal NUMBER, + p_hiredate DATE DEFAULT sysdate, + p_comm NUMBER DEFAULT 0, + p_mgr NUMBER, + p_deptno NUMBER DEFAULT 10 + ); + PROCEDURE fire_emp ( + p_empno NUMBER + ); +END emp_admin; +``` + +This code sample creates the `emp_admin` package specification, which consists of two functions and two stored procedures. The `OR REPLACE` clause in the `CREATE PACKAGE` statement is optional but convenient: it replaces any existing package definition with the same name. + +## Creating the package body + +The body of the package contains the actual implementation behind the package specification. The following code creates a package body that implements the `emp_admin` package specification from the example. The body contains the implementation of the functions and stored procedures in the specification.
+ +```sql +-- +-- Package body for the 'emp_admin' package. +-- +CREATE OR REPLACE PACKAGE BODY emp_admin +IS + -- + -- Function that queries the 'dept' table based on the department + -- number and returns the corresponding department name. + -- + FUNCTION get_dept_name ( + p_deptno IN NUMBER DEFAULT 10 + ) + RETURN VARCHAR2 + IS + v_dname VARCHAR2(14); + BEGIN + SELECT dname INTO v_dname FROM dept WHERE deptno = p_deptno; + RETURN v_dname; + EXCEPTION + WHEN NO_DATA_FOUND THEN + DBMS_OUTPUT.PUT_LINE('Invalid department number ' || p_deptno); + RETURN ''; + END; + -- + -- Function that updates an employee's salary based on the + -- employee number and salary increment/decrement passed + -- as IN parameters. Upon successful completion the function + -- returns the new updated salary. + -- + FUNCTION update_emp_sal ( + p_empno IN NUMBER, + p_raise IN NUMBER + ) + RETURN NUMBER + IS + v_sal NUMBER := 0; + BEGIN + SELECT sal INTO v_sal FROM emp WHERE empno = p_empno; + v_sal := v_sal + p_raise; + UPDATE emp SET sal = v_sal WHERE empno = p_empno; + RETURN v_sal; + EXCEPTION + WHEN NO_DATA_FOUND THEN + DBMS_OUTPUT.PUT_LINE('Employee ' || p_empno || ' not found'); + RETURN -1; + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('The following is SQLERRM:'); + DBMS_OUTPUT.PUT_LINE(SQLERRM); + DBMS_OUTPUT.PUT_LINE('The following is SQLCODE:'); + DBMS_OUTPUT.PUT_LINE(SQLCODE); + RETURN -1; + END; + -- + -- Procedure that inserts a new employee record into the 'emp' table. + -- + PROCEDURE hire_emp ( + p_empno NUMBER, + p_ename VARCHAR2, + p_job VARCHAR2, + p_sal NUMBER, + p_hiredate DATE DEFAULT sysdate, + p_comm NUMBER DEFAULT 0, + p_mgr NUMBER, + p_deptno NUMBER DEFAULT 10 + ) + AS + BEGIN + INSERT INTO emp(empno, ename, job, sal, hiredate, comm, mgr, deptno) + VALUES(p_empno, p_ename, p_job, p_sal, + p_hiredate, p_comm, p_mgr, p_deptno); + END; + -- + -- Procedure that deletes an employee record from the 'emp' table based + -- on the employee number. + -- + PROCEDURE fire_emp ( + p_empno NUMBER + ) + AS + BEGIN + DELETE FROM emp WHERE empno = p_empno; + END; +END; +``` diff --git a/product_docs/docs/epas/17/application_programming/02_packages/03_referencing_a_package.mdx b/product_docs/docs/epas/17/application_programming/02_packages/03_referencing_a_package.mdx new file mode 100644 index 00000000000..30881b98fb5 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/03_referencing_a_package.mdx @@ -0,0 +1,26 @@ +--- +title: "Referencing a package" +description: "Describes how to reference the types, items, and subprograms that are declared in a package specification" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.08.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.186.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/03_referencing_a_package/ #generated for docs/epas/reorg-role-use-case-mode +--- + +To reference the types, items, and subprograms that are declared in a package specification, use the dot notation. 
For example: + +`package_name.type_name` + +`package_name.item_name` + +`package_name.subprogram_name` + +To invoke a function from the `emp_admin` package specification, execute the following SQL command: + +```sql +SELECT emp_admin.get_dept_name(10) FROM DUAL; +``` + +This example invokes the `get_dept_name` function declared in the package `emp_admin`. It passes the department number as an argument to the function, which returns the name of the department. The value returned is `ACCOUNTING`, which corresponds to department number `10`. diff --git a/product_docs/docs/epas/17/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx b/product_docs/docs/epas/17/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx new file mode 100644 index 00000000000..6312a2077e5 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx @@ -0,0 +1,192 @@ +--- +title: "Using packages with user-defined types" +description: "Provides an example that incorporates various user-defined types in the context of a package" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.09.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.187.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/04_using_packages_with_user_defined_types/ #generated for docs/epas/reorg-role-use-case-mode +--- + +This example incorporates various user-defined types in the context of a package. + +## Package specification + +The package specification of `emp_rpt` shows the declaration of a record type, `emprec_typ`, and a weakly typed `REF CURSOR`, `emp_refcur`, as publicly accessible. It also shows two functions and two procedures. The function `open_emp_by_dept` returns the `REF CURSOR` type `EMP_REFCUR`. The procedures `fetch_emp` and `close_refcur` both declare a weakly typed `REF CURSOR` as a formal parameter. + +```sql +CREATE OR REPLACE PACKAGE emp_rpt +IS + TYPE emprec_typ IS RECORD ( + empno NUMBER(4), + ename VARCHAR(10) + ); + TYPE emp_refcur IS REF CURSOR; + + FUNCTION get_dept_name ( + p_deptno IN NUMBER + ) RETURN VARCHAR2; + FUNCTION open_emp_by_dept ( + p_deptno IN emp.deptno%TYPE + ) RETURN EMP_REFCUR; + PROCEDURE fetch_emp ( + p_refcur IN OUT SYS_REFCURSOR + ); + PROCEDURE close_refcur ( + p_refcur IN OUT SYS_REFCURSOR + ); +END emp_rpt; +``` + +## Package body + +The package body shows the declaration of several private variables: a static cursor `dept_cur`, a table type `depttab_typ`, a table variable `t_dept`, an integer variable `t_dept_max`, and a record variable `r_emp`.
+ +```sql +CREATE OR REPLACE PACKAGE BODY emp_rpt +IS + CURSOR dept_cur IS SELECT * FROM dept; + TYPE depttab_typ IS TABLE of dept%ROWTYPE + INDEX BY BINARY_INTEGER; + t_dept DEPTTAB_TYP; + t_dept_max INTEGER := 1; + r_emp EMPREC_TYP; + + FUNCTION get_dept_name ( + p_deptno IN NUMBER + ) RETURN VARCHAR2 + IS + BEGIN + FOR i IN 1..t_dept_max LOOP + IF p_deptno = t_dept(i).deptno THEN + RETURN t_dept(i).dname; + END IF; + END LOOP; + RETURN 'Unknown'; + END; + + FUNCTION open_emp_by_dept( + p_deptno IN emp.deptno%TYPE + ) RETURN EMP_REFCUR + IS + emp_by_dept EMP_REFCUR; + BEGIN + OPEN emp_by_dept FOR SELECT empno, ename FROM emp + WHERE deptno = p_deptno; + RETURN emp_by_dept; + END; + + PROCEDURE fetch_emp ( + p_refcur IN OUT SYS_REFCURSOR + ) + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH p_refcur INTO r_emp; + EXIT WHEN p_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(r_emp.empno || ' ' || r_emp.ename); + END LOOP; + END; + + PROCEDURE close_refcur ( + p_refcur IN OUT SYS_REFCURSOR + ) + IS + BEGIN + CLOSE p_refcur; + END; +BEGIN + OPEN dept_cur; + LOOP + FETCH dept_cur INTO t_dept(t_dept_max); + EXIT WHEN dept_cur%NOTFOUND; + t_dept_max := t_dept_max + 1; + END LOOP; + CLOSE dept_cur; + t_dept_max := t_dept_max - 1; +END emp_rpt; +``` + +This package contains an initialization section that loads the private table variable `t_dept` using the private static cursor `dept_cur`. `t_dept` serves as a department name lookup table in the function `get_dept_name`. + +The function `open_emp_by_dept` returns a `REF CURSOR` variable for a result set of employee numbers and names for a given department. This `REF CURSOR` variable can then be passed to the procedure `fetch_emp` to retrieve and list the individual rows of the result set. Finally, the procedure `close_refcur` can be used to close the `REF CURSOR` variable associated with this result set. + +## Using anonymous blocks + +The following anonymous block runs the package function and procedures. In the anonymous block's declaration section, note the declaration of cursor variable `v_emp_cur` using the package’s public `REF CURSOR` type, `EMP_REFCUR`. `v_emp_cur` contains the pointer to the result set that's passed between the package function and procedures. + +```sql +DECLARE + v_deptno dept.deptno%TYPE DEFAULT 30; + v_emp_cur emp_rpt.EMP_REFCUR; +BEGIN + v_emp_cur := emp_rpt.open_emp_by_dept(v_deptno); + DBMS_OUTPUT.PUT_LINE('EMPLOYEES IN DEPT #' || v_deptno || + ': ' || emp_rpt.get_dept_name(v_deptno)); + emp_rpt.fetch_emp(v_emp_cur); + DBMS_OUTPUT.PUT_LINE('**********************'); + DBMS_OUTPUT.PUT_LINE(v_emp_cur%ROWCOUNT || ' rows were retrieved'); + emp_rpt.close_refcur(v_emp_cur); +END; +``` + +The following is the result of this anonymous block: + +```sql +__OUTPUT__ +EMPLOYEES IN DEPT #30: SALES +EMPNO ENAME +----- ------- +7499 ALLEN +7521 WARD +7654 MARTIN +7698 BLAKE +7844 TURNER +7900 JAMES +********************** +6 rows were retrieved +``` + +The following anonymous block shows another way to achieve the same result. Instead of using the package procedures `fetch_emp` and `close_refcur`, the logic of these programs is coded directly into the anonymous block. In the anonymous block’s declaration section, note the addition of record variable `r_emp`, declared using the package’s public record type, `EMPREC_TYP`.
+ +```sql +DECLARE + v_deptno dept.deptno%TYPE DEFAULT 30; + v_emp_cur emp_rpt.EMP_REFCUR; + r_emp emp_rpt.EMPREC_TYP; +BEGIN + v_emp_cur := emp_rpt.open_emp_by_dept(v_deptno); + DBMS_OUTPUT.PUT_LINE('EMPLOYEES IN DEPT #' || v_deptno || + ': ' || emp_rpt.get_dept_name(v_deptno)); + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH v_emp_cur INTO r_emp; + EXIT WHEN v_emp_cur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(r_emp.empno || ' ' || + r_emp.ename); + END LOOP; + DBMS_OUTPUT.PUT_LINE('**********************'); + DBMS_OUTPUT.PUT_LINE(v_emp_cur%ROWCOUNT || ' rows were retrieved'); + CLOSE v_emp_cur; +END; +``` + +The following is the result of this anonymous block. + +```sql +__OUTPUT__ +EMPLOYEES IN DEPT #30: SALES +EMPNO ENAME +----- ------- +7499 ALLEN +7521 WARD +7654 MARTIN +7698 BLAKE +7844 TURNER +7900 JAMES +********************** +6 rows were retrieved +``` diff --git a/product_docs/docs/epas/17/application_programming/02_packages/05_dropping_a_package.mdx b/product_docs/docs/epas/17/application_programming/02_packages/05_dropping_a_package.mdx new file mode 100644 index 00000000000..e5e1ba93397 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/05_dropping_a_package.mdx @@ -0,0 +1,30 @@ +--- +title: "Dropping a package" +description: "Defines the syntax for dropping a package" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.10.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.188.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/05_dropping_a_package/ #generated for docs/epas/reorg-role-use-case-mode +--- + +The syntax for deleting an entire package or the package body is: + +```sql +DROP PACKAGE [ BODY ] package_name; +``` + +If you omit the keyword `BODY`, both the package specification and the package body are deleted, that is, the entire package is dropped. If you specify the keyword `BODY`, then only the package body is dropped. The package specification remains intact. `package_name` is the identifier of the package to drop. + +The following statement destroys only the package body of `emp_admin`: + +```sql +DROP PACKAGE BODY emp_admin; +``` + +The following statement drops the entire `emp_admin` package: + +```sql +DROP PACKAGE emp_admin; +``` diff --git a/product_docs/docs/epas/17/application_programming/02_packages/index.mdx b/product_docs/docs/epas/17/application_programming/02_packages/index.mdx new file mode 100644 index 00000000000..b8e8764ed36 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/02_packages/index.mdx @@ -0,0 +1,24 @@ +--- +navTitle: Working with packages +title: "Working with packages" +indexCards: simple +description: "How to use packages to encapsulate logically related types, items, and subprograms" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-built-in-package-guide/9.6/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.05.html" +redirects: + - /epas/latest/epas_compat_bip_guide/02_packages/ #generated for docs/epas/reorg-role-use-case-mode +--- + +A *package* is a named collection of functions, procedures, variables, cursors, user-defined record types, and records that are referenced using a common qualifier: the package identifier. Packages have the following characteristics: + +- They provide a convenient means of organizing the functions and procedures that perform a related purpose. Permission to use the package functions and procedures depends on one privilege granted to the entire package. You must reference all of the package programs with a common name. +- You can declare certain functions, procedures, variables, types, and so on in the package as *public*. Public entities are visible and other programs that are given `EXECUTE` privilege on the package can reference them. For public functions and procedures, only their signatures are visible: the program names, parameters, if any, and return types of functions. The SPL code of these functions and procedures isn't accessible to others. Therefore applications that use a package depend on only the information available in the signature and not in the procedural logic itself. +- You can declare other functions, procedures, variables, types, and so on in the package as *private*. Private entities can be referenced and used by function and procedures in the package but not by other external applications. Private entities are for use only by programs in the package. +- Function and procedure names can be overloaded in a package. You can define one or more functions/procedures with the same name but with different signatures. This capability enables you to create identically named programs that perform the same job but on different types of input. + +
diff --git a/product_docs/docs/epas/17/application_programming/12_debugger/configuring_debugger.mdx b/product_docs/docs/epas/17/application_programming/12_debugger/configuring_debugger.mdx new file mode 100644 index 00000000000..0096dae2dde --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/12_debugger/configuring_debugger.mdx @@ -0,0 +1,38 @@ +--- +title: "Configuring the debugger" +description: "Describes how to configure the debugger program prior to use" +--- + +The debugger is integrated with pgAdmin 4 and EDB Postgres Enterprise Manager. If you installed EDB Postgres Advanced Server on a Windows host, pgAdmin 4 is automatically installed. The pgAdmin 4 icon is in the Windows Start menu. + +You can use the debugger in two basic ways to test programs: + +- **Standalone debugging** — Use the debugger to start the program to test. Supply any input parameter values required by the program. You can immediately observe and step through the code of the program. Standalone debugging is the typical method used for new programs and for initial problem investigation. +- **In-context debugging** — In-context debugging is useful if it's difficult to reproduce a problem using standalone debugging due to complex interaction with the calling application. Using this approach, the program to test is started by an application other than the debugger. You set a *global breakpoint* on the program to test. The application that makes the first call to the program encounters the global breakpoint. Then the application suspends execution. At that point, the debugger takes control of the called program. You can then observe and step through the code of the called program as it runs in the context of the calling application. + + After you have completely stepped through the code of the called program in the debugger, the suspended application resumes executing. + +The debugging tools and operations are the same whether using standalone or in-context debugging. The difference is in how to invoke the program being debugged. + +If your EDB Postgres Advanced Server host is a CentOS or other Linux system that uses `yum`, you can use `yum` to install pgAdmin 4. Open a command line, assume superuser privileges, and enter: + +```shell +yum install edb-pgadmin4* +``` + +On Linux, you must also install the `edb-as<xx>-server-pldebugger` RPM package, where `<xx>` is the EDB Postgres Advanced Server version number. Information about pgAdmin 4 is available at the [pgAdmin website](https://www.pgadmin.org/). + +The RPM installation adds the pgAdmin 4 icon to your Applications menu. + +Before using the debugger, edit the `postgresql.conf` file (located in the `data` subdirectory of your EDB Postgres Advanced Server home directory). Add `$libdir/plugin_debugger` to the libraries listed in the `shared_preload_libraries` configuration parameter: + +```ini +shared_preload_libraries = '$libdir/dbms_pipe,$libdir/edb_gen,$libdir/plugin_debugger' +``` + +- On Linux, the `postgresql.conf` file is located in: `/var/lib/edb/as<version>/data` +- On Windows, the `postgresql.conf` file is located in: `C:\Program Files\edb\as<version>\data` + +Where `<version>` is the version of EDB Postgres Advanced Server. + +After modifying the `shared_preload_libraries` parameter, restart the database server.
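+ +For example, on a Linux host running EDB Postgres Advanced Server 16 under systemd, the restart can look like the following. The service name follows the `edb-as-16` pattern shown in the installation steps; substitute your own version number: + +```shell +sudo systemctl restart edb-as-16 +```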
diff --git a/product_docs/docs/epas/17/application_programming/12_debugger/debugger_interface.mdx b/product_docs/docs/epas/17/application_programming/12_debugger/debugger_interface.mdx new file mode 100644 index 00000000000..6ee1511d447 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/12_debugger/debugger_interface.mdx @@ -0,0 +1,52 @@ +--- +title: "Debugger interface overview" +description: "Provides an overview of the main window of the debugger program" +--- + +The main debugger window contains two panels: + +- The top Program Body panel displays the program source code. +- The bottom Tabs panel provides a set of tabs for different information. + +Use the toolbar icons located at the top of the panel to access debugging functions. + +## The Program Body panel + +The Program Body panel displays the source code of the program that's being debugged. The figure shows that the debugger is about to execute the `SELECT` statement. The blue indicator in the program body highlights the next statement to execute. + +![The Program Body](../../images/program_body.png) + +## The Tabs panel + +Use the bottom Tabs panel to view or modify parameter values and local variables, or to view messages generated by `RAISE INFO` and function results. + +The tabs in the panel display the following information: + +- The **Parameters** tab displays the current parameter values. +- The **Local variables** tab displays the value of any variables declared in the program. +- The **Messages** tab displays any messages returned by the program as it executes. +- The **Results** tab displays any program results, such as the value from the `RETURN` statement of a function. +- The **Stack** tab displays the call stack. + +## The Stack tab + + + +The **Stack** tab displays a list of programs that are currently on the call stack, that is, programs that were invoked but that haven't yet completed. When a program is called, the name of the program is added to the top of the list displayed in the **Stack** tab. When the program ends, its name is removed from the list. + +The **Stack** tab also displays information about program calls. The information includes: + +- The location of the call in the program +- The call arguments +- The name of the program being called + +Reviewing the call stack can help you trace the course of execution through a series of nested programs. +The figure shows that `emp_query_caller` is about to call a subprogram named `emp_query`. `emp_query_caller` is currently at the top of the call stack. + +![A debugged program calling a subprogram](../../images/stack_tab.png) + +After the call to `emp_query` executes, `emp_query` is displayed at the top of the **Stack** tab, and its code is displayed in the Program Body panel. + +![Debugging the called subprogram](../../images/stack_tab.png) + +After completing execution of the subprogram, control returns to the calling program (`emp_query_caller`), now displayed at the top of the **Stack** tab.
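+ +To reproduce a similar nested call yourself, a minimal sketch like the following works. The function names `outer_func` and `inner_func` are hypothetical stand-ins for `emp_query_caller` and `emp_query`: + +```sql +CREATE OR REPLACE FUNCTION inner_func (p_val INTEGER) +RETURN INTEGER +IS +BEGIN + RETURN p_val * 2; +END; + +CREATE OR REPLACE FUNCTION outer_func (p_val INTEGER) +RETURN INTEGER +IS +BEGIN + -- Step into this call in the debugger to see inner_func appear + -- at the top of the Stack tab. + RETURN inner_func(p_val) + 1; +END; +```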
diff --git a/product_docs/docs/epas/17/application_programming/12_debugger/debugging_a_program.mdx b/product_docs/docs/epas/17/application_programming/12_debugger/debugging_a_program.mdx new file mode 100644 index 00000000000..4f14bd0f824 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/12_debugger/debugging_a_program.mdx @@ -0,0 +1,115 @@ +--- +title: "Running the debugger" +description: "Describes the operations you can perform to debug a program" +--- + +You can perform the following operations to debug a program: + +- Step through the program one line at a time. +- Execute the program until you reach a breakpoint. +- View and change local variable values within the program. + +## Considerations when using the debugger + +- These instructions use the standalone debugging method. To start the debugger for in-context debugging, see [Setting global breakpoint for in-context debugging](#setting_global_breakpoint_for_in_context_debugging). + +- You can't debug triggers using standalone debugging. You must use in-context debugging. See [Setting global breakpoint for in-context debugging](#setting_global_breakpoint_for_in_context_debugging) for information. + + +## Stepping through the code + +Use the toolbar icons to step through a program with the debugger. The icons serve the following purposes: + +- **Step into.** Execute the currently highlighted line of code. +- **Step over.** Execute a line of code, stepping over any subfunctions invoked by the code. The subfunction executes but is debugged only if it contains a breakpoint. +- **Continue/Start.** Execute the highlighted code and continue until the program encounters a breakpoint or completes. +- **Stop.** Halt a program. + +## Using breakpoints + +As the debugger executes a program, it pauses when it reaches a breakpoint. When the debugger pauses, you can observe or change local variables or navigate to an entry in the call stack to observe variables or set other breakpoints. The next step into, step over, or continue operation forces the debugger to resume executing with the next line of code following the breakpoint. + +There are two types of breakpoints: + +- **Local breakpoint** — You can set a local breakpoint at any executable line of code in a program. The debugger pauses execution when it reaches a line where a local breakpoint was set. + +- **Global breakpoint** — A global breakpoint triggers when any session reaches that breakpoint. Set a global breakpoint if you want to perform in-context debugging of a program. When you set a global breakpoint on a program, the debugging session that set the global breakpoint waits until that program is invoked in another session. Only a superuser can set a global breakpoint. + +### Setting a local breakpoint + +To create a local breakpoint, select the grey shaded margin to the left of the line of code where you want the local breakpoint set. Select close to the right side of the margin, as shown by the breakpoint dot on source code line 12 in the figure. When the breakpoint is created, the debugger displays a dark dot in the margin, indicating a breakpoint was set at the selected line of code. + +![Set a breakpoint by clicking in left-hand margin](../../images/setting_global_breakpoint_from_left-hand_margin.png) + +You can set as many local breakpoints as you want. Local breakpoints remain in effect for the rest of a debugging session until you remove them.
+ +### Removing a local breakpoint + +To remove a local breakpoint, select the breakpoint dot. The dot disappears. + +To remove all of the breakpoints from the program that currently appears in the Program Body panel, select the **Clear all breakpoints** icon. + +!!! Note + When you perform any of these actions, only the breakpoints in the program that currently appears in the Program Body panel are removed. Breakpoints in called subprograms or breakpoints in programs that call the program currently appearing in the Program Body panel aren't removed. + +### Setting a global breakpoint for in-context debugging + + + +To set a global breakpoint for in-context debugging: + +1. In the Browser panel, select the stored procedure, function, or trigger on which you want to set the breakpoint. + +1. Select **Object > Debugging > Set Breakpoint**. + +To set a global breakpoint on a trigger: + +1. Expand the table node that contains the trigger. + +1. Select the specific trigger you want to debug. + +1. Select **Object > Debugging > Set Breakpoint**. + +To set a global breakpoint in a package: + +1. Select the specific procedure or function under the package node of the package you want to debug. + +1. Select **Object > Debugging > Set Breakpoint**. + +After you select **Set Breakpoint**, the Debugger window opens and waits for an application to call the program to debug. + +The psql client invokes the `select_emp` function on which a global breakpoint was set. + +```sql +$ psql edb enterprisedb +psql.bin (17.2.0, server 17.2.0) +Type "help" for help. + +edb=# SELECT select_emp(7900); +``` + +The `select_emp` function doesn't finish until you step through the program in the debugger. + +![Program on which a global breakpoint was set](../../images/parameters_tab.png) + +You can now debug the program using operations such as step into, step over, and continue, or you can set local breakpoints. After you step through executing the program, the calling application (psql) regains control, the `select_emp` function finishes executing, and its output is displayed. + +```sql +$ psql edb enterprisedb +psql.bin (17.2.0, server 17.2.0) +Type "help" for help. + +edb=# SELECT select_emp(7900); +__OUTPUT__ +INFO: Number : 7900 +INFO: Name : JAMES +INFO: Hire Date : 12/03/1981 +INFO: Salary : 950.00 +INFO: Commission: 0.00 +INFO: Department: SALES + select_emp +------------ +(1 row) +``` + +At this point, you can end the debugger session. If you don't end the debugger session, the next application that invokes the program encounters the global breakpoint, and the debugging cycle begins again. diff --git a/product_docs/docs/epas/17/application_programming/12_debugger/index.mdx b/product_docs/docs/epas/17/application_programming/12_debugger/index.mdx new file mode 100644 index 00000000000..0f77c9e76aa --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/12_debugger/index.mdx @@ -0,0 +1,30 @@ +--- +title: "Debugging programs" +description: "How to use the debugger to identify ways to make your program run faster, more efficiently, and more reliably" +indexCards: simple +navigation: + - configuring_debugger + - starting_debugger + - debugger_interface + - debugging_a_program +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.41.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.42.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.40.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.110.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.112.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.111.html" +--- + + + +The debugger gives developers and DBAs the ability to test and debug server-side programs using a graphical, dynamic environment. The types of programs that you can debug are: +- SPL stored procedures +- functions +- triggers +- packages +- PL/pgSQL functions and triggers. + + + diff --git a/product_docs/docs/epas/17/application_programming/12_debugger/starting_debugger.mdx b/product_docs/docs/epas/17/application_programming/12_debugger/starting_debugger.mdx new file mode 100644 index 00000000000..7443ef2a04c --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/12_debugger/starting_debugger.mdx @@ -0,0 +1,31 @@ +--- +title: "Starting the debugger" +description: "Describes how to open the debugger program" +--- + +Use pgAdmin 4 to access the debugger for standalone debugging. To open the debugger: + +1. Select the name of the stored procedure or function you want to debug in the pgAdmin 4 **Browser** panel. Or, to debug a package, select the specific procedure or function under the package node of the package you want to debug. + +1. Select **Object > Debugging > Debug**. + +You can use the Debugger window to pass parameter values when you are standalone debugging a program that expects parameters. When you start the debugger, the Debugger window opens to display any `IN` or `IN OUT` parameters the program expects. If the program declares no `IN` or `IN OUT` parameters, the Debugger window doesn't open. + +Use the fields on the Debugger window to provide a value for each parameter: + +- The **Name** field contains the formal parameter name. +- The **Type** field contains the parameter data type. +- Select the **Null?** check box to indicate that the parameter is a `NULL` value. +- Select the **Expression?** check box if the `Value` field contains an expression. +- The **Value** field contains the parameter value that's passed to the program. +- Select the **Use Default?** check box to indicate for the program to use the value in the **Default Value** field. +- The **Default Value** field contains the default value of the parameter. + +If you're debugging a procedure or function that's a member of a package that has an initialization section, select the **Debug Package Initializer** check box to step into the package initialization section, This setting allows you to debug the initialization section code before debugging the procedure or function. If you don't select the check box, the debugger executes the package initialization section without allowing you to see or step through the individual lines of code as they execute. + +After entering the desired parameter values, select **Debug** to start the debugging process. + +!!! Note + The Debugger window doesn't open during in-context debugging. 
+
+If you're debugging a procedure or function that's a member of a package that has an initialization section, select the **Debug Package Initializer** check box to step into the package initialization section. This setting allows you to debug the initialization section code before debugging the procedure or function. If you don't select the check box, the debugger executes the package initialization section without allowing you to see or step through the individual lines of code as they execute.
+
+After entering the desired parameter values, select **Debug** to start the debugging process.
+
+!!! Note
+    The Debugger window doesn't open during in-context debugging. Instead, the application calling the program to debug must supply any required input parameter values.
+
+After you complete a full debugging cycle by stepping through the program code, the Debugger window reopens. You can enter new parameter values and repeat the debugging cycle or end the debugging session.
diff --git a/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/comment_command.mdx b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/comment_command.mdx
new file mode 100644
index 00000000000..d9746991c2c
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/comment_command.mdx
+---
+title: "Using the COMMENT command"
+description: "Describes how to add comments to objects"
+redirects:
+  - /epas/latest/epas_guide/15_enhanced_sql_and_other_misc_features/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+In addition to allowing comments on objects supported by the PostgreSQL `COMMENT` command, EDB Postgres Advanced Server supports comments on other object types. The complete supported syntax is:
+
+```sql
+COMMENT ON
+{
+  AGGREGATE <aggregate_name> ( <aggregate_signature> ) |
+  CAST (<source_type> AS <target_type>) |
+  COLLATION <object_name> |
+  COLUMN <relation_name>.<column_name> |
+  CONSTRAINT <constraint_name> ON <table_name> |
+  CONSTRAINT <constraint_name> ON DOMAIN <domain_name> |
+  CONVERSION <object_name> |
+  DATABASE <object_name> |
+  DOMAIN <object_name> |
+  EXTENSION <object_name> |
+  EVENT TRIGGER <object_name> |
+  FOREIGN DATA WRAPPER <object_name> |
+  FOREIGN TABLE <object_name> |
+  FUNCTION <func_name> ([[<argmode>] [<argname>] <argtype> [,...]]) |
+  INDEX <object_name> |
+  LARGE OBJECT <large_object_oid> |
+  MATERIALIZED VIEW <object_name> |
+  OPERATOR <operator_name> (left_type, right_type) |
+  OPERATOR CLASS <object_name> USING <index_method> |
+  OPERATOR FAMILY <object_name> USING <index_method> |
+  PACKAGE <object_name> |
+  POLICY <policy_name> ON <table_name> |
+  [ PROCEDURAL ] LANGUAGE <object_name> |
+  PROCEDURE <proc_name> [([[<argmode>] [<argname>] <argtype> [, ...]])] |
+  PUBLIC SYNONYM <object_name> |
+  ROLE <object_name> |
+  RULE <rule_name> ON <table_name> |
+  SCHEMA <object_name> |
+  SEQUENCE <object_name> |
+  SERVER <object_name> |
+  TABLE <object_name> |
+  TABLESPACE <object_name> |
+  TEXT SEARCH CONFIGURATION <object_name> |
+  TEXT SEARCH DICTIONARY <object_name> |
+  TEXT SEARCH PARSER <object_name> |
+  TEXT SEARCH TEMPLATE <object_name> |
+  TRANSFORM FOR <type_name> LANGUAGE <lang_name> |
+  TRIGGER <trigger_name> ON <table_name> |
+  TYPE <object_name> |
+  VIEW <object_name>
+} IS <'text'>
+```
+
+Where `aggregate_signature` is:
+
+```sql
+* |
+[ <argmode> ] [ <argname> ] <argtype> [ , ... ] |
+[ [ <argmode> ] [ <argname> ] <argtype> [ , ... ] ]
+ORDER BY [ <argmode> ] [ <argname> ] <argtype> [ , ... ]
+```
+
+## Parameters
+
+`object_name`
+
+The name of the object on which you're commenting.
+
+`AGGREGATE aggregate_name (aggregate_signature)`
+
+Include the `AGGREGATE` clause to create a comment about an aggregate. `aggregate_name` specifies the name of an aggregate. `aggregate_signature` specifies the associated signature in one of the following forms:
+
+```sql
+* |
+[ <argmode> ] [ <argname> ] <argtype> [ , ... ] |
+[ [ <argmode> ] [ <argname> ] <argtype> [ , ... ] ]
+ORDER BY [ <argmode> ] [ <argname> ] <argtype> [ , ... ]
+```
+
+Where `argmode` is the mode of a function, procedure, or aggregate argument. `argmode` can be `IN`, `OUT`, `INOUT`, or `VARIADIC`. The default is `IN`.
+
+`argname` is the name of an aggregate argument.
+
+`argtype` is the data type of an aggregate argument.
+
+`CAST (source_type AS target_type)`
+
+Include the `CAST` clause to create a comment about a cast. When creating a comment about a cast, `source_type` specifies the source data type of the cast, and `target_type` specifies the target data type of the cast.
+
+`COLUMN relation_name.column_name`
+
+Include the `COLUMN` clause to create a comment about a column. `column_name` specifies the name of the column to which the comment applies. `relation_name` is the table, view, composite type, or foreign table in which a column resides.
+
+`CONSTRAINT constraint_name ON table_name`
+
+`CONSTRAINT constraint_name ON DOMAIN domain_name`
+
+Include the `CONSTRAINT` clause to add a comment about a constraint.
When you're creating a comment about a constraint, `constraint_name` specifies the name of the constraint. `table_name` or `domain_name` specifies the name of the table or domain on which the constraint is defined.
+
+`FUNCTION func_name ([[argmode] [argname] argtype [, ...]])`
+
+Include the `FUNCTION` clause to add a comment about a function. `func_name` specifies the name of the function. `argmode` specifies the mode of the function. `argmode` can be `IN`, `OUT`, `INOUT`, or `VARIADIC`. The default is `IN`.
+
+`argname` specifies the name of a function, procedure, or aggregate argument. `argtype` specifies the data type of a function, procedure, or aggregate argument.
+
+`large_object_oid`
+
+`large_object_oid` is the system-assigned OID of the large object about which you're commenting.
+
+`OPERATOR operator_name (left_type, right_type)`
+
+Include the `OPERATOR` clause to add a comment about an operator. `operator_name` specifies the optionally schema-qualified name of the operator on which you're commenting. `left_type` and `right_type` are the optionally schema-qualified data types of the operator's arguments.
+
+`OPERATOR CLASS object_name USING index_method`
+
+Include the `OPERATOR CLASS` clause to add a comment about an operator class. `object_name` specifies the optionally schema-qualified name of the operator class on which you're commenting. `index_method` specifies the associated index method of the operator class.
+
+`OPERATOR FAMILY object_name USING index_method`
+
+Include the `OPERATOR FAMILY` clause to add a comment about an operator family. `object_name` specifies the optionally schema-qualified name of the operator family on which you're commenting. `index_method` specifies the associated index method of the operator family.
+
+`POLICY policy_name ON table_name`
+
+Include the `POLICY` clause to add a comment about a policy. `policy_name` specifies the name of the policy. `table_name` specifies the table that the policy is associated with.
+
+`PROCEDURE proc_name [([[argmode] [argname] argtype [, ...]])]`
+
+Include the `PROCEDURE` clause to add a comment about a procedure. `proc_name` specifies the name of the procedure. `argmode` specifies the mode of the procedure. `argmode` can be `IN`, `OUT`, `INOUT`, or `VARIADIC`. The default is `IN`.
+
+`argname` specifies the name of a function, procedure, or aggregate argument. `argtype` specifies the data type of a function, procedure, or aggregate argument.
+
+`RULE rule_name ON table_name`
+
+Include the `RULE` clause to specify a comment on a rule. `rule_name` specifies the name of the rule. `table_name` specifies the name of the table on which the rule is defined.
+
+`TRANSFORM FOR type_name LANGUAGE lang_name`
+
+Include the `TRANSFORM FOR` clause to specify a comment on a `TRANSFORM`.
+
+`type_name` specifies the name of the data type of the transform. `lang_name` specifies the name of the language of the transform.
+
+`TRIGGER trigger_name ON table_name`
+
+Include the `TRIGGER` clause to specify a comment on a trigger. `trigger_name` specifies the name of the trigger. `table_name` specifies the name of the table on which the trigger is defined.
+
+`text`
+
+The comment, written as a string literal, or `NULL` to drop the comment.
+
+!!! Note
+    Names of tables, aggregates, collations, conversions, domains, foreign tables, functions, indexes, operators, operator classes, operator families, packages, procedures, sequences, text search objects, types, and views can be schema qualified.
+
+## Example
+
+This example adds a comment to a table named `new_emp`:
+
+```sql
+COMMENT ON TABLE new_emp IS 'This table contains information about new
+employees.';
+```
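+
+As another sketch, you can comment on a column of the same table or remove a comment by setting it to `NULL`. (The `ename` column and the `get_sal` function here are hypothetical and appear only to illustrate the syntax.)
+
+```sql
+COMMENT ON COLUMN new_emp.ename IS 'The last name of the employee';
+COMMENT ON FUNCTION get_sal(integer) IS 'Returns the salary of an employee';
+COMMENT ON TABLE new_emp IS NULL;
+```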
+
+For more information about using the `COMMENT` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-comment.html).
diff --git a/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/index.mdx b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/index.mdx
new file mode 100644
index 00000000000..f1664dcd109
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/index.mdx
+---
+title: "Using enhanced SQL and other miscellaneous features"
+description: "How to use the enhanced SQL functionality and additional productivity features included in EDB Postgres Advanced Server"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.60.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.032.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.062.html"
+---
+
+EDB Postgres Advanced Server includes enhanced SQL functionality and other features that add flexibility and convenience.
+
diff --git a/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/logical_decoding.mdx b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/logical_decoding.mdx
new file mode 100644
index 00000000000..c739ab937e1
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/logical_decoding.mdx
+---
+title: "Configuring logical decoding on standby"
+description: "Describes how to create a logical replication slot on a standby server"
+---
+
+Logical decoding on a standby server allows you to create a logical replication slot on the standby that can respond to API operations such as `get`, `peek`, and `advance`.
+
+For more information about logical decoding, refer to the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/logicaldecoding-explanation.html).
+
+For a logical slot on a standby server to work, you must set the `hot_standby_feedback` parameter to `ON` on the standby. The `hot_standby_feedback` parameter prevents `VACUUM` from removing recently dead rows that are required by an existing logical replication slot on the standby server. If a slot conflict occurs on the standby, the slots are dropped.
+
+For logical decoding on a standby to work, you must set `wal_level` to `logical` on both the primary and standby servers. If you set `wal_level` to a value other than `logical`, slots aren't created. If you set `wal_level` to a value other than `logical` on the primary and logical slots exist on the standby, those slots are dropped, and you can't create new ones.
+
+When transactions are written to the primary server, the activity triggers the creation of a logical slot on the standby server. If a primary server is idle, creating a logical slot on a standby server might take noticeable time.
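+
+Putting these requirements together, a minimal sketch of the setup looks like the following. The slot name and the `test_decoding` output plugin are illustrative choices:
+
+```sql
+-- On both the primary and the standby (changing wal_level requires a restart):
+ALTER SYSTEM SET wal_level = 'logical';
+
+-- On the standby:
+ALTER SYSTEM SET hot_standby_feedback = 'on';
+
+-- Then, connected to the standby, create a logical slot and peek at changes:
+SELECT pg_create_logical_replication_slot('standby_slot', 'test_decoding');
+SELECT * FROM pg_logical_slot_peek_changes('standby_slot', NULL, NULL);
+```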
+
+For more information about functions that support replication, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/functions-admin.html#FUNCTIONS-REPLICATION). See also this [logical decoding example](https://www.postgresql.org/docs/current/logicaldecoding-example.html).
+
diff --git a/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/obtaining_version_information.mdx b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/obtaining_version_information.mdx
new file mode 100644
index 00000000000..d68f5ca8db2
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/15_enhanced_sql_and_other_misc_features/obtaining_version_information.mdx
+---
+title: "Obtaining version information"
+description: "Describes how to display the product name, version, and the host system on which it was installed."
+---
+
+The text string output of the `version()` function displays the name of the product, its version, and the host system on which it was installed.
+
+For EDB Postgres Advanced Server, the `version()` output is in a format similar to the PostgreSQL community version. The first word of the text is *PostgreSQL* rather than *EnterpriseDB*, which was used in EDB Postgres Advanced Server version 10 and earlier.
+
+The general format of the `version()` output is:
+
+```text
+PostgreSQL $PG_VERSION_EXT (EnterpriseDB EDB Postgres Advanced Server $PG_VERSION) on $host
+```
+
+So for the current EDB Postgres Advanced Server, the version string appears as follows:
+
+```sql
+edb@45032=# select version();
+__OUTPUT__
+version
+-----------------------------------------------------------------------------------------------
+------------------------------------------------
+PostgreSQL 17.2 (EnterpriseDB Advanced Server 17.2.0 (Debian 17.2.0-1.bullseye)) on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
+(1 row)
+```
+
+In contrast, for EDB Postgres Advanced Server 10, the version string was the following:
+
+```sql
+edb=# select version();
+__OUTPUT__
+ version
+------------------------------------------------------------------------------------------
+-------------------
+EnterpriseDB 10.4.9 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.4.7 20120313 (Red Hat
+4.4.7-18), 64-bit
+(1 row)
+```
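+
+If an application needs the version as a number that's easy to compare rather than parsing the version string, one standard alternative (core PostgreSQL behavior, not specific to EDB Postgres Advanced Server) is the `server_version_num` setting:
+
+```sql
+SELECT current_setting('server_version_num');
+-- Returns 170002 on version 17.2
+```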
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.07.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.04.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.06.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.05.html" +redirects: + - /epas/latest/ecpgplus_guide/02_overview/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +EDB enhanced ECPG (the PostgreSQL precompiler) to create ECPGPlus. ECPGPlus is a Pro\*C-compatible version of the PostgreSQL C precompiler. ECPGPlus translates a program that combines C code and embedded SQL statements into an equivalent C program. As it performs the translation, ECPGPlus verifies that the syntax of each SQL construct is correct. + +## About ECPGPlus + +The following diagram charts the path of a program containing embedded SQL statements as it's compiled into an executable: + +![Compilation of a program containing embedded SQL statements](../../images/ecpg_path.png) + +Compilation of a program containing embedded SQL statements + +To produce an executable from a C program that contains embedded SQL statements: + +1. Pass the program (`my_program.pgc` in the diagram) to the ECPGPlus precompiler. ECPGPlus translates each SQL statement in `my_program.pgc` into C code that calls the `ecpglib` API and produces a C program (`my_program.c`). +1. Pass the C program to a C compiler. The C compiler generates an object file (`my_program.o`). +1. Pass the object file (`my_program.o`) as well as the `ecpglib` library file and any other required libraries to the linker, which in turn produces the executable (`my_program`). + +While the ECPGPlus preprocessor validates the syntax of each SQL statement, it can't validate the semantics. For example, the preprocessor confirms that an `INSERT` statement is syntactically correct, but it can't confirm that the table mentioned in the `INSERT` statement exists. + +## Behind the scenes + +A client application contains a mix of C code and SQL code made up of the following elements: + +- C preprocessor directives +- C declarations (variables, types, functions, ...) +- C definitions (variables, types, functions, ...) +- SQL preprocessor directives +- SQL statements + +For example: + +```c +1 #include +2 EXEC SQL INCLUDE sqlca; +3 +4 extern void printInt(char *label, int val); +5 extern void printStr(char *label, char *val); +6 extern void printFloat(char *label, float val); +7 +8 void displayCustomer(int custNumber) +9 { +10 EXEC SQL BEGIN DECLARE SECTION; +11 VARCHAR custName[50]; +12 float custBalance; +13 int custID = custNumber; +14 EXEC SQL END DECLARE SECTION; +15 +16 EXEC SQL SELECT name, balance +17 INTO :custName, :custBalance +18 FROM customer +19 WHERE id = :custID; +20 +21 printInt("ID", custID); +22 printStr("Name", custName); +23 printFloat("Balance", custBalance); +24 } +``` + +In this code fragment: + +- Line 1 specifies a directive to the C preprocessor. + + C preprocessor directives can be interpreted or ignored. The option is controlled by a command line option (`-C PROC`) entered when you invoke ECPGPlus. In either case, ECPGPlus copies each C preprocessor directive to the output file (4) without change. Any C preprocessor directive found in the source file appears in the output file. 
+
+While the ECPGPlus preprocessor validates the syntax of each SQL statement, it can't validate the semantics. For example, the preprocessor confirms that an `INSERT` statement is syntactically correct, but it can't confirm that the table mentioned in the `INSERT` statement exists.
+
+## Behind the scenes
+
+A client application contains a mix of C code and SQL code made up of the following elements:
+
+- C preprocessor directives
+- C declarations (variables, types, functions, ...)
+- C definitions (variables, types, functions, ...)
+- SQL preprocessor directives
+- SQL statements
+
+For example:
+
+```c
+1 #include <stdio.h>
+2 EXEC SQL INCLUDE sqlca;
+3
+4 extern void printInt(char *label, int val);
+5 extern void printStr(char *label, char *val);
+6 extern void printFloat(char *label, float val);
+7
+8 void displayCustomer(int custNumber)
+9 {
+10   EXEC SQL BEGIN DECLARE SECTION;
+11     VARCHAR custName[50];
+12     float custBalance;
+13     int custID = custNumber;
+14   EXEC SQL END DECLARE SECTION;
+15
+16   EXEC SQL SELECT name, balance
+17     INTO :custName, :custBalance
+18     FROM customer
+19     WHERE id = :custID;
+20
+21   printInt("ID", custID);
+22   printStr("Name", custName);
+23   printFloat("Balance", custBalance);
+24 }
+```
+
+In this code fragment:
+
+- Line 1 specifies a directive to the C preprocessor.
+
+  C preprocessor directives can be interpreted or ignored. The option is controlled by a command line option (`-C PROC`) entered when you invoke ECPGPlus. In either case, ECPGPlus copies each C preprocessor directive to the output file without change. Any C preprocessor directive found in the source file appears in the output file.
+
+- Line 2 specifies a directive to the SQL preprocessor.
+
+  SQL preprocessor directives are interpreted by the ECPGPlus preprocessor and aren't copied to the output file.
+
+- Lines 4 through 6 contain C declarations.
+
+  C declarations are copied to the output file without change, except that each `VARCHAR` declaration is translated into an equivalent `struct` declaration.
+
+- Lines 10 through 14 contain an embedded-SQL declaration section.
+
+  C variables that you refer to in SQL code are known as *host variables*. If you invoke the ECPGPlus preprocessor in Pro\*C mode (`-C PROC`), you can refer to any C variable in a SQL statement. Otherwise, you must declare each host variable in a `BEGIN/END DECLARE SECTION` pair.
+
+- Lines 16 through 19 contain a SQL statement.
+
+  SQL statements are translated into calls to the ECPGPlus runtime library.
+
+- Lines 21 through 23 contain C code.
+
+  C code is copied to the output file without change.
+
+Prefix any SQL statement with `EXEC SQL`. The SQL statement extends to the next (unquoted) semicolon. For example:
+
+```sql
+printf("Updating employee salaries\n");
+
+EXEC SQL UPDATE emp SET sal = sal * 1.25;
+EXEC SQL COMMIT;
+
+printf("Employee salaries updated\n");
+```
+
+When the preprocessor encounters this code fragment, it passes the C code (the first line and the last line) to the output file without translation and converts each `EXEC SQL` statement into a call to an `ecpglib` function. The result is similar to the following:
+
+```c
+printf("Updating employee salaries\n");
+
+{
+  ECPGdo( __LINE__, 0, 1, NULL, 0, ECPGst_normal,
+    "update emp set sal = sal * 1.25",
+    ECPGt_EOIT, ECPGt_EORT);
+}
+
+{
+  ECPGtrans(__LINE__, NULL, "commit");
+}
+
+printf("Employee salaries updated\n");
+```
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx
new file mode 100644
index 00000000000..eb6b5564d4a
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx
+---
+title: "Using embedded SQL"
+description: "Provides examples for making a query and for using a cursor to process a result set"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.11.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.09.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.08.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.12.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.10.html"
+redirects:
+  - /epas/latest/ecpgplus_guide/03_using_embedded_sql/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+These two examples show how to use embedded SQL with EDB Postgres Advanced Server.
+
+## Example: A simple query
+
+The first code sample shows how to execute a `SELECT` statement that returns a single row, storing the results in a group of host variables.
After declaring host variables, it connects to the `edb` sample database using a hard-coded role name and the associated password and queries the `emp` table. The query returns the values into the declared host variables. After checking the value of the `NULL` indicator variable, it prints a simple result set onscreen and closes the connection.
+
+```c
+/************************************************************
+ * print_emp.pgc
+ *
+ */
+#include <stdio.h>
+
+int main(void)
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int    v_empno;
+        char   v_ename[40];
+        double v_sal;
+        double v_comm;
+        short  v_comm_ind;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+
+    EXEC SQL CONNECT TO edb
+        USER 'alice' IDENTIFIED BY '1safepwd';
+
+    EXEC SQL
+        SELECT
+            empno, ename, sal, comm
+        INTO
+            :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind
+        FROM
+            emp
+        WHERE
+            empno = 7369;
+
+    if (v_comm_ind)
+        printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+            v_empno, v_ename, v_sal);
+    else
+        printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+            v_empno, v_ename, v_sal, v_comm);
+    EXEC SQL DISCONNECT;
+}
+/************************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` library and then declares the `main` function:
+
+```c
+#include <stdio.h>
+
+int main(void)
+{
+```
+
+Next, the application declares a set of host variables used to interact with the database server:
+
+```c
+EXEC SQL BEGIN DECLARE SECTION;
+    int    v_empno;
+    char   v_ename[40];
+    double v_sal;
+    double v_comm;
+    short  v_comm_ind;
+EXEC SQL END DECLARE SECTION;
+```
+
+If you plan to precompile the code in `PROC` mode, you can omit the `BEGIN DECLARE…END DECLARE` section. For more information about declaring host variables, see [Declaring host variables](#declaring-host-variables).
+
+The data type associated with each variable in the declaration section is a C data type. Data passed between the server and the client application must share a compatible data type. For more information about data types, see the [Supported C data types](/epas/latest/reference/application_programmer_reference/07_reference/).
+
+The next statement tells ECPGPlus how to handle an error:
+
+```sql
+EXEC SQL WHENEVER SQLERROR sqlprint;
+```
+
+If the client application encounters an error in the SQL code, it prints an error message to `stderr` (standard error), using the `sqlprint()` function supplied with `ecpglib`. The next `EXEC SQL` statement establishes a connection with EDB Postgres Advanced Server:
+
+```sql
+EXEC SQL CONNECT TO edb
+    USER 'alice' IDENTIFIED BY '1safepwd';
+```
+
+In this example, the client application connects to the `edb` database using a role named `alice` with a password of `1safepwd`.
+
+The code then performs a query against the `emp` table:
+
+```sql
+EXEC SQL
+    SELECT
+        empno, ename, sal, comm
+    INTO
+        :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind
+    FROM
+        emp
+    WHERE
+        empno = 7369;
+```
+
+The query returns information about employee number 7369.
+
+The `SELECT` statement uses an `INTO` clause to assign the retrieved values (from the `empno`, `ename`, `sal`, and `comm` columns) into the `:v_empno`, `:v_ename`, `:v_sal`, and `:v_comm` host variables (and the `:v_comm_ind` null indicator). The first value retrieved is assigned to the first variable listed in the `INTO` clause, the second value is assigned to the second variable, and so on.
+
+The `comm` column contains the commission values earned by an employee and can potentially contain a `NULL` value. The statement includes the `INDICATOR` keyword and a host variable to hold a null indicator.
+
+The code checks the null indicator and displays the appropriate results:
+
+```c
+if (v_comm_ind)
+    printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+        v_empno, v_ename, v_sal);
+else
+    printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+        v_empno, v_ename, v_sal, v_comm);
+```
+
+If the null indicator is `0` (that is, `false`), the `comm` column contains a meaningful value, and the `printf` function displays the commission. If the null indicator contains a non-zero value, `comm` is `NULL`, and `printf` displays a value of `NULL`. A host variable (other than a null indicator) contains no meaningful value if you fetch a `NULL` into that host variable. You must use null indicators to identify any value that might be `NULL`.
+
+The final statement in the code sample closes the connection to the server:
+
+```sql
+EXEC SQL DISCONNECT;
+}
+```
+
+### Using indicator variables
+
+The previous example included an *indicator variable* that identifies any row in which the value of the `comm` column (when returned by the server) was `NULL`. An indicator variable is an extra host variable that denotes whether the content of the preceding variable is `NULL` or truncated. The indicator variable is populated when the contents of a row are stored. An indicator variable can contain the following values:
+
+| Indicator value | Denotes |
+| --------------- | ------- |
+| Less than `0` | The value returned by the server was `NULL`. |
+| Equal to `0` | The value returned by the server wasn't `NULL` and wasn't truncated. |
+| Greater than `0` | The value returned by the server was truncated when stored in the host variable. |
+
+When including an indicator variable in an `INTO` clause, you don't need to include the optional `INDICATOR` keyword.
+
+You can omit an indicator variable if you're certain that a query never returns a `NULL` value into the corresponding host variable. If you omit an indicator variable and a query returns a `NULL` value, `ecpglib` raises a runtime error.
+
+### Declaring host variables
+
+You can use a *host variable* in a SQL statement at any point that a value can appear in that statement. A host variable is a C variable that you can use to pass data values from the client application to the server and return data from the server to the client application. A host variable can be:
+
+- An array
+- A `typedef`
+- A pointer
+- A `struct`
+- Any scalar C data type
+
+The code fragments that follow show how to use host variables in code compiled in `PROC` mode and in non-`PROC` mode. The SQL statement adds a row to the `dept` table, inserting the values returned by the variables `v_deptno`, `v_dname`, and `v_loc` into the `deptno` column, the `dname` column, and the `loc` column, respectively.
+
+If you're compiling in `PROC` mode, you can omit the `EXEC SQL BEGIN DECLARE SECTION` and `EXEC SQL END DECLARE SECTION` directives.
`PROC` mode permits you to use C function parameters as host variables:
+
+```c
+void addDept(int v_deptno, char *v_dname, char *v_loc)
+{
+    EXEC SQL INSERT INTO dept VALUES(:v_deptno, :v_dname, :v_loc);
+}
+```
+
+If you aren't compiling in `PROC` mode, you must wrap embedded variable declarations with the `EXEC SQL BEGIN DECLARE SECTION` and the `EXEC SQL END DECLARE SECTION` directives and copy the parameter values into the declared host variables:
+
+```c
+void addDept(int v_deptno, char *v_dname, char *v_loc)
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int  v_deptno_copy = v_deptno;
+        char v_dname_copy[14+1];
+        char v_loc_copy[13+1];
+    EXEC SQL END DECLARE SECTION;
+
+    /* Copy the string parameters into the declared host variables */
+    strcpy(v_dname_copy, v_dname);
+    strcpy(v_loc_copy, v_loc);
+
+    EXEC SQL INSERT INTO dept VALUES(:v_deptno_copy, :v_dname_copy, :v_loc_copy);
+}
+```
+
+You can also include the `INTO` clause in a `SELECT` statement to use the host variables to retrieve information:
+
+```sql
+EXEC SQL SELECT deptno, dname, loc
+    INTO :v_deptno, :v_dname, :v_loc FROM dept;
+```
+
+Each column returned by the `SELECT` statement must have a type-compatible target variable in the `INTO` clause. This is a simple example that retrieves a single row. To retrieve more than one row, you must define a cursor, as shown in the next example.
+
+## Example: Using a cursor to process a result set
+
+The code sample that follows shows using a cursor to process a result set. Four basic steps are involved in creating and using a cursor:
+
+1. Use the `DECLARE CURSOR` statement to define a cursor.
+2. Use the `OPEN CURSOR` statement to open the cursor.
+3. Use the `FETCH` statement to retrieve data from a cursor.
+4. Use the `CLOSE CURSOR` statement to close the cursor.
+
+After declaring host variables, the example connects to the `edb` database using a user-supplied role name and password and queries the `emp` table. The query returns the values into a cursor named `employees`. The code sample then opens the cursor and loops through the result set a row at a time, printing the result set. When the sample detects the end of the result set, it closes the connection.
+
+```c
+/************************************************************
+ * print_emps.pgc
+ *
+ */
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        char  *username = argv[1];
+        char  *password = argv[2];
+        int    v_empno;
+        char   v_ename[40];
+        double v_sal;
+        double v_comm;
+        short  v_comm_ind;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+
+    EXEC SQL CONNECT TO edb USER :username IDENTIFIED BY :password;
+
+    EXEC SQL DECLARE employees CURSOR FOR
+        SELECT
+            empno, ename, sal, comm
+        FROM
+            emp;
+
+    EXEC SQL OPEN employees;
+
+    EXEC SQL WHENEVER NOT FOUND DO break;
+
+    for (;;)
+    {
+        EXEC SQL FETCH NEXT FROM employees
+            INTO
+                :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind;
+
+        if (v_comm_ind)
+            printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+                v_empno, v_ename, v_sal);
+        else
+            printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+                v_empno, v_ename, v_sal, v_comm);
+    }
+    EXEC SQL CLOSE employees;
+    EXEC SQL DISCONNECT;
+}
+/************************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` library and then declares the `main` function:
+
+```c
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+```
+### DECLARE
+
+Next, the application declares a set of host variables used to interact with the database server:
+
+```sql
+EXEC SQL BEGIN DECLARE SECTION;
+    char  *username = argv[1];
+    char  *password = argv[2];
+    int    v_empno;
+    char   v_ename[40];
+    double v_sal;
+    double v_comm;
+    short  v_comm_ind;
+EXEC SQL END DECLARE SECTION;
+```
+
+`argv[]` is an array that contains the command line arguments entered when the user runs the client application. `argv[1]` contains the first command line argument (in this case, a `username`), and `argv[2]` contains the second command line argument (a `password`). The example omits the error-checking code you would normally include in a real-world application. The declaration initializes the values of `username` and `password`, setting them to the values entered when the user invoked the client application.
+
+You might think that you can refer to `argv[1]` and `argv[2]` in a SQL statement instead of creating a separate copy of each variable. However, that doesn't work. All host variables must be declared in a `BEGIN/END DECLARE SECTION`, unless you're compiling in `PROC` mode. Since `argv` is a function *parameter* (not an automatic variable), you can't declare it in a `BEGIN/END DECLARE SECTION`. If you're compiling in `PROC` mode, you can refer to any C variable in a SQL statement.
+
+The next statement tells ECPGPlus to respond to a SQL error by printing the text of the error message returned by ECPGPlus or the database server:
+
+```sql
+EXEC SQL WHENEVER SQLERROR sqlprint;
+```
+
+Then, the client application establishes a connection with EDB Postgres Advanced Server:
+
+```sql
+EXEC SQL CONNECT TO edb USER :username IDENTIFIED BY :password;
+```
+
+The `CONNECT` statement creates a connection to the `edb` database, using the values found in the `:username` and `:password` host variables to authenticate the application to the server when connecting.
+
+The next statement declares a cursor named `employees`:
+
+```sql
+EXEC SQL DECLARE employees CURSOR FOR
+    SELECT
+        empno, ename, sal, comm
+    FROM
+        emp;
+```
+
+`employees` contains the result set of a `SELECT` statement on the `emp` table.
The query returns employee information from the following columns: `empno`, `ename`, `sal`, and `comm`. Notice that when you declare a cursor, you don't include an `INTO` clause. Instead, you specify the target variables (or descriptors) when you `FETCH` from the cursor.
+
+### OPEN
+
+Before fetching rows from the cursor, the client application must `OPEN` the cursor:
+
+```sql
+EXEC SQL OPEN employees;
+```
+
+In the subsequent `FETCH` section, the client application loops through the contents of the cursor. The client application includes a `WHENEVER` statement that instructs ECPGPlus to `break` (that is, terminate the loop) when it reaches the end of the cursor:
+
+```sql
+EXEC SQL WHENEVER NOT FOUND DO break;
+```
+### FETCH
+
+The client application then uses a `FETCH` statement to retrieve each row from the cursor `INTO` the previously declared host variables:
+
+```c
+for (;;)
+{
+    EXEC SQL FETCH NEXT FROM employees
+        INTO
+            :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind;
+```
+
+The `FETCH` statement uses an `INTO` clause to assign the retrieved values into the `:v_empno`, `:v_ename`, `:v_sal`, and `:v_comm` host variables (and the `:v_comm_ind` null indicator). The first value in the cursor is assigned to the first variable listed in the `INTO` clause, the second value is assigned to the second variable, and so on.
+
+The `FETCH` statement also includes the `INDICATOR` keyword and a host variable to hold a null indicator. If the `comm` column for the retrieved record contains a `NULL` value, `v_comm_ind` is set to a non-zero value, indicating that the column is `NULL`.
+
+The code then checks the null indicator and displays the appropriate results:
+
+```c
+if (v_comm_ind)
+    printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+        v_empno, v_ename, v_sal);
+else
+    printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+        v_empno, v_ename, v_sal, v_comm);
+}
+```
+
+If the null indicator is `0` (that is, `false`), `v_comm` contains a meaningful value, and the `printf` function displays the commission. If the null indicator contains a non-zero value, `comm` is `NULL`, and `printf` displays the string `'NULL'`. A host variable (other than a null indicator) contains no meaningful value if you fetch a `NULL` into that host variable. You must use null indicators for any value that might be `NULL`.
+
+### CLOSE
+
+The final statements in the code sample close the cursor (`employees`) and the connection to the server:
+
+```sql
+EXEC SQL CLOSE employees;
+EXEC SQL DISCONNECT;
+```
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/04_using_descriptors.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/04_using_descriptors.mdx
new file mode 100644
index 00000000000..d4fee0c21d4
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/04_using_descriptors.mdx
+---
+title: "Using descriptors"
+description: "Describes the process for executing SQL statements that are composed at runtime"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.13.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.14.html" +redirects: + - /epas/latest/ecpgplus_guide/04_using_descriptors/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Dynamic SQL allows a client application to execute SQL statements that are composed at runtime. This ability is useful when you don't know the content or form for a statement when you're writing a client application. ECPGPlus doesn't allow you to use a host variable in place of an identifier (such as a table name, column name, or index name). Instead, use dynamic SQL statements to build a string that includes the information, and then execute that string. The string is passed between the client and the server in the form of a *descriptor*. A descriptor is a data structure that contains both the data and the information about the shape of the data. + +## Overview of the client application flow + +A client application must use a `GET DESCRIPTOR` statement to retrieve information from a descriptor. The basic flow of a client application using dynamic SQL is: + +1. Use an `ALLOCATE DESCRIPTOR` statement to allocate a descriptor for the result set (select list). +2. Use an `ALLOCATE DESCRIPTOR` statement to allocate a descriptor for the input parameters (bind variables). +3. Obtain, assemble, or compute the text of an SQL statement. +4. Use a `PREPARE` statement to parse and check the syntax of the SQL statement. +5. Use a `DESCRIBE` statement to describe the select list into the select-list descriptor. +6. Use a `DESCRIBE` statement to describe the input parameters into the bind-variables descriptor. +7. Prompt the user (if required) for a value for each input parameter. Use a `SET DESCRIPTOR` statement to assign the values into a descriptor. +8. Use a `DECLARE CURSOR` statement to define a cursor for the statement. +9. Use an `OPEN CURSOR` statement to open a cursor for the statement. +10. Use a `FETCH` statement to fetch each row from the cursor, storing each row in select-list descriptor. +11. Use a `GET DESCRIPTOR` command to interrogate the select-list descriptor to find the value of each column in the current row. +12. Use a `CLOSE CURSOR` statement to close the cursor and free any cursor resources. + +## Descriptor attributes + +A descriptor can contain these attributes. + +| Field | Type | Attribute description | +| ----------------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `CARDINALITY` | `integer` | The number of rows in the result set. | +| `DATA` | N/A | The data value. | +| `DATETIME_INTERVAL_CODE` | `integer` | If `TYPE` is `9`:

+## Descriptor attributes
+
+A descriptor can contain these attributes.
+
+| Field | Type | Attribute description |
+| ----- | ---- | --------------------- |
+| `CARDINALITY` | `integer` | The number of rows in the result set. |
+| `DATA` | N/A | The data value. |
+| `DATETIME_INTERVAL_CODE` | `integer` | If `TYPE` is `9`: `1 - DATE`, `2 - TIME`, `3 - TIMESTAMP`, `4 - TIME WITH TIMEZONE`, `5 - TIMESTAMP WITH TIMEZONE` |
+| `DATETIME_INTERVAL_PRECISION` | `integer` | Unused. |
+| `INDICATOR` | `integer` | Indicates a `NULL` or truncated value. |
+| `KEY_MEMBER` | `integer` | Unused (returns `FALSE`). |
+| `LENGTH` | `integer` | The data length (as stored on server). |
+| `NAME` | `string` | The name of the column in which the data resides. |
+| `NULLABLE` | `integer` | Unused (returns `TRUE`). |
+| `OCTET_LENGTH` | `integer` | The data length (in bytes) as stored on server. |
+| `PRECISION` | `integer` | The data precision (if the data is of `numeric` type). |
+| `RETURNED_LENGTH` | `integer` | Actual length of data item. |
+| `RETURNED_OCTET_LENGTH` | `integer` | Actual length of data item. |
+| `SCALE` | `integer` | The data scale (if the data is of `numeric` type). |
+| `TYPE` | `integer` | A numeric code that represents the data type of the column: `1 - SQL3_CHARACTER`, `2 - SQL3_NUMERIC`, `3 - SQL3_DECIMAL`, `4 - SQL3_INTEGER`, `5 - SQL3_SMALLINT`, `6 - SQL3_FLOAT`, `7 - SQL3_REAL`, `8 - SQL3_DOUBLE_PRECISION`, `9 - SQL3_DATE_TIME_TIMESTAMP`, `10 - SQL3_INTERVAL`, `12 - SQL3_CHARACTER_VARYING`, `13 - SQL3_ENUMERATED`, `14 - SQL3_BIT`, `15 - SQL3_BIT_VARYING`, `16 - SQL3_BOOLEAN` |
+
+## Example: Using a descriptor to return data
+
+The following simple application executes an SQL statement entered by an end user. The code sample shows:
+
+- How to use a SQL descriptor to execute a `SELECT` statement.
+- How to find the data and metadata returned by the statement.
+
+The application accepts an SQL statement from an end user, tests the statement to see if it includes the `SELECT` keyword, and executes the statement.
+
+### Using a SQL descriptor to execute a `SELECT` statement
+
+When invoking the application, an end user must provide the name of the database on which to perform the SQL statement and a string that contains the text of the query.
+
+For example, a user might invoke the sample with the following command:
+
+```
+./exec_stmt edb "SELECT * FROM emp"
+```
+
+Sample program:
+
+```c
+/************************************************************
+ * exec_stmt.pgc
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sql3types.h>
+#include <sqlca.h>
+
+EXEC SQL WHENEVER SQLERROR SQLPRINT;
+static void print_meta_data( char * desc_name );
+
+char *md1 = "col field                data              ret";
+char *md2 = "num name                 type              len";
+char *md3 = "--- -------------------- ----------------- ---";
+
+int main( int argc, char *argv[] )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        char *db   = argv[1];
+        char *stmt = argv[2];
+        int   col_count;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL CONNECT TO :db;
+
+    EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
+    EXEC SQL PREPARE query FROM :stmt;
+    EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
+    EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT;
+
+    if( col_count == 0 )
+    {
+        EXEC SQL EXECUTE IMMEDIATE :stmt;
+
+        if( sqlca.sqlcode >= 0 )
+            EXEC SQL COMMIT;
+    }
+    else
+    {
+        int row;
+
+        EXEC SQL ALLOCATE DESCRIPTOR row_desc;
+        EXEC SQL DECLARE my_cursor CURSOR FOR query;
+        EXEC SQL OPEN my_cursor;
+
+        for( row = 0; ; row++ )
+        {
+            EXEC SQL BEGIN DECLARE SECTION;
+                int col;
+            EXEC SQL END DECLARE SECTION;
+
+            EXEC SQL FETCH IN my_cursor
+                INTO SQL DESCRIPTOR row_desc;
+
+            if( sqlca.sqlcode != 0 )
+                break;
+
+            if( row == 0 )
+                print_meta_data( "row_desc" );
+
+            printf("[RECORD %d]\n", row+1);
+
+            for( col = 1; col <= col_count; col++ )
+            {
+                EXEC SQL BEGIN DECLARE SECTION;
+                    short   ind;
+                    varchar val[40+1];
+                    varchar name[20+1];
+                EXEC SQL END DECLARE SECTION;
+
+                EXEC SQL GET DESCRIPTOR row_desc
+                    VALUE :col
+                    :val = DATA, :ind = INDICATOR, :name = NAME;
+
+                if( ind == -1 )
+                    printf( "  %-20s : <null>\n", name.arr );
+                else if( ind > 0 )
+                    printf( "  %-20s : <truncated>\n", name.arr );
+                else
+                    printf( "  %-20s : %s\n", name.arr, val.arr );
+            }
+
+            printf( "\n" );
+
+        }
+        printf( "%d rows\n", row );
+    }
+
+    exit( 0 );
+}
+
+static void print_meta_data( char *desc_name )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        char *desc = desc_name;
+        int   col_count;
+        int   col;
+    EXEC SQL END DECLARE SECTION;
+
+    static char *types[] =
+    {
+        "unused           ",
+        "CHARACTER        ",
+        "NUMERIC          ",
+        "DECIMAL          ",
+        "INTEGER          ",
+        "SMALLINT         ",
+        "FLOAT            ",
+        "REAL             ",
+        "DOUBLE           ",
+        "DATE_TIME        ",
+        "INTERVAL         ",
+        "unused           ",
+        "CHARACTER_VARYING",
+        "ENUMERATED       ",
+        "BIT              ",
+        "BIT_VARYING      ",
+        "BOOLEAN          ",
+        "abstract         "
+    };
+
+    EXEC SQL GET DESCRIPTOR :desc :col_count = count;
+
+    printf( "%s\n", md1 );
+    printf( "%s\n", md2 );
+    printf( "%s\n", md3 );
+
+    for( col = 1; col <= col_count; col++ )
+    {
+        EXEC SQL BEGIN DECLARE SECTION;
+            int     type;
+            int     ret_len;
+            varchar name[21];
+        EXEC SQL END DECLARE SECTION;
+        char *type_name;
+
+        EXEC SQL GET DESCRIPTOR :desc
+            VALUE :col
+            :name    = NAME,
+            :type    = TYPE,
+            :ret_len = RETURNED_OCTET_LENGTH;
+
+        if( type > 0 && type < SQL3_abstract )
+            type_name = types[type];
+        else
+            type_name = "unknown";
+
+        printf( "%02d: %-20s %-17s %04d\n",
+            col, name.arr, type_name, ret_len );
+    }
+    printf( "\n" );
+}
+
+/************************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` and `stdlib` libraries, SQL data type symbols, and the `SQLCA` (SQL communications area) structure:
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <sql3types.h>
+#include <sqlca.h>
+```
+
+The sample provides minimal error handling. When the application encounters a SQL error, it prints the error message to screen:
+
+```sql
+EXEC SQL WHENEVER SQLERROR SQLPRINT;
+```
+
+### Finding the data and metadata returned by the statement
+
+The application includes a forward-declaration for a function named `print_meta_data()` that prints the metadata found in a descriptor:
+
+```c
+static void print_meta_data( char * desc_name );
+```
+
+The following code specifies the column header information that the application uses when printing the metadata:
+
+```c
+char *md1 = "col field                data              ret";
+char *md2 = "num name                 type              len";
+char *md3 = "--- -------------------- ----------------- ---";
+
+int main( int argc, char *argv[] )
+{
+```
+
+The following declaration section identifies the host variables that contain the name of the database the application connects to, the content of the SQL statement, and a host variable for the number of columns in the result set (if any):
+
+```sql
+EXEC SQL BEGIN DECLARE SECTION;
+    char *db   = argv[1];
+    char *stmt = argv[2];
+    int   col_count;
+EXEC SQL END DECLARE SECTION;
+```
+
+The application connects to the database using the default credentials:
+
+```sql
+EXEC SQL CONNECT TO :db;
+```
+
+Next, the application allocates a SQL descriptor to hold the metadata for a statement:
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
+```
+
+The application uses a `PREPARE` statement to check the syntax of the string provided by the user:
+
+```sql
+EXEC SQL PREPARE query FROM :stmt;
+```
+
+It also uses a `DESCRIBE` statement to move the metadata for the query into the SQL descriptor:
+
+```sql
+EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
+```
+
+Then, the application interrogates the descriptor to discover the number of columns in the result set and stores that in the host variable `col_count`:
+
+```sql
+EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT;
+```
+
+If the column count is zero, the end user didn't enter a `SELECT` statement.
The application uses an `EXECUTE IMMEDIATE` statement to process the contents of the statement:
+
+```c
+if( col_count == 0 )
+{
+    EXEC SQL EXECUTE IMMEDIATE :stmt;
+```
+
+If the statement executes successfully, the application performs a `COMMIT`:
+
+```c
+    if( sqlca.sqlcode >= 0 )
+        EXEC SQL COMMIT;
+}
+else
+{
+```
+
+If the statement entered by the user is a `SELECT` statement (which we know because the column count is non-zero), the application declares a variable named `row`:
+
+```c
+    int row;
+```
+
+Then, the application allocates another descriptor that holds the description and the values of a specific row in the result set:
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR row_desc;
+```
+
+The application declares and opens a cursor for the prepared statement:
+
+```sql
+EXEC SQL DECLARE my_cursor CURSOR FOR query;
+EXEC SQL OPEN my_cursor;
+```
+
+It loops through the rows in the result set:
+
+```c
+for( row = 0; ; row++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int col;
+    EXEC SQL END DECLARE SECTION;
+```
+
+Then, it uses a `FETCH` to retrieve the next row from the cursor into the descriptor:
+
+```sql
+EXEC SQL FETCH IN my_cursor INTO SQL DESCRIPTOR row_desc;
+```
+
+The application confirms that the `FETCH` didn't fail. If the `FETCH` fails, the application reached the end of the result set and breaks the loop:
+
+```c
+if( sqlca.sqlcode != 0 )
+    break;
+```
+
+The application checks to see if this is the first row of the cursor. If it is, the application prints the metadata for the row:
+
+```c
+if( row == 0 )
+    print_meta_data( "row_desc" );
+```
+
+Next, it prints a record header containing the row number:
+
+```c
+printf("[RECORD %d]\n", row+1);
+```
+
+Then, it loops through each column in the row:
+
+```c
+for( col = 1; col <= col_count; col++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        short   ind;
+        varchar val[40+1];
+        varchar name[20+1];
+    EXEC SQL END DECLARE SECTION;
+```
+
+The application interrogates the row descriptor (`row_desc`) to copy the column value `:val`, null indicator `:ind`, and column name `:name` into the host variables declared earlier. You can retrieve multiple items from a descriptor using a comma-separated list:
+
+```sql
+EXEC SQL GET DESCRIPTOR row_desc
+    VALUE :col
+    :val = DATA, :ind = INDICATOR, :name = NAME;
+```
+
+If the null indicator (`ind`) is negative, the column value is `NULL`. If the null indicator is greater than `0`, the column value is too long to fit into the `val` host variable, so we print `<truncated>`. Otherwise, the null indicator is `0`, meaning `NOT NULL`, so we print the value. In each case, we prefix the value (or `<null>` or `<truncated>`) with the name of the column.
+
+```c
+if( ind == -1 )
+    printf( "  %-20s : <null>\n", name.arr );
+else if( ind > 0 )
+    printf( "  %-20s : <truncated>\n", name.arr );
+else
+    printf( "  %-20s : %s\n", name.arr, val.arr );
+}
+
+printf( "\n" );
+}
+```
+
+When the loop terminates, the application prints the number of rows fetched and exits:
+
+```c
+    printf( "%d rows\n", row );
+    }
+
+exit( 0 );
+}
+```
+
+The `print_meta_data()` function extracts the metadata from a descriptor and prints the name, data type, and length of each column:
+
+```c
+static void print_meta_data( char *desc_name )
+{
+```
+
+The application declares host variables:
+
+```sql
+EXEC SQL BEGIN DECLARE SECTION;
+    char *desc = desc_name;
+    int   col_count;
+    int   col;
+EXEC SQL END DECLARE SECTION;
+```
+
+The application then defines an array of character strings that map data type values (`numeric`) into data type names.
We use the numeric value found in the descriptor to index into this array. For example, if we find that a given column is of type `2`, we can find the name of that type (`NUMERIC`) by writing `types[2]`. + +```c +static char *types[] = +{ + "unused ", + "CHARACTER ", + "NUMERIC ", + "DECIMAL ", + "INTEGER ", + "SMALLINT ", + "FLOAT ", + "REAL ", + "DOUBLE ", + "DATE_TIME ", + "INTERVAL ", + "unused ", + "CHARACTER_VARYING", + "ENUMERATED ", + "BIT ", + "BIT_VARYING ", + "BOOLEAN ", + "abstract " +}; +``` + +The application retrieves the column count from the descriptor. The program refers to the descriptor using a host variable (`desc`) that contains the name of the descriptor. In most scenarios, you use an identifier to refer to a descriptor. In this case, the caller provided the descriptor name, so we can use a host variable to refer to the descriptor. + +```sql +EXEC SQL GET DESCRIPTOR :desc :col_count = count; +``` + +The application prints the column headers defined at the beginning of this application: + +```c +printf( "%s\n", md1 ); +printf( "%s\n", md2 ); +printf( "%s\n", md3 ); +``` + +Then, it loops through each column found in the descriptor and prints the name, type, and length of each column. + +```c +for( col = 1; col <= col_count; col++ ) +{ + EXEC SQL BEGIN DECLARE SECTION; + int type; + int ret_len; + varchar name[21]; + EXEC SQL END DECLARE SECTION; + char *type_name; +``` + +It retrieves the name, type code, and length of the current column: + +```sql +EXEC SQL GET DESCRIPTOR :desc + VALUE :col + :name = NAME, + :type = TYPE, + :ret_len = RETURNED_OCTET_LENGTH; +``` + +If the numeric type code matches a 'known' type code (that is, a type code found in the `types[]` array), it sets `type_name` to the name of the corresponding type. 
Otherwise, it sets `type_name` to `"unknown"`:
+
+```c
+if( type > 0 && type < SQL3_abstract )
+    type_name = types[type];
+else
+    type_name = "unknown";
+```
+
+It then prints the column number, name, type name, and length:
+
+```c
+        printf( "%02d: %-20s %-17s %04d\n",
+            col, name.arr, type_name, ret_len );
+    }
+    printf( "\n" );
+}
+```
+
+Invoke the sample application with the following command:
+
+```
+./exec_stmt test "SELECT * FROM emp WHERE empno IN(7902, 7934)"
+```
+
+The application returns:
+
+```sql
+__OUTPUT__
+col field                data              ret
+num name                 type              len
+--- -------------------- ----------------- ---
+01: empno                NUMERIC           0004
+02: ename                CHARACTER_VARYING 0004
+03: job                  CHARACTER_VARYING 0007
+04: mgr                  NUMERIC           0004
+05: hiredate             DATE_TIME         0018
+06: sal                  NUMERIC           0007
+07: comm                 NUMERIC           0000
+08: deptno               NUMERIC           0002
+
+[RECORD 1]
+  empno                : 7902
+  ename                : FORD
+  job                  : ANALYST
+  mgr                  : 7566
+  hiredate             : 03-DEC-81 00:00:00
+  sal                  : 3000.00
+  comm                 : <null>
+  deptno               : 20
+
+[RECORD 2]
+  empno                : 7934
+  ename                : MILLER
+  job                  : CLERK
+  mgr                  : 7782
+  hiredate             : 23-JAN-82 00:00:00
+  sal                  : 1300.00
+  comm                 : <null>
+  deptno               : 10
+
+2 rows
+```
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx
new file mode 100644
index 00000000000..2866085e93d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx
+---
+title: "Building and executing dynamic SQL statements"
+description: "Outlines four techniques for building and executing dynamic SQL statements"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.18.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.15.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.17.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.16.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.19.html"
+redirects:
+  - /epas/latest/ecpgplus_guide/05_building_executing_dynamic_sql_statements/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+The following examples show four techniques for building and executing dynamic SQL statements. Each example shows processing a different combination of statement and input types:
+
+- The [first example](#executing_a_nonquery_statement_without_parameters) shows processing and executing a SQL statement that doesn't contain a `SELECT` statement and doesn't require input variables.
This example corresponds to the techniques used by Oracle Dynamic SQL Method 1.
+- The [second example](#executing_a_nonquery_statement_with_a_specified_number_of_placeholders) shows processing and executing a SQL statement that doesn't contain a `SELECT` statement and contains a known number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 2.
+- The [third example](#executing_a_query_statement_with_known_number_of_placeholders) shows processing and executing a SQL statement that might contain a `SELECT` statement and includes a known number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 3.
+- The [fourth example](#executing_query_with_unknown_number_of_variables) shows processing and executing a SQL statement that might contain a `SELECT` statement and includes an unknown number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 4.
+
+
+
+## Example: Executing a nonquery statement without parameters
+
+This example shows how to use the `EXECUTE IMMEDIATE` command to execute a SQL statement, where the text of the statement isn't known until you run the application. You can't use `EXECUTE IMMEDIATE` to execute a statement that returns a result set or a statement that contains parameter placeholders.
+
+The `EXECUTE IMMEDIATE` statement parses and plans the SQL statement each time it executes, which can have a negative impact on the performance of your application. If you plan to execute the same statement repeatedly, consider using the `PREPARE/EXECUTE` technique described in [Example: Executing a nonquery statement with a specified number of placeholders](#example-executing-a-nonquery-statement-with-a-specified-number-of-placeholders).
+
+```c
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    char *insertStmt;
+
+    EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+    EXEC SQL CONNECT :argv[1];
+
+    insertStmt = "INSERT INTO dept VALUES(50, 'ACCTG', 'SEATTLE')";
+
+    EXEC SQL EXECUTE IMMEDIATE :insertStmt;
+
+    fprintf(stderr, "ok\n");
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+
+static void handle_error(void)
+{
+    fprintf(stderr, "%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, and `stdlib` libraries and providing basic infrastructure for the program:
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    char *insertStmt;
+```
+
+The example then sets up an error handler.
+
+<div id="executing_a_nonquery_statement_with_a_specified_number_of_placeholders" class="registered_link"></div>
+
+## Example: Executing a nonquery statement with a specified number of placeholders
+
+To execute a nonquery command that includes a known number of parameter placeholders, you must first `PREPARE` the statement (providing a *statement handle*) and then `EXECUTE` the statement using the statement handle. When the application executes the statement, it must provide a value for each placeholder found in the statement.
+
+When an application uses the `PREPARE/EXECUTE` mechanism, each SQL statement is parsed and planned once but might execute many times, providing different values each time.
+
+ECPGPlus converts each parameter value to the type required by the SQL statement, if possible. Otherwise, ECPGPlus reports an error.
+
+```c
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+  char *stmtText;
+
+  EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+  EXEC SQL CONNECT :argv[1];
+
+  stmtText = "INSERT INTO dept VALUES(?, ?, ?)";
+
+  EXEC SQL PREPARE stmtHandle FROM :stmtText;
+
+  EXEC SQL EXECUTE stmtHandle USING :argv[2], :argv[3], :argv[4];
+
+  fprintf(stderr, "ok\n");
+
+  EXEC SQL COMMIT RELEASE;
+
+  exit(EXIT_SUCCESS);
+}
+
+static void handle_error(void)
+{
+  printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+  EXEC SQL WHENEVER SQLERROR CONTINUE;
+  EXEC SQL ROLLBACK RELEASE;
+
+  exit(EXIT_FAILURE);
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, `stdlib`, and `sqlca` libraries and providing basic infrastructure for the program:
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+  char *stmtText;
+```
+
+The example then sets up an error handler. ECPGPlus calls the `handle_error()` function whenever a SQL error occurs:
+
+```sql
+EXEC SQL WHENEVER SQLERROR DO handle_error();
+```
+
+Then, the example connects to the database using the credentials specified on the command line:
+
+```sql
+EXEC SQL CONNECT :argv[1];
+```
+
+Next, the program uses a `PREPARE` statement to parse and plan a statement that includes three parameter markers. If the `PREPARE` statement succeeds, it creates a statement handle that you can use to execute the statement. (In this example, the statement handle is named `stmtHandle`.) You can execute a given statement multiple times using the same statement handle.
+
+```sql
+stmtText = "INSERT INTO dept VALUES(?, ?, ?)";
+
+EXEC SQL PREPARE stmtHandle FROM :stmtText;
+```
+
+After parsing and planning the statement, the application uses the `EXECUTE` statement to execute the statement associated with the statement handle, substituting user-provided values for the parameter markers:
+
+```sql
+EXEC SQL EXECUTE stmtHandle USING :argv[2], :argv[3], :argv[4];
+```
+
+If the `EXECUTE` command fails, ECPGPlus invokes the `handle_error()` function, which terminates the application after displaying an error message to the user. If the `EXECUTE` command succeeds, the application displays a message (`ok`) to the user, commits the changes, disconnects from the server, and terminates:
+
+```c
+  fprintf(stderr, "ok\n");
+
+  EXEC SQL COMMIT RELEASE;
+
+  exit(EXIT_SUCCESS);
+}
+```
+
+ECPGPlus calls the `handle_error()` function whenever it encounters a SQL error. The `handle_error()` function prints the content of the error message, resets the error handler, rolls back any changes, disconnects from the database, and terminates the application:
+
+```c
+static void handle_error(void)
+{
+  printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+  EXEC SQL WHENEVER SQLERROR CONTINUE;
+  EXEC SQL ROLLBACK RELEASE;
+
+  exit(EXIT_FAILURE);
+}
+```
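+
+Because the statement is parsed and planned only once, the same statement handle can be executed again with different values. The following is a minimal sketch, not part of the original sample, assuming extra values are supplied on the command line:
+
+```c
+/* Hypothetical reuse of the prepared statement: each EXECUTE
+   supplies a different set of values for the three markers. */
+EXEC SQL EXECUTE stmtHandle USING :argv[2], :argv[3], :argv[4];
+EXEC SQL EXECUTE stmtHandle USING :argv[5], :argv[6], :argv[7];
+```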
+
+<div id="executing_a_query_statement_with_known_number_of_placeholders" class="registered_link"></div>
+
+## Example: Executing a query with a known number of placeholders
+
+This example shows how to execute a query with a known number of input parameters and with a known number of columns in the result set. This method uses the `PREPARE` statement to parse and plan a query and then opens a cursor and iterates through the result set.
+
+```c
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+  VARCHAR empno[10];
+  VARCHAR ename[20];
+
+  EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+  EXEC SQL CONNECT :argv[1];
+
+  EXEC SQL PREPARE queryHandle
+    FROM "SELECT empno, ename FROM emp WHERE deptno = ?";
+
+  EXEC SQL DECLARE empCursor CURSOR FOR queryHandle;
+
+  EXEC SQL OPEN empCursor USING :argv[2];
+
+  EXEC SQL WHENEVER NOT FOUND DO break;
+
+  while(true)
+  {
+    EXEC SQL FETCH empCursor INTO :empno, :ename;
+
+    printf("%-10s %s\n", empno.arr, ename.arr);
+  }
+
+  EXEC SQL CLOSE empCursor;
+
+  EXEC SQL COMMIT RELEASE;
+
+  exit(EXIT_SUCCESS);
+}
+
+static void handle_error(void)
+{
+  printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+  EXEC SQL WHENEVER SQLERROR CONTINUE;
+  EXEC SQL ROLLBACK RELEASE;
+
+  exit(EXIT_FAILURE);
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, `stdlib`, `stdbool`, and `sqlca` libraries and providing basic infrastructure for the program:
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+  VARCHAR empno[10];
+  VARCHAR ename[20];
+```
+
+The example then sets up an error handler. ECPGPlus calls the `handle_error()` function whenever a SQL error occurs:
+
+```sql
+EXEC SQL WHENEVER SQLERROR DO handle_error();
+```
+
+Then, the example connects to the database using the credentials specified on the command line:
+
+```sql
+EXEC SQL CONNECT :argv[1];
+```
+
+Next, the program uses a `PREPARE` statement to parse and plan a query that includes a single parameter marker. If the `PREPARE` statement succeeds, it creates a statement handle that you can use to execute the statement. (In this example, the statement handle is named `queryHandle`.) You can execute a given statement multiple times using the same statement handle.
+
+```sql
+EXEC SQL PREPARE queryHandle
+  FROM "SELECT empno, ename FROM emp WHERE deptno = ?";
+```
+
+The program then declares and opens the cursor `empCursor`, substituting a user-provided value for the parameter marker in the prepared `SELECT` statement. The `OPEN` statement includes a `USING` clause, which must provide a value for each placeholder found in the query:
+
+```sql
+EXEC SQL DECLARE empCursor CURSOR FOR queryHandle;
+
+EXEC SQL OPEN empCursor USING :argv[2];
+
+EXEC SQL WHENEVER NOT FOUND DO break;
+
+while(true)
+{
+```
+
+The program iterates through the cursor and prints the employee number and name of each employee in the selected department:
+
+```sql
+  EXEC SQL FETCH empCursor INTO :empno, :ename;
+
+  printf("%-10s %s\n", empno.arr, ename.arr);
+}
+```
+
+The program then closes the cursor, commits any changes, disconnects from the server, and terminates the application:
+
+```sql
+  EXEC SQL CLOSE empCursor;
+
+  EXEC SQL COMMIT RELEASE;
+
+  exit(EXIT_SUCCESS);
+}
+```
+
+The application calls the `handle_error()` function whenever it encounters a SQL error. The `handle_error()` function prints the content of the error message, resets the error handler, rolls back any changes, disconnects from the database, and terminates the application:
+
+```c
+static void handle_error(void)
+{
+  printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+  EXEC SQL WHENEVER SQLERROR CONTINUE;
+  EXEC SQL ROLLBACK RELEASE;
+
+  exit(EXIT_FAILURE);
+}
+```
+
+<div id="executing_query_with_unknown_number_of_variables" class="registered_link"></div>
+
+## Example: Executing a query with an unknown number of variables
+
+This example shows executing a query with an unknown number of input parameters or columns in the result set. This type of query might occur when you prompt the user for the text of the query or when a query is assembled from a form on which the user chooses from a number of conditions (that is, a filter).
+
+```c
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlda.h>
+#include <sqlcpr.h>
+
+SQLDA *params;
+SQLDA *results;
+
+static void bindParams(void);
+static void displayResultSet(void);
+
+int main(int argc, char *argv[])
+{
+  EXEC SQL BEGIN DECLARE SECTION;
+    char *username = argv[1];
+    char *password = argv[2];
+    char *stmtText = argv[3];
+  EXEC SQL END DECLARE SECTION;
+
+  EXEC SQL WHENEVER SQLERROR sqlprint;
+
+  EXEC SQL CONNECT TO test
+    USER :username
+    IDENTIFIED BY :password;
+
+  params  = sqlald(20, 64, 64);
+  results = sqlald(20, 64, 64);
+
+  EXEC SQL PREPARE stmt FROM :stmtText;
+
+  EXEC SQL DECLARE dynCursor CURSOR FOR stmt;
+
+  bindParams();
+
+  EXEC SQL OPEN dynCursor USING DESCRIPTOR params;
+
+  displayResultSet();
+}
+
+static void bindParams(void)
+{
+  EXEC SQL DESCRIBE BIND VARIABLES FOR stmt INTO params;
+
+  if (params->F < 0)
+    fprintf(stderr, "Too many parameters required\n");
+  else
+  {
+    int i;
+
+    params->N = params->F;
+
+    for (i = 0; i < params->F; i++)
+    {
+      char *paramName = params->S[i];
+      int   nameLen   = params->C[i];
+      char  paramValue[255];
+
+      printf("Enter value for parameter %.*s: ",
+             nameLen, paramName);
+
+      fgets(paramValue, sizeof(paramValue), stdin);
+
+      params->T[i] = 1;    /* Data type = Character (1) */
+      params->L[i] = strlen(paramValue) - 1;
+      params->V[i] = strdup(paramValue);
+    }
+  }
+}
+
+static void displayResultSet(void)
+{
+  EXEC SQL DESCRIBE SELECT LIST FOR stmt INTO results;
+
+  if (results->F < 0)
+    fprintf(stderr, "Too many columns returned by query\n");
+  else if (results->F == 0)
+    return;
+  else
+  {
+    int col;
+
+    results->N = results->F;
+
+    for (col = 0; col < results->F; col++)
+    {
+      int null_permitted, length;
+
+      sqlnul(&results->T[col],
+             &results->T[col],
+             &null_permitted);
+
+      switch (results->T[col])
+      {
+        case 2:    /* NUMERIC */
+        {
+          int precision, scale;
+
+          sqlprc(&results->L[col], &precision, &scale);
+
+          if (precision == 0)
+            precision = 38;
+
+          length = precision + 3;
+          break;
+        }
+
+        case 12:   /* DATE */
+        {
+          length = 30;
+          break;
+        }
+
+        default:   /* Others */
+        {
+          length = results->L[col] + 1;
+          break;
+        }
+      }
+
+      results->V[col] = realloc(results->V[col], length);
+      results->L[col] = length;
+      results->T[col] = 1;
+    }
+
+    EXEC SQL WHENEVER NOT FOUND DO break;
+
+    while (1)
+    {
+      const char *delimiter = "";
+
+      EXEC SQL FETCH dynCursor USING DESCRIPTOR results;
+
+      for (col = 0; col < results->F; col++)
+      {
+        if (*results->I[col] == -1)
+          printf("%s%s", delimiter, "");
+        else
+          printf("%s%s", delimiter, results->V[col]);
+        delimiter = ", ";
+      }
+
+      printf("\n");
+    }
+  }
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, and `stdlib` libraries. In addition, the program includes the `sqlda.h` and `sqlcpr.h` header files. `sqlda.h` defines the SQLDA structure used throughout this example. `sqlcpr.h` defines a small set of functions used to interrogate the metadata found in an SQLDA structure.
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlda.h>
+#include <sqlcpr.h>
+```
+
+Next, the program declares pointers to two SQLDA structures. The first SQLDA structure (`params`) is used to describe the metadata for any parameter markers found in the dynamic query text. The second SQLDA structure (`results`) contains both the metadata and the result set obtained by executing the dynamic query.
+
+```c
+SQLDA *params;
+SQLDA *results;
+```
+
+The program then declares two helper functions, which are defined near the end of the code sample:
+
+```c
+static void bindParams(void);
+static void displayResultSet(void);
+```
+
+Next, the program declares three host variables. The first two (`username` and `password`) are used to connect to the database server. The third host variable (`stmtText`) is a NULL-terminated C string containing the text of the query to execute. The values for these three host variables are derived from the command-line arguments. When the program begins to execute, it sets up an error handler and then connects to the database server:
+
+```c
+int main(int argc, char *argv[])
+{
+  EXEC SQL BEGIN DECLARE SECTION;
+    char *username = argv[1];
+    char *password = argv[2];
+    char *stmtText = argv[3];
+  EXEC SQL END DECLARE SECTION;
+
+  EXEC SQL WHENEVER SQLERROR sqlprint;
+
+  EXEC SQL CONNECT TO test
+    USER :username
+    IDENTIFIED BY :password;
+```
+
+Next, the program calls the `sqlald()` function to allocate the memory required for each descriptor. Each descriptor contains pointers to arrays of:
+
+- Column names
+- Indicator names
+- Data types
+- Lengths
+- Data values
+
+When you allocate an `SQLDA` descriptor, you specify the maximum number of columns you expect to find in the result set (for `SELECT`-list descriptors) or the maximum number of parameters you expect to find in the dynamic query text (for bind-variable descriptors). In this case, we specify that we expect no more than 20 columns and 20 parameters. You must also specify a maximum length for each column or parameter name and each indicator variable name. In this case, we expect names to be no more than 64 bytes long.
+
+See [SQLDA structure](/epas/latest/reference/application_programmer_reference/07_reference/) for a complete description of the `SQLDA` structure.
+
+```c
+params  = sqlald(20, 64, 64);
+results = sqlald(20, 64, 64);
+```
+
+After allocating the `SELECT`-list and bind descriptors, the program prepares the dynamic statement and declares a cursor over the result set.
+
+```sql
+EXEC SQL PREPARE stmt FROM :stmtText;
+
+EXEC SQL DECLARE dynCursor CURSOR FOR stmt;
+```
+
+Next, the program calls the `bindParams()` function. The `bindParams()` function examines the bind descriptor (`params`) and prompts the user for a value to substitute in place of each parameter marker found in the dynamic query.
+
+```c
+bindParams();
+```
+
+Finally, the program opens the cursor (using any parameter values supplied by the user) and calls the `displayResultSet()` function to print the result set produced by the query:
+
+```sql
+EXEC SQL OPEN dynCursor USING DESCRIPTOR params;
+
+displayResultSet();
+}
+```
+
+The `bindParams()` function determines whether the dynamic query contains any parameter markers. If so, it prompts the user for a value for each parameter and then binds that value to the corresponding marker. The `DESCRIBE BIND VARIABLES` statement populates the `params` SQLDA structure with information describing each parameter marker:
+
+```c
+static void bindParams(void)
+{
+  EXEC SQL DESCRIBE BIND VARIABLES FOR stmt INTO params;
+```
+
+If the statement contains no parameter markers, `params->F` contains 0. If the statement contains more parameters than fit into the descriptor, `params->F` contains a negative number. In this case, the absolute value of `params->F` indicates the number of parameter markers found in the statement. If `params->F` contains a positive number, that number indicates how many parameter markers were found in the statement.
+
+```c
+if (params->F < 0)
+  fprintf(stderr, "Too many parameters required\n");
+else
+{
+  int i;
+
+  params->N = params->F;
+```
+
+Next, the program executes a loop that prompts the user for a value, iterating once for each parameter marker found in the statement:
+
+```c
+for (i = 0; i < params->F; i++)
+{
+  char *paramName = params->S[i];
+  int   nameLen   = params->C[i];
+  char  paramValue[255];
+
+  printf("Enter value for parameter %.*s: ",
+         nameLen, paramName);
+
+  fgets(paramValue, sizeof(paramValue), stdin);
+```
+
+After prompting the user for a value for a given parameter, the program binds that value to the parameter by setting:
+
+- `params->T[i]` to indicate the data type of the value
+- `params->L[i]` to the length of the value (we subtract one to trim off the trailing new-line character added by `fgets()`)
+- `params->V[i]` to point to a copy of the NULL-terminated string provided by the user
+
+```c
+  params->T[i] = 1;    /* Data type = Character (1) */
+  params->L[i] = strlen(paramValue) - 1;
+  params->V[i] = strdup(paramValue);
+  }
+ }
+}
+```
+
+The `displayResultSet()` function loops through each row in the result set and prints the value found in each column. `displayResultSet()` starts by executing a `DESCRIBE SELECT LIST` statement. This statement populates an SQLDA descriptor (`results`) with a description of each column in the result set.
+
+```c
+static void displayResultSet(void)
+{
+  EXEC SQL DESCRIBE SELECT LIST FOR stmt INTO results;
+```
+
+If the dynamic statement returns no columns (that is, the dynamic statement isn't a `SELECT` statement), `results->F` contains 0. If the statement returns more columns than fit into the descriptor, `results->F` contains a negative number. In this case, the absolute value of `results->F` indicates the number of columns returned by the statement. If `results->F` contains a positive number, that number indicates how many columns were returned by the query.
+
+```c
+if (results->F < 0)
+  fprintf(stderr, "Too many columns returned by query\n");
+else if (results->F == 0)
+  return;
+else
+{
+  int col;
+
+  results->N = results->F;
+```
+
+Next, the program enters a loop, iterating once for each column in the result set:
+
+```c
+for (col = 0; col < results->F; col++)
+{
+  int null_permitted, length;
+```
+
+To decode the type code found in `results->T`, the program invokes the `sqlnul()` function (see the description of the `T` member of the SQLDA structure in [The SQLDA structure](/epas/latest/reference/application_programmer_reference/07_reference/)). This call to `sqlnul()` modifies `results->T[col]` to contain only the type code (the nullability flag is copied to `null_permitted`). This step is needed because the `DESCRIBE SELECT LIST` statement encodes the type of each column and the nullability of each column into the `T` array.
+
+```c
+sqlnul(&results->T[col],
+       &results->T[col],
+       &null_permitted);
+```
+
+After decoding the actual data type of the column, the program modifies the results descriptor to tell ECPGPlus to return each value in the form of a NULL-terminated string. Before modifying the descriptor, the program must compute the amount of space required to hold each value. To make this computation, the program examines the maximum length of each column (`results->L[col]`) and the data type of each column (`results->T[col]`).
+
+For numeric values (where `results->T[col] = 2`), the program calls the `sqlprc()` function to extract the precision and scale from the column length. To compute the number of bytes required to hold a numeric value in string form, `displayResultSet()` starts with the precision (that is, the maximum number of digits) and adds three bytes for a sign character, a decimal point, and a NULL terminator.
+
+```c
+switch (results->T[col])
+{
+  case 2:    /* NUMERIC */
+  {
+    int precision, scale;
+
+    sqlprc(&results->L[col], &precision, &scale);
+
+    if (precision == 0)
+      precision = 38;
+
+    length = precision + 3;
+    break;
+  }
+```
+
+For date values, the program uses a hard-coded length of 30. In a real-world application, you might want to more carefully compute the amount of space required.
+
+```c
+case 12:   /* DATE */
+{
+  length = 30;
+  break;
+}
+```
+
+For a value of any type other than date or numeric, `displayResultSet()` starts with the maximum column width reported by `DESCRIBE SELECT LIST` and adds one extra byte for the NULL terminator. Again, in a real-world application you might want to include more careful calculations for other data types:
+
+```c
+default:   /* Others */
+{
+  length = results->L[col] + 1;
+  break;
+}
+}
+```
+
+After computing the amount of space required to hold a given column, the program:
+
+- Resizes (with `realloc()`) the buffer pointed to by `results->V[col]` so it's large enough to hold the value
+- Sets `results->L[col]` to indicate the number of bytes found at `results->V[col]`
+- Sets the type code for the column (`results->T[col]`) to `1` to instruct the upcoming `FETCH` statement to return the value in the form of a NULL-terminated string
+
+```c
+  results->V[col] = realloc(results->V[col], length);
+  results->L[col] = length;
+  results->T[col] = 1;
+}
+```
+
+At this point, the results descriptor is configured such that a `FETCH` statement can copy each value into an appropriately sized buffer in the form of a NULL-terminated string.
+
+Next, the program defines a new error handler to break out of the upcoming loop when the cursor is exhausted.
+
+```sql
+EXEC SQL WHENEVER NOT FOUND DO break;
+
+while (1)
+{
+  const char *delimiter = "";
+```
+
+The program executes a `FETCH` statement to fetch the next row in the cursor into the `results` descriptor. If the `FETCH` statement fails (because the cursor is exhausted), control transfers to the end of the loop because of the `EXEC SQL WHENEVER` directive found before the top of the loop.
+
+```sql
+EXEC SQL FETCH dynCursor USING DESCRIPTOR results;
+```
+
+The `FETCH` statement populates the following members of the results descriptor:
+
+- `*results->I[col]` indicates whether the column contains a NULL value (`-1`) or a non-NULL value (`0`). If the value is non-NULL but too large to fit into the space provided, the value is truncated, and `*results->I[col]` contains a positive value.
+- `results->V[col]` contains the value fetched for the given column (unless `*results->I[col]` indicates that the column value is NULL).
+- `results->L[col]` contains the length of the value fetched for the given column.
+
+Finally, `displayResultSet()` iterates through each column in the result set, examines the corresponding NULL indicator, and prints the value. The result set isn't aligned. Instead, each value is separated from the previous value by a comma.
+
+```c
+for (col = 0; col < results->F; col++)
+{
+  if (*results->I[col] == -1)
+    printf("%s%s", delimiter, "");
+  else
+    printf("%s%s", delimiter, results->V[col]);
+  delimiter = ", ";
+}
+
+printf("\n");
+}
+}
+}
+/***********************************************************/
+```
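+
+To try the finished program, you can pass the connection credentials and the query text on the command line. A hypothetical invocation (the program name, credentials, and query text are illustrative):
+
+```shell
+./dyn_query alice secret "SELECT ename, sal FROM emp WHERE deptno = ?"
+```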
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/06_error_handling.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/06_error_handling.mdx
new file mode 100644
index 00000000000..b4fa22842b3
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/06_error_handling.mdx
@@ -0,0 +1,198 @@
+---
+title: "Error handling"
+description: "Outlines the methods for detecting and handling errors in embedded SQL code"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.20.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.21.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.22.html"
+redirects:
+  - /epas/latest/ecpgplus_guide/06_error_handling/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+ECPGPlus provides two methods to detect and handle errors in embedded SQL code. A client application can:
+
+- Examine the `sqlca` data structure for error messages and supply customized error handling for your client application.
+- Include `EXEC SQL WHENEVER` directives to instruct the ECPGPlus compiler to add error-handling code.
+
+## Error handling with sqlca
+
+The SQL communications area (`sqlca`) is a global variable used by `ecpglib` to communicate information from the server to the client application. After executing a SQL statement such as an `INSERT` or `SELECT` statement, you can inspect the contents of `sqlca` to determine if the statement completed successfully or if the statement failed.
+
+`sqlca` has the following structure:
+
+```c
+struct
+{
+    char sqlcaid[8];
+    long sqlabc;
+    long sqlcode;
+    struct
+    {
+        int sqlerrml;
+        char sqlerrmc[SQLERRMC_LEN];
+    } sqlerrm;
+    char sqlerrp[8];
+    long sqlerrd[6];
+    char sqlwarn[8];
+    char sqlstate[5];
+} sqlca;
+```
+
+Use the following directive to implement `sqlca` functionality:
+
+```sql
+EXEC SQL INCLUDE sqlca;
+```
+
+If you include this directive, you don't need to `#include` the `sqlca.h` file in the client application's header declaration.
+
+The EDB Postgres Advanced Server `sqlca` structure contains the following members:
+
+- `sqlcaid` — Contains the string `"SQLCA"`.
+
+- `sqlabc` — Contains the size of the `sqlca` structure.
+
+- `sqlcode` — The `sqlcode` member was deprecated with SQL 92. EDB Postgres Advanced Server supports `sqlcode` for backward compatibility. Use the `sqlstate` member when writing new code.
+
+  `sqlcode` is an integer value. A positive `sqlcode` value indicates that the client application encountered a harmless processing condition. A negative value indicates a warning or error.
+
+  If a statement processes without error, `sqlcode` contains a value of `0`. If the client application encounters an error or warning during a statement's execution, `sqlcode` contains the last code returned.
+
+  The SQL standard defines only a positive value of 100, which indicates that the most recent SQL statement processed returned or affected no rows. Since the SQL standard doesn't define other `sqlcode` values, be aware that the values assigned to each condition can vary from database to database.
+
+`sqlerrm` is a structure embedded in `sqlca`, composed of two members:
+
+- `sqlerrml` — Contains the length of the error message currently stored in `sqlerrmc`.
+
+- `sqlerrmc` — Contains the null-terminated message text associated with the code stored in `sqlstate`. If a message exceeds 149 characters, `ecpglib` truncates the error message.
+
+`sqlerrp` contains the string `"NOT SET"`.
+
+`sqlerrd` is an array that contains six elements:
+
+- `sqlerrd[1]` — Contains the OID of the processed row (if applicable).
+
+- `sqlerrd[2]` — Contains the number of processed or returned rows.
+
+- `sqlerrd[0]`, `sqlerrd[3]`, `sqlerrd[4]`, and `sqlerrd[5]` are unused.
+
+`sqlwarn` is an array that contains 8 characters:
+
+- `sqlwarn[0]` — Contains a value of `'W'` if any other element in `sqlwarn` is set to `'W'`.
+
+- `sqlwarn[1]` — Contains a value of `'W'` if a data value was truncated when it was stored in a host variable.
+
+- `sqlwarn[2]` — Contains a value of `'W'` if the client application encounters a nonfatal warning.
+
+- `sqlwarn[3]`, `sqlwarn[4]`, `sqlwarn[5]`, `sqlwarn[6]`, and `sqlwarn[7]` are unused.
+
+`sqlstate` is a five-character array that contains a SQL-compliant status code after the execution of a statement from the client application. If a statement processes without error, `sqlstate` contains a value of `00000`. `sqlstate` isn't a null-terminated string.
+
+`sqlstate` codes are assigned in a hierarchical scheme:
+
+- The first two characters of `sqlstate` indicate the general class of the condition.
+- The last three characters of `sqlstate` indicate a specific status within the class.
+
+If the client application encounters multiple errors (or warnings) during an SQL statement's execution, `sqlstate` contains the last code returned.
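+
+For example, a client application might inspect `sqlca` directly after executing a statement. The following is a minimal sketch (the `UPDATE` statement is illustrative); because `sqlstate` isn't null-terminated, the comparison examines exactly five characters:
+
+```c
+/* Hypothetical check of sqlca after an UPDATE. */
+EXEC SQL UPDATE emp SET sal = sal * 1.1 WHERE deptno = 20;
+
+if (strncmp(sqlca.sqlstate, "00000", 5) == 0)
+    printf("updated %ld row(s)\n", sqlca.sqlerrd[2]);
+else
+    printf("error: %s\n", sqlca.sqlerrm.sqlerrmc);
+```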
+
+## List of sqlstate and sqlcode values
+
+The following table lists the `sqlstate` and `sqlcode` values, as well as the symbolic name and error description for the related condition.
+
+| sqlstate | sqlcode (deprecated) | Symbolic name | Description |
+| -------- | -------------------- | ------------- | ----------- |
+| `YE001` | `-12` | `ECPG_OUT_OF_MEMORY` | Virtual memory is exhausted. |
+| `YE002` | `-200` | `ECPG_UNSUPPORTED` | The preprocessor generated an unrecognized item. Might indicate incompatibility between the preprocessor and the library. |
+| `07001` or `07002` | `-201` | `ECPG_TOO_MANY_ARGUMENTS` | The program specifies more variables than the command expects. |
+| `07001` or `07002` | `-202` | `ECPG_TOO_FEW_ARGUMENTS` | The program specifies fewer variables than the command expects. |
+| `21000` | `-203` | `ECPG_TOO_MANY_MATCHES` | The SQL command returned multiple rows, but the statement was prepared to receive a single row. |
+| `42804` | `-204` | `ECPG_INT_FORMAT` | The host variable (defined in the C code) is of type INT, and the selected data is of a type that can't be converted into an INT. `ecpglib` uses the `strtol()` function to convert string values into numeric form. |
+| `42804` | `-205` | `ECPG_UINT_FORMAT` | The host variable (defined in the C code) is an unsigned INT, and the selected data is of a type that can't be converted into an unsigned INT. `ecpglib` uses the `strtoul()` function to convert string values into numeric form. |
+| `42804` | `-206` | `ECPG_FLOAT_FORMAT` | The host variable (defined in the C code) is of type FLOAT, and the selected data is of a type that can't be converted into a FLOAT. `ecpglib` uses the `strtod()` function to convert string values into numeric form. |
+| `42804` | `-211` | `ECPG_CONVERT_BOOL` | The host variable (defined in the C code) is of type BOOL, and the selected data can't be stored in a BOOL. |
+| `YE002` | `-212` | `ECPG_EMPTY` | The statement sent to the server was empty. |
+| `22002` | `-213` | `ECPG_MISSING_INDICATOR` | A NULL indicator variable wasn't supplied for the NULL value returned by the server. (The client application received an unexpected NULL value.) |
+| `42804` | `-214` | `ECPG_NO_ARRAY` | The server returned an array, and the corresponding host variable can't store an array. |
+| `42804` | `-215` | `ECPG_DATA_NOT_ARRAY` | The server returned a value that isn't an array into a host variable that expects an array value. |
+| `08003` | `-220` | `ECPG_NO_CONN` | The client application attempted to use a nonexistent connection. |
+| `YE002` | `-221` | `ECPG_NOT_CONN` | The client application attempted to use an allocated but closed connection. |
+| `26000` | `-230` | `ECPG_INVALID_STMT` | The statement wasn't prepared. |
+| `33000` | `-240` | `ECPG_UNKNOWN_DESCRIPTOR` | The specified descriptor isn't found. |
+| `07009` | `-241` | `ECPG_INVALID_DESCRIPTOR_INDEX` | The descriptor index is out of range. |
+| `YE002` | `-242` | `ECPG_UNKNOWN_DESCRIPTOR_ITEM` | The client application requested an invalid descriptor item (internal error). |
+| `07006` | `-243` | `ECPG_VAR_NOT_NUMERIC` | A dynamic statement returned a numeric value for a non-numeric host variable. |
+| `07006` | `-244` | `ECPG_VAR_NOT_CHAR` | A dynamic SQL statement returned a CHAR value, and the host variable isn't a CHAR. |
+| | `-400` | `ECPG_PGSQL` | The server returned an error message. The resulting message contains the error text. |
+| `08007` | `-401` | `ECPG_TRANS` | The server can't start, commit, or roll back the specified transaction. |
+| `08001` | `-402` | `ECPG_CONNECT` | The client application's attempt to connect to the database failed. |
+| `02000` | `100` | `ECPG_NOT_FOUND` | The last command retrieved or processed no rows, or you reached the end of a cursor. |
+
+## Implementing simple error handling for client applications
+
+Use the `EXEC SQL WHENEVER` directive to implement simple error handling for client applications compiled with ECPGPlus. The syntax of the directive is:
+
+```sql
+EXEC SQL WHENEVER <condition> <action>;
+```
+
+This directive instructs the ECPG compiler to insert error-handling code into your program.
+
+The code instructs the client application to perform a specified action if the client application detects a given condition. The *condition* can be one of the following:
+
+`SQLERROR`
+
+ A `SQLERROR` condition exists when `sqlca.sqlcode` is less than zero.
+
+`SQLWARNING`
+
+ A `SQLWARNING` condition exists when `sqlca.sqlwarn[0]` contains a `'W'`.
+
+`NOT FOUND`
+
+ A `NOT FOUND` condition exists when `sqlca.sqlcode` is `ECPG_NOT_FOUND` (when a query returns no data).
+
+You can specify that the client application perform one of the following *actions* if it encounters one of the previous conditions:
+
+`CONTINUE`
+
+ Specify `CONTINUE` to instruct the client application to continue processing, ignoring the current `condition`. `CONTINUE` is the default action.
+
+`DO CONTINUE`
+
+ An action of `DO CONTINUE` generates a `CONTINUE` statement in the emitted C code. If the client application encounters the condition, it skips the rest of the code in the loop and continues with the next iteration. You can use it only in a loop.
+
+`GOTO label` or `GO TO label`
+
+ Use a C `goto` statement to jump to the specified `label`.
+
+`SQLPRINT`
+
+ Print an error message to `stderr` (standard error), using the `sqlprint()` function. The `sqlprint()` function prints `sql error` followed by the contents of `sqlca.sqlerrm.sqlerrmc`.
+
+`STOP`
+
+ Call `exit(1)` to signal an error and terminate the program.
+
+`DO BREAK`
+
+ Execute the C `break` statement. Use this action in loops or `switch` statements.
+
+`CALL name(args)` or `DO name(args)`
+
+ Invoke the C function specified by the `name` parameter, using the parameters specified in the `args` parameter.
+
+## Example
+
+The following code fragment prints a message if the client application encounters a warning and aborts the application if it encounters an error:
+
+```sql
+EXEC SQL WHENEVER SQLWARNING SQLPRINT;
+EXEC SQL WHENEVER SQLERROR STOP;
+```
+
+!!! Note
+    The ECPGPlus compiler processes your program from top to bottom, even though the client application might not execute from top to bottom. The compiler directive is applied to each line in order and remains in effect until the compiler encounters another directive. If the flow of control in your program isn't top to bottom, consider adding error-handling directives to any parts of the program that might be missed during compiling.
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/index.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/index.mdx
new file mode 100644
index 00000000000..ffc3d72ba2f
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/index.mdx
@@ -0,0 +1,36 @@
+---
+navTitle: Including embedded SQL commands
+title: "Including embedded SQL commands"
+indexCards: simple
+description: "How to use ECPGPlus to compile applications"
+navigation:
+  - 02_overview
+  - installing_ecpgplus
+  - 03_using_embedded_sql
+  - 04_using_descriptors
+  - 05_building_executing_dynamic_sql_statements
+  - 06_error_handling
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/toc.html"
+redirects:
+  - /epas/latest/ecpgplus_guide/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+EDB enhanced ECPG (the PostgreSQL precompiler) to create ECPGPlus. ECPGPlus allows you to include Pro\*C-compatible embedded SQL commands in C applications when connected to an EDB Postgres Advanced Server database. When you use ECPGPlus to compile an application, the SQL code syntax is checked and translated into C.
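+
+For example, an embedded SQL command written in the source file is checked and rewritten by the precompiler into calls to the ECPG runtime library. A minimal sketch (the host variable and query are illustrative):
+
+```c
+/* Embedded SQL as written in the source file before precompilation. */
+EXEC SQL SELECT ename INTO :name FROM emp WHERE empno = 7369;
+```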
+ +ECPGPlus supports: + +- Oracle Dynamic SQL – Method 4 (ODS-M4) +- Pro\*C-compatible anonymous blocks +- A `CALL` statement compatible with Oracle databases + +As part of ECPGPlus's Pro\*C compatibility, you don't need to include the `BEGIN DECLARE SECTION` and `END DECLARE SECTION` directives. + +While most ECPGPlus statements work with community PostgreSQL, the `CALL` statement and the `EXECUTE…END EXEC` statement work only when the client application is connected to EDB Postgres Advanced Server. + +
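+
+For example, a client application connected to EDB Postgres Advanced Server can invoke a stored procedure with the Oracle-compatible `CALL` statement. A hypothetical sketch (the procedure name and argument are illustrative):
+
+```c
+/* Hypothetical CALL of a stored procedure that takes one argument. */
+EXEC SQL CALL update_emp_salary(:empno);
+```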
diff --git a/product_docs/docs/epas/17/application_programming/ecpgplus_guide/installing_ecpgplus.mdx b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/installing_ecpgplus.mdx
new file mode 100644
index 00000000000..29605d0a77d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/ecpgplus_guide/installing_ecpgplus.mdx
@@ -0,0 +1,145 @@
+---
+title: "Installing and configuring ECPGPlus"
+description: "Provides an overview of the ECPGPlus precompiler capabilities"
+---
+
+On Windows, ECPGPlus is installed by the EDB Postgres Advanced Server installation wizard as part of the Database Server component. On Linux, you install ECPGPlus with an EDB Postgres Advanced Server package.
+
+## Installing ECPGPlus
+
+On Linux, install the `edb-as<xx>-server-devel` RPM package, where `<xx>` is the EDB Postgres Advanced Server version number. On Linux, the executable is located in:
+
+```text
+/usr/edb/as17/bin
+```
+
+On Windows, the executable is located in:
+
+```text
+C:\Program Files\edb\as17\bin
+```
+
+When invoking the ECPGPlus compiler, the executable must be in your search path (`%PATH%` on Windows, `$PATH` on Linux). For example, the following commands set the search path to include the directory that holds the ECPGPlus executable file `ecpg`.
+
+On Windows:
+
+```shell
+set EDB_PATH=C:\Program Files\edb\as17\bin
+set PATH=%EDB_PATH%;%PATH%
+```
+
+On Linux:
+
+```shell
+export EDB_PATH=/usr/edb/as17/bin
+export PATH=$EDB_PATH:$PATH
+```
+
+## Constructing a makefile
+
+A makefile contains a set of instructions that tell the make utility how to transform a program written in C that contains embedded SQL into a C program. To try the examples, you need:
+
+- A C compiler and linker
+- The make utility
+- ECPGPlus preprocessor and library
+- A makefile that contains instructions for ECPGPlus
+
+The following code is an example of a makefile for the samples included in this documentation. To use the sample code, save it in a file named `makefile` in the directory that contains the source code file.
+
+```makefile
+INCLUDES = -I$(shell pg_config --includedir)
+LIBPATH = -L $(shell pg_config --libdir)
+CFLAGS += $(INCLUDES) -g
+LDFLAGS += -g
+LDLIBS += $(LIBPATH) -lecpg -lpq
+
+.SUFFIXES: .pgc .pc
+
+.pgc.c:
+	ecpg -c $(INCLUDES) $?
+
+.pc.c:
+	ecpg -C PROC -c $(INCLUDES) $?
+```
+
+The first two lines use the pg_config program to locate the necessary header files and library directories:
+
+```makefile
+INCLUDES = -I$(shell pg_config --includedir)
+LIBPATH = -L $(shell pg_config --libdir)
+```
+
+The pg_config program is shipped with EDB Postgres Advanced Server.
+
+make knows to use the `CFLAGS` variable when running the C compiler and `LDFLAGS` and `LDLIBS` when invoking the linker. ECPG programs must be linked against the ECPG runtime library (`-lecpg`) and the libpq library (`-lpq`).
+
+```makefile
+CFLAGS += $(INCLUDES) -g
+LDFLAGS += -g
+LDLIBS += $(LIBPATH) -lecpg -lpq
+```
+
+The sample makefile tells make how to translate a `.pgc` or a `.pc` file into a C program. Two lines in the makefile specify the mode in which the source file is compiled. The first compile option is:
+
+```makefile
+.pgc.c:
+	ecpg -c $(INCLUDES) $?
+```
+
+The first option tells make how to transform a file that ends in `.pgc` (presumably, an ECPG source file) into a file that ends in `.c` (a C program), using community ECPG, without the ECPGPlus enhancements. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code into C, using the value of the `INCLUDES` variable and the name of the `.pgc` file.
+
+```makefile
+.pc.c:
+	ecpg -C PROC -c $(INCLUDES) $?
+```
+
+The second option tells make how to transform a file that ends in `.pc` (an ECPG source file) into a file that ends in `.c` (a C program) using the ECPGPlus extensions. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code to C. It also uses the `-C PROC` flag, which instructs the compiler to use ECPGPlus in Pro\*C-compatibility mode, using the value of the `INCLUDES` variable and the name of the `.pc` file.
+
+When you run make, pass the name of the ECPG source code file you want to compile. For example, to compile an ECPG source code file named `customer_list.pgc`, use the command:
+
+```shell
+make customer_list
+```
+
+The make utility:
+
+1. Consults the makefile located in the current directory.
+1. Discovers that the makefile contains a rule that compiles `customer_list.pgc` into a C program (`customer_list.c`).
+1. Uses the rules built into `make` to compile `customer_list.c` into an executable program.
+
+## ECPGPlus command line options
+
+The sample makefile invokes ECPGPlus with the `-C` option to run it in Pro\*C-compatible mode.
+
+If you include the `-C PROC` keywords at the command line, in addition to the ECPG syntax, you can use Pro\*C command line syntax. For example:
+
+```shell
+$ ecpg -C PROC INCLUDE=/usr/edb/as17/include acct_update.c
+```
+
+To display a complete list of the other ECPGPlus options available, in the ECPGPlus installation directory, enter:
+
+```shell
+./ecpg --help
+```
+
+The command line options are:
+
+| Option | Description |
+| ------ | ----------- |
+| -c | Generate C code from embedded SQL code. |
+| -C <mode> | Specify a compatibility mode:<br/><br/>`INFORMIX`<br/><br/>`INFORMIX_SE`<br/><br/>`PROC` |
+| -D <symbol> | Define a preprocessor symbol.<br/><br/>The -D keyword isn't supported when compiling in `PROC` mode. Instead, use the Oracle-style `DEFINE=` clause. |
+| -h | Parse a header file. This option includes option `-c`. |
+| -i | Parse system include files as well. |
+| -I <directory> | Search <directory> for `include` files. |
+| -o <outfile> | Write the result to <outfile>. |
+| -r <option> | Specify runtime behavior. The value of <option> can be:<br/><br/>`no_indicator` — Don't use indicators, but instead use special values to represent NULL values.<br/><br/>`prepare` — Prepare all statements before using them.<br/><br/>`questionmarks` — Allow use of a question mark as a placeholder.<br/><br/>`usebulk` — Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. |
+| --regression | Run in regression testing mode. |
+| -t | Turn on autocommit of transactions. |
+| -l | Disable `#line` directives. |
+| --help | Display the help options. |
+| --version | Output version information. |
+
+!!! Note
+    If you don't specify an output file name when invoking ECPGPlus, the output file name is created by removing the `.pgc` extension from the file name and appending `.c`.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/02_spl_block_structure.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/02_spl_block_structure.mdx
new file mode 100644
index 00000000000..f9ffade6228
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/02_spl_block_structure.mdx
@@ -0,0 +1,96 @@
+---
+title: "SPL block structure overview"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.050.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.144.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/02_spl_block_structure/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+Regardless of whether the program is a procedure, function, subprogram, or trigger, an SPL program has the same *block* structure. A block consists of up to three sections: an optional declaration section, a mandatory executable section, and an optional exception section. Minimally, a block has an executable section that consists of one or more SPL statements between the keywords `BEGIN` and `END`.
+
+Use the optional declaration section to declare variables, cursors, types, and subprograms that are used by the statements in the executable and exception sections. Declarations appear just before the `BEGIN` keyword of the executable section. Depending on the context of where the block is used, the declaration section can begin with the keyword `DECLARE`.
+
+You can include an exception section in the `BEGIN - END` block. The exception section begins with the keyword `EXCEPTION` and continues until the end of the block in which it appears. If an exception is thrown by a statement in the block, program control might go to the exception section where the thrown exception is handled, depending on the exception and the contents of the exception section.
+
+The following is the general structure of a block:
+
+```sql
+[ [ DECLARE ]
+    <pragmas>
+    <declarations> ]
+  BEGIN
+    <statements>
+  [ EXCEPTION
+    WHEN <exception_condition> THEN
+      <statements> [, ...] ]
+  END;
+```
+
+- `pragmas` are the directives (`AUTONOMOUS_TRANSACTION` is the currently supported pragma).
+- `declarations` are one or more variable, cursor, type, or subprogram declarations that are local to the block. If subprogram declarations are included, you must declare them after all other variable, cursor, and type declarations. Terminate each declaration with a semicolon. The use of the keyword `DECLARE` depends on the context in which the block appears.
+- `statements` are one or more SPL statements. Terminate each statement with a semicolon. The keyword `END`, which denotes the end of the block, must also be followed by a semicolon.
+- If present, the keyword `EXCEPTION` marks the beginning of the exception section. `exception_condition` is a conditional expression testing for one or more types of exceptions. If an exception matches one of the exceptions in `exception_condition`, the `statements` following the `WHEN exception_condition` clause are executed. There can be one or more `WHEN exception_condition` clauses, each followed by `statements`.
+
+!!! Note
+    A `BEGIN/END` block is considered a statement, thus you can nest blocks. The exception section can also contain nested blocks.
+
+The following is the simplest possible block, consisting of the `NULL` statement in the executable section. The `NULL` statement is an executable statement that does nothing.
+
+```sql
+BEGIN
+    NULL;
+END;
+```
+
+The following block contains a declaration section as well as the executable section:
+
+```sql
+DECLARE
+    v_numerator     NUMBER(2);
+    v_denominator   NUMBER(2);
+    v_result        NUMBER(5,2);
+BEGIN
+    v_numerator := 75;
+    v_denominator := 14;
+    v_result := v_numerator / v_denominator;
+    DBMS_OUTPUT.PUT_LINE(v_numerator || ' divided by ' || v_denominator ||
+        ' is ' || v_result);
+END;
+```
+
+In this example, three numeric variables are declared of data type `NUMBER`. Values are assigned to two of the variables, and one number is divided by the other. Results are stored in a third variable and then displayed. The output is:
+
+```sql
+__OUTPUT__
+75 divided by 14 is 5.36
+```
+
+The following block consists of a declaration section, an executable section, and an exception section:
+
+```sql
+DECLARE
+    v_numerator     NUMBER(2);
+    v_denominator   NUMBER(2);
+    v_result        NUMBER(5,2);
+BEGIN
+    v_numerator := 75;
+    v_denominator := 0;
+    v_result := v_numerator / v_denominator;
+    DBMS_OUTPUT.PUT_LINE(v_numerator || ' divided by ' || v_denominator ||
+        ' is ' || v_result);
+EXCEPTION
+    WHEN OTHERS THEN
+        DBMS_OUTPUT.PUT_LINE('An exception occurred');
+END;
+```
+
+The following output shows that the statement in the exception section is executed as a result of the division by zero:
+
+```sql
+__OUTPUT__
+An exception occurred
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/03_anonymous_blocks.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/03_anonymous_blocks.mdx
new file mode 100644
index 00000000000..b5394b86a1f
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/03_anonymous_blocks.mdx
@@ -0,0 +1,17 @@
+---
+title: "Anonymous blocks"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.051.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.145.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/03_anonymous_blocks/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You typically write blocks as part of a procedure, function, subprogram, or trigger. You name procedure, function, and trigger programs and store them in the database if you want to reuse them.
+
+For quick, one-time execution such as testing, you can enter the block without providing a name or storing it in the database. A block without a name and that isn't stored in the database is called an *anonymous block*. Once the block is executed and erased from the application buffer, you can't execute it again unless you enter the block code into the application again.
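+
+For example, you can enter and execute the following anonymous block directly in an application such as PSQL or EDB\*Plus. A minimal sketch:
+
+```sql
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('A simple anonymous block');
+END;
+```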
+
+Typically, the same block of code executes many times. To run a block of code repeatedly without reentering the code each time, with some simple modifications, you can turn an anonymous block into a procedure or function. See [Procedures overview](04_procedures_overview) and [Functions overview](05_functions_overview).
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx
new file mode 100644
index 00000000000..6b3eb1053d3
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx
@@ -0,0 +1,175 @@
+---
+title: "Creating a procedure"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+The `CREATE PROCEDURE` command defines and names a standalone procedure that's stored in the database.
+
+If you include a schema name, then the procedure is created in the specified schema. Otherwise it's created in the current schema. The name of the new procedure must not match any existing procedure with the same input argument types in the same schema. However, procedures of different input argument types can share a name. This is called *overloading*.
+
+!!! Note
+    Overloading of procedures is an EDB Postgres Advanced Server feature. Overloading of stored, standalone procedures isn't compatible with Oracle databases.
+
+## Updating the definition of an existing procedure
+
+To update the definition of an existing procedure, use `CREATE OR REPLACE PROCEDURE`. You can't change the name or argument types of a procedure this way. Attempting to do so creates a new, distinct procedure. When using `OUT` parameters, you can't change the types of any `OUT` parameters except by dropping the procedure.
+
+```sql
+CREATE [OR REPLACE] PROCEDURE <name> [ (<parameters>) ]
+   [
+          IMMUTABLE
+        | STABLE
+        | VOLATILE
+        | DETERMINISTIC
+        | [ NOT ] LEAKPROOF
+        | CALLED ON NULL INPUT
+        | RETURNS NULL ON NULL INPUT
+        | STRICT
+        | [ EXTERNAL ] SECURITY INVOKER
+        | [ EXTERNAL ] SECURITY DEFINER
+        | AUTHID DEFINER
+        | AUTHID CURRENT_USER
+        | PARALLEL { UNSAFE | RESTRICTED | SAFE }
+        | COST <execution_cost>
+        | ROWS <result_rows>
+        | SET <configuration_parameter>
+          { TO <value> | = <value> | FROM CURRENT }
+   ...]
+{ IS | AS }
+    [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+    [ <declarations> ]
+  BEGIN
+    <statements>
+  END [ <name> ];
+```
+
+Where:
+
+`name`
+
+ `name` is the identifier of the procedure.
+
+`parameters`
+
+ `parameters` is a list of formal parameters.
+
+`declarations`
+
+ `declarations` are variable, cursor, type, or subprogram declarations. If you include subprogram declarations, you must declare them after all other variable, cursor, and type declarations.
+
+`statements`
+
+ `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section.
+
+`IMMUTABLE`
+
+`STABLE`
+
+`VOLATILE`
+
+ These attributes inform the query optimizer about the behavior of the procedure. You can specify only one choice. `VOLATILE` is the default behavior.
+
+- `IMMUTABLE` indicates that the procedure can't modify the database and always reaches the same result when given the same argument values. It doesn't perform database lookups or otherwise use information not directly present in its argument list. If you include this clause, any call of the procedure with all-constant arguments can be immediately replaced with the procedure value.
+
+- `STABLE` indicates that the procedure can't modify the database. It also indicates that, in a single table scan, it consistently returns the same result for the same argument values but that its result might change across SQL statements. Use this selection for procedures that depend on database lookups, parameter variables (such as the current time zone), and so on.
+
+- `VOLATILE` indicates that the procedure value can change even in a single table scan, so you can't make optimizations. You must classify any function that has side effects as volatile, even if its result is predictable, to prevent calls from being optimized away.
+
+`DETERMINISTIC`
+
+ `DETERMINISTIC` is a synonym for `IMMUTABLE`. A `DETERMINISTIC` procedure can't modify the database and always reaches the same result when given the same argument values. It doesn't do database lookups or otherwise use information not directly present in its argument list. If you include this clause, any call of the procedure with all-constant arguments can be immediately replaced with the procedure value.
+
+`[ NOT ] LEAKPROOF`
+
+ A `LEAKPROOF` procedure has no side effects and reveals no information about the values used to call the procedure.
+
+`CALLED ON NULL INPUT`
+
+`RETURNS NULL ON NULL INPUT`
+
+`STRICT`
+
+- `CALLED ON NULL INPUT` (the default) indicates that the procedure is called normally when some of its arguments are `NULL`. It's the author's responsibility to check for `NULL` values if necessary and respond appropriately.
+
+- `RETURNS NULL ON NULL INPUT` or `STRICT` indicates that the procedure always returns `NULL` when any of its arguments are `NULL`. If these clauses are specified, the procedure isn't executed when there are `NULL` arguments. Instead, a `NULL` result is assumed automatically.
+
+`[ EXTERNAL ] SECURITY DEFINER`
+
+ `SECURITY DEFINER` specifies that the procedure executes with the privileges of the user that created it. This is the default. The key word `EXTERNAL` is allowed for SQL conformance but is optional.
+
+`[ EXTERNAL ] SECURITY INVOKER`
+
+ The `SECURITY INVOKER` clause indicates that the procedure executes with the privileges of the user that calls it. The key word `EXTERNAL` is allowed for SQL conformance but is optional.
+
+`AUTHID DEFINER`
+
+`AUTHID CURRENT_USER`
+
+- The `AUTHID DEFINER` clause is a synonym for `[EXTERNAL] SECURITY DEFINER`. If you omit the `AUTHID` clause or specify `AUTHID DEFINER`, the rights of the procedure owner determine access privileges to database objects.
+
+- The `AUTHID CURRENT_USER` clause is a synonym for `[EXTERNAL] SECURITY INVOKER`. If you specify `AUTHID CURRENT_USER`, the rights of the current user executing the procedure determine access privileges.
+
+`PARALLEL { UNSAFE | RESTRICTED | SAFE }`
+
+ The `PARALLEL` clause enables the use of parallel sequential scans, that is, parallel mode. A parallel sequential scan uses multiple workers to scan a relation in parallel during a query, in contrast to a serial sequential scan.
+
+- When the value is set to `UNSAFE`, you can't execute the procedure in parallel mode. The presence of such a procedure forces a serial execution plan. This is the default setting.
+
+- When the value is set to `RESTRICTED`, you can execute the procedure in parallel mode, but the execution is restricted to the parallel group leader. If the qualification for any particular relation has anything that's parallel restricted, that relation isn't chosen for parallelism.
+
+- When the value is set to `SAFE`, you can execute the procedure in parallel mode without restriction.
+
+`COST <execution_cost>`
+
+ `execution_cost` is a positive number giving the estimated execution cost for the procedure, in units of `cpu_operator_cost`. If the procedure returns a set, this is the cost per returned row. Larger values cause the planner to try to avoid evaluating the procedure more often than necessary.
+
+`ROWS <result_rows>`
+
+ `result_rows` is a positive number giving the estimated number of rows for the planner to expect the procedure to return. This setting is allowed only when the procedure is declared to return a set. The default is 1000 rows.
+
+`SET <configuration_parameter> { TO <value> | = <value> | FROM CURRENT }`
+
+ The `SET` clause causes the specified configuration parameter to be set to the specified value when the procedure is entered and then restored to its prior value when the procedure exits. `SET FROM CURRENT` saves the session's current value of the parameter as the value to apply when the procedure is entered.
+
+ If a `SET` clause is attached to a procedure, then the effects of a `SET LOCAL` command executed inside the procedure for the same variable are restricted to the procedure. The configuration parameter's prior value is restored at procedure exit. An ordinary `SET` command (without `LOCAL`) overrides the `SET` clause, much as it does for a previous `SET LOCAL` command. The effects of such a command persist after procedure exit, unless the current transaction is rolled back.
+
+`PRAGMA AUTONOMOUS_TRANSACTION`
+
+ `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the procedure as an autonomous transaction.
+
+!!! Note
+    - The `STRICT`, `LEAKPROOF`, `PARALLEL`, `COST`, `ROWS`, and `SET` keywords provide extended functionality for EDB Postgres Advanced Server and aren't supported by Oracle.
+
+    - By default, stored procedures are created as `SECURITY DEFINER`. However, when written in plpgsql, stored procedures are created as `SECURITY INVOKER`.
+
+## Example
+
+This example shows a simple procedure that takes no parameters:
+
+```sql
+CREATE OR REPLACE PROCEDURE simple_procedure
+IS
+BEGIN
+   DBMS_OUTPUT.PUT_LINE('That''s all folks!');
+END simple_procedure;
+```
+
+Store the procedure in the database by entering the procedure code in EDB Postgres Advanced Server.
+
+This example shows using the `AUTHID DEFINER` and `SET` clauses in a procedure declaration. The `update_salary` procedure conveys the privileges of the role that defined the procedure to the role that's calling the procedure while the procedure executes.
+
+```sql
+CREATE OR REPLACE PROCEDURE update_salary(id INT, new_salary NUMBER)
+  SET SEARCH_PATH = 'public' SET WORK_MEM = '1MB'
+  AUTHID DEFINER IS
+BEGIN
+  UPDATE emp SET salary = new_salary WHERE emp_id = id;
+END;
+```
+
+Include the `SET` clause to set the procedure's search path to `public` and the work memory to `1MB`. These settings don't affect other procedures, functions, and objects.
+
+In the example, the `AUTHID DEFINER` clause temporarily grants privileges to a role that otherwise might not be allowed to execute the statements in the procedure. To instruct the server to use the privileges associated with the role invoking the procedure, replace the `AUTHID DEFINER` clause with the `AUTHID CURRENT_USER` clause.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/02_calling_a_procedure.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/02_calling_a_procedure.mdx
new file mode 100644
index 00000000000..48425b9bdd0
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/02_calling_a_procedure.mdx
@@ -0,0 +1,37 @@
+---
+title: "Calling a procedure"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/04_procedures_overview/02_calling_a_procedure/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can invoke a procedure from another SPL program by specifying the procedure name followed by any parameters and a semicolon:
+
+```text
+<name> [ (<parameters>) ];
+```
+
+Where:
+
+`name` is the identifier of the procedure.
+
+`parameters` is a list of parameters.
+
+!!! Note
+    - If there are no parameters to pass, you can call the procedure with an empty parameter list, or you can omit the parentheses.
+
+    - The syntax for calling a procedure is the same as in the preceding syntax diagram when executing it with the `EXEC` command in PSQL or EDB\*Plus. See [SQL reference](../../../../reference/oracle_compatibility_reference/epas_compat_sql/63_exec/) for information about the `EXEC` command.
+
+This example calls the procedure from an anonymous block:
+
+```sql
+BEGIN
+  simple_procedure;
+END;
+__OUTPUT__
+That's all folks!
+```
+
+!!! Note
+    Each application has its own way of calling a procedure. For example, a Java application uses the application programming interface JDBC.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/03_deleting_a_procedure.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/03_deleting_a_procedure.mdx
new file mode 100644
index 00000000000..13a47cb22d4
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/03_deleting_a_procedure.mdx
@@ -0,0 +1,27 @@
+---
+title: "Deleting a procedure"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/04_procedures_overview/03_deleting_a_procedure/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can delete a procedure from the database using the `DROP PROCEDURE` command.
+
+```sql
+DROP PROCEDURE [ IF EXISTS ] <name> [ (<parameters>) ]
+    [ CASCADE | RESTRICT ];
+```
+
+Where `name` is the name of the procedure to drop.
+
+!!! Note
+    - The specification of the parameter list is required in EDB Postgres Advanced Server under certain circumstances, such as in an overloaded procedure. Oracle requires that you always omit the parameter list.
+
+    - Using `IF EXISTS`, `CASCADE`, or `RESTRICT` isn't compatible with Oracle databases. See [SQL reference](../../../../reference/oracle_compatibility_reference/epas_compat_sql/50_drop_procedure) for information on these options.
+ +This example drops the procedure `simple_procedure`: + +```sql +DROP PROCEDURE simple_procedure; +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx new file mode 100644 index 00000000000..7e18a9f73c6 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Procedures overview" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.052.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.146.html" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/04_procedures_overview/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Procedures are standalone SPL programs that you invoke or call as an individual SPL program statement. When called, procedures can optionally receive values from the caller in the form of input parameters. They can optionally return values to the caller in the form of output parameters. + +
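+As a minimal sketch of these ideas, the following procedure receives an employee number through an `IN` parameter and hands the updated salary back through an `OUT` parameter. The `raise_salary` procedure is illustrative only, not one of the documented examples:
+
+```sql
+-- Illustrative only: one IN parameter and one OUT parameter.
+CREATE OR REPLACE PROCEDURE raise_salary (
+    p_empno   IN  NUMBER,
+    p_new_sal OUT NUMBER
+)
+IS
+BEGIN
+    UPDATE emp SET sal = sal * 1.10 WHERE empno = p_empno;
+    SELECT sal INTO p_new_sal FROM emp WHERE empno = p_empno;
+END raise_salary;
+```
+
+The sections that follow describe how to create, call, and delete procedures like this one.
+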
+creating_a_procedure
+calling_a_procedure
+deleting_a_procedure
+
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx
new file mode 100644
index 00000000000..f5d1ee54f19
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx
@@ -0,0 +1,180 @@
+---
+title: "Creating a function"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The `CREATE FUNCTION` command defines and names a standalone function to store in the database.
+
+If a schema name is included, then the function is created in the specified schema. Otherwise it's created in the current schema. The name of the new function must not match any existing function with the same input argument types in the same schema. However, functions of different input argument types can share a name. Sharing a name is called *overloading*.
+
+!!! Note
+    Overloading functions is an EDB Postgres Advanced Server feature. Overloading stored, standalone functions isn't compatible with Oracle databases.
+
+## Updating the definition of an existing function
+
+To update the definition of an existing function, use `CREATE OR REPLACE FUNCTION`. You can't change the name or argument types of a function this way. If you try to, you instead create a new, distinct function. Also, `CREATE OR REPLACE FUNCTION` doesn't let you change the return type of an existing function. To do that, you must drop and recreate the function. When using `OUT` parameters, you can't change the types of any `OUT` parameters except by dropping the function.
+
+```sql
+CREATE [ OR REPLACE ] FUNCTION <name> [ (<parameters>) ]
+  RETURN <data_type>
+   [
+          IMMUTABLE
+        | STABLE
+        | VOLATILE
+        | DETERMINISTIC
+        | [ NOT ] LEAKPROOF
+        | CALLED ON NULL INPUT
+        | RETURNS NULL ON NULL INPUT
+        | STRICT
+        | [ EXTERNAL ] SECURITY INVOKER
+        | [ EXTERNAL ] SECURITY DEFINER
+        | AUTHID DEFINER
+        | AUTHID CURRENT_USER
+        | PARALLEL { UNSAFE | RESTRICTED | SAFE }
+        | COST <execution_cost>
+        | ROWS <result_rows>
+        | SET <configuration_parameter>
+          { TO <value> | = <value> | FROM CURRENT }
+   ...]
+{ IS | AS }
+    [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+    [ <declarations> ]
+  BEGIN
+    <statements>
+  END [ <name> ];
+```
+
+Where:
+
+ `name` is the identifier of the function.
+
+ `parameters` is a list of formal parameters.
+
+ `data_type` is the data type of the value returned by the function’s `RETURN` statement.
+
+ `declarations` are variable, cursor, type, or subprogram declarations. If you include subprogram declarations, you must declare them after all other variable, cursor, and type declarations.
+
+ `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section.
+
+`IMMUTABLE`
+
+`STABLE`
+
+`VOLATILE`
+
+ These attributes inform the query optimizer about the behavior of the function. You can specify only one. `VOLATILE` is the default behavior.
+
+- `IMMUTABLE` indicates that the function can't modify the database and always reaches the same result when given the same argument values. It doesn't do database lookups or otherwise use information not directly present in its argument list. If you include this clause, any call of the function with all-constant arguments can be immediately replaced with the function value.
+
+- `STABLE` indicates that the function can't modify the database. It also indicates that, in a single table scan, it consistently returns the same result for the same argument values but that its result might change across SQL statements. Select this attribute for functions that depend on database lookups, parameter variables (such as the current time zone), and so on.
+
+- `VOLATILE` indicates that the function value can change even in a single table scan, so no optimizations can be made. Classify any function that has side effects as volatile, even if its result is predictable, to prevent calls from being optimized away.
+
+`DETERMINISTIC`
+
+ `DETERMINISTIC` is a synonym for `IMMUTABLE`. A `DETERMINISTIC` function can't modify the database and always reaches the same result when given the same argument values. It doesn't do database lookups or otherwise use information not directly present in its argument list. If you include this clause, you can replace any call of the function that has all-constant arguments with the function value.
+
+`[ NOT ] LEAKPROOF`
+
+ A `LEAKPROOF` function has no side effects and reveals no information about the values used to call the function.
+
+`CALLED ON NULL INPUT`
+
+`RETURNS NULL ON NULL INPUT`
+
+`STRICT`
+
+- `CALLED ON NULL INPUT` (the default) indicates that the function is called normally when some of its arguments are `NULL`. It's the author's responsibility to check for `NULL` values if necessary and respond appropriately.
+
+- `RETURNS NULL ON NULL INPUT` or `STRICT` indicates that the function always returns `NULL` when any of its arguments are `NULL`. If you specify these clauses, the function isn't executed when there are `NULL` arguments. Instead, a `NULL` result is assumed automatically.
+
+`[ EXTERNAL ] SECURITY DEFINER`
+
+ `SECURITY DEFINER` (the default) specifies for the function to execute with the privileges of the user that created it. The key word `EXTERNAL` is allowed for SQL conformance but is optional.
+
+`[ EXTERNAL ] SECURITY INVOKER`
+
+ The `SECURITY INVOKER` clause indicates for the function to execute with the privileges of the user that calls it. The key word `EXTERNAL` is allowed for SQL conformance but is optional.
+
+`AUTHID DEFINER`
+
+`AUTHID CURRENT_USER`
+
+- The `AUTHID DEFINER` clause is a synonym for `[EXTERNAL] SECURITY DEFINER`. If the `AUTHID` clause is omitted or if `AUTHID DEFINER` is specified, the rights of the function owner determine access privileges to database objects.
+
+- The `AUTHID CURRENT_USER` clause is a synonym for `[EXTERNAL] SECURITY INVOKER`. If `AUTHID CURRENT_USER` is specified, the rights of the current user executing the function determine access privileges.
+
+`PARALLEL { UNSAFE | RESTRICTED | SAFE }`
+
+ The `PARALLEL` clause enables the use of parallel sequential scans, that is, parallel mode. A parallel sequential scan uses multiple workers to scan a relation in parallel during a query, in contrast to a serial sequential scan.
+
+- When this value is set to `UNSAFE`, you can't execute the function in parallel mode. The presence of such a function in a SQL statement forces a serial execution plan. This is the default setting.
+
+- When this value is set to `RESTRICTED`, you can execute the function in parallel mode, but the execution is restricted to the parallel group leader. If the qualification for any particular relation has anything that's parallel restricted, that relation isn't chosen for parallelism.
+
+- When this value is set to `SAFE`, you can execute the function in parallel mode with no restriction.
+
+`COST <execution_cost>`
+
+ `execution_cost` is a positive number giving the estimated execution cost for the function, in units of `cpu_operator_cost`. If the function returns a set, this is the cost per returned row. Larger values cause the planner to try to avoid evaluating the function more often than necessary.
+
+`ROWS <result_rows>`
+
+ `result_rows` is a positive number giving the estimated number of rows for the planner to expect the function to return. This is allowed only when the function is declared to return a set. The default assumption is 1000 rows.
+
+`SET <configuration_parameter> { TO <value> | = <value> | FROM CURRENT }`
+
+ The `SET` clause causes the specified configuration parameter to be set to the specified value when the function is entered and then restored to its prior value when the function exits. `SET FROM CURRENT` saves the session's current value of the parameter as the value to apply when the function is entered.
+
+ If a `SET` clause is attached to a function, then the effects of a `SET LOCAL` command executed inside the function for the same variable are restricted to the function. The configuration parameter's prior value is restored at function exit. A `SET` command without `LOCAL` overrides the `SET` clause, much as it does for a previous `SET LOCAL` command. The effects of such a command persist after function exit, unless the current transaction is rolled back.
+
+`PRAGMA AUTONOMOUS_TRANSACTION`
+
+ `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the function as an autonomous transaction.
+
+!!! Note
+    The `STRICT`, `LEAKPROOF`, `PARALLEL`, `COST`, `ROWS`, and `SET` keywords provide extended functionality for EDB Postgres Advanced Server and aren't supported by Oracle.
+
+## Examples
+
+This example shows a simple function that takes no parameters:
+
+```sql
+CREATE OR REPLACE FUNCTION simple_function
+  RETURN VARCHAR2
+IS
+BEGIN
+  RETURN 'That''s All Folks!';
+END simple_function;
+```
+
+This function takes two input parameters:
+
+```sql
+CREATE OR REPLACE FUNCTION emp_comp (
+  p_sal  NUMBER,
+  p_comm NUMBER
+) RETURN NUMBER
+IS
+BEGIN
+  RETURN (p_sal + NVL(p_comm, 0)) * 24;
+END emp_comp;
+```
+
+This example uses the `AUTHID CURRENT_USER` clause and `STRICT` keyword in a function declaration:
+
+```sql
+CREATE OR REPLACE FUNCTION dept_salaries(dept_id INT) RETURN NUMBER
+  STRICT
+  AUTHID CURRENT_USER
+IS
+  v_total NUMBER;
+BEGIN
+  SELECT sum(salary) INTO v_total FROM emp WHERE deptno = dept_id;
+  RETURN v_total;
+END;
+```
+
+Include the `STRICT` keyword to instruct the server to return `NULL` if any input parameter passed is `NULL`. If a `NULL` value is passed, the function doesn't execute.
+
+The `dept_salaries` function executes with the privileges of the role that's calling the function. If the current user doesn't have the privileges to perform the `SELECT` statement querying the `emp` table (to display employee salaries), the function reports an error. To instruct the server to use the privileges associated with the role that defined the function, replace the `AUTHID CURRENT_USER` clause with the `AUTHID DEFINER` clause.
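+
+As an additional illustration of the attribute clauses, the following is a minimal sketch (the function name is illustrative, not one of the documented examples) of a function that qualifies for `IMMUTABLE` because it performs no database lookups and depends only on its arguments:
+
+```sql
+-- Illustrative only: a pure computation, safe to mark IMMUTABLE.
+CREATE OR REPLACE FUNCTION f_to_c (
+    p_fahrenheit NUMBER
+) RETURN NUMBER
+  IMMUTABLE
+IS
+BEGIN
+  RETURN (p_fahrenheit - 32) * 5 / 9;
+END f_to_c;
+```
+
+Because the function is declared `IMMUTABLE`, a call with all-constant arguments, such as `f_to_c(212)`, can be immediately replaced with the function value. A function that reads tables, such as `dept_salaries`, must not be declared `IMMUTABLE`.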
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/02_calling_a_function.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/02_calling_a_function.mdx
new file mode 100644
index 00000000000..d734c7dcbb2
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/02_calling_a_function.mdx
@@ -0,0 +1,55 @@
+---
+title: "Calling a function"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/05_functions_overview/02_calling_a_function/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can use a function anywhere an expression can appear in an SPL statement. Invoke a function by specifying its name followed by any parameters enclosed in parentheses.
+
+```text
+<name> [ (<parameters>) ]
+```
+
+`name` is the name of the function.
+
+`parameters` is a list of parameters.
+
+!!! Note
+    If there are no parameters to pass, you can call the function with an empty parameter list, or you can omit the parentheses.
+
+This example shows how to call the function from another SPL program:
+
+```sql
+BEGIN
+  DBMS_OUTPUT.PUT_LINE(simple_function);
+END;
+__OUTPUT__
+That's All Folks!
+```
+
+You typically use a function in a SQL statement, as this example shows:
+
+```sql
+SELECT empno "EMPNO", ename "ENAME", sal "SAL", comm "COMM",
+    emp_comp(sal, comm) "YEARLY COMPENSATION" FROM emp;
+__OUTPUT__
+ EMPNO | ENAME  |   SAL   |  COMM   | YEARLY COMPENSATION
+-------+--------+---------+---------+---------------------
+  7369 | SMITH  |  800.00 |         |            19200.00
+  7499 | ALLEN  | 1600.00 |  300.00 |            45600.00
+  7521 | WARD   | 1250.00 |  500.00 |            42000.00
+  7566 | JONES  | 2975.00 |         |            71400.00
+  7654 | MARTIN | 1250.00 | 1400.00 |            63600.00
+  7698 | BLAKE  | 2850.00 |         |            68400.00
+  7782 | CLARK  | 2450.00 |         |            58800.00
+  7788 | SCOTT  | 3000.00 |         |            72000.00
+  7839 | KING   | 5000.00 |         |           120000.00
+  7844 | TURNER | 1500.00 |    0.00 |            36000.00
+  7876 | ADAMS  | 1100.00 |         |            26400.00
+  7900 | JAMES  |  950.00 |         |            22800.00
+  7902 | FORD   | 3000.00 |         |            72000.00
+  7934 | MILLER | 1300.00 |         |            31200.00
+(14 rows)
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/03_deleting_a_function.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/03_deleting_a_function.mdx
new file mode 100644
index 00000000000..b3db08d5710
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/03_deleting_a_function.mdx
@@ -0,0 +1,27 @@
+---
+title: "Deleting a function"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/05_functions_overview/03_deleting_a_function/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can delete a function from the database using `DROP FUNCTION`.
+
+```sql
+DROP FUNCTION [ IF EXISTS ] <name> [ (<parameters>) ]
+    [ CASCADE | RESTRICT ];
+```
+
+Where `name` is the name of the function to drop.
+
+!!! Note
+    - Specifying the parameter list is required in EDB Postgres Advanced Server under certain circumstances, such as in an overloaded function. Oracle requires that you always omit the parameter list.
+
+    - Use of `IF EXISTS`, `CASCADE`, or `RESTRICT` isn't compatible with Oracle databases. See the [SQL reference](../../../../reference/oracle_compatibility_reference/epas_compat_sql/47_drop_function) for information on these options.
+
+This example drops the function `simple_function`:
+
+```sql
+DROP FUNCTION simple_function;
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx
new file mode 100644
index 00000000000..d1e9bfdc7b2
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx
@@ -0,0 +1,22 @@
+---
+title: "Functions overview"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.053.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.147.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/05_functions_overview/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Functions are standalone SPL programs that are invoked as expressions. When evaluated, a function returns a value that's substituted in the expression in which the function is embedded. Functions can optionally take values from the calling program in the form of input parameters.
+
+In addition to its return value, a function can optionally return values to the caller in the form of output parameters. However, we don't encourage using output parameters in functions.
+
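+Because EDB Postgres Advanced Server supports overloading, functions of different input argument types can share a name. As a minimal sketch (the `format_id` functions are illustrative, not one of the documented examples):
+
+```sql
+-- Illustrative only: two functions share a name but differ in
+-- input argument type, so calls resolve by argument type.
+CREATE OR REPLACE FUNCTION format_id (p_id NUMBER) RETURN VARCHAR2
+IS
+BEGIN
+  RETURN 'ID-' || TO_CHAR(p_id);
+END format_id;
+
+CREATE OR REPLACE FUNCTION format_id (p_id VARCHAR2) RETURN VARCHAR2
+IS
+BEGIN
+  RETURN 'ID-' || UPPER(p_id);
+END format_id;
+```
+
+A call such as `format_id(7900)` invokes the `NUMBER` variant, and `format_id('adhoc')` invokes the `VARCHAR2` variant. See [Creating a function](01_creating_a_function) for the compatibility note on overloading.
+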
+creating_a_function
+calling_a_function
+deleting_a_function
+
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx
new file mode 100644
index 00000000000..e41d8fcf33a
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx
@@ -0,0 +1,106 @@
+---
+title: "Positional versus named parameter notation"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can use either *positional* or *named* parameter notation when passing parameters to a function or procedure.
+
+- If you specify parameters using *positional notation*, you must list the parameters in the order that they're declared.
+
+- If you specify parameters using *named notation*, list the name of each parameter followed by an arrow (`=>`) and the parameter value. The order of the parameters doesn't matter. Named notation is more verbose but makes your code easier to read and maintain.
+
+This example uses positional and named parameter notation:
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_info (
+    p_deptno IN     NUMBER,
+    p_empno  IN OUT NUMBER,
+    p_ename  IN OUT VARCHAR2
+)
+IS
+BEGIN
+    dbms_output.put_line('Department Number =' || p_deptno);
+    dbms_output.put_line('Employee Number =' || p_empno);
+    dbms_output.put_line('Employee Name =' || p_ename);
+END;
+```
+
+To call the procedure using positional notation, pass the following:
+
+```sql
+emp_info(30, 7455, 'Clark');
+```
+
+To call the procedure using named notation, pass the following:
+
+```sql
+emp_info(p_ename =>'Clark', p_empno=>7455, p_deptno=>30);
+```
+
+If you use named notation, you don't need to rearrange a procedure’s parameter list if the parameter list changes, the parameters are reordered, or an optional parameter is added.
+
+When an argument has a default value and the argument isn't a trailing argument, you must use named notation to call the procedure or function. This example shows a procedure with two leading default arguments:
+
+```sql
+CREATE OR REPLACE PROCEDURE check_balance (
+    p_customerID IN NUMBER DEFAULT NULL,
+    p_balance    IN NUMBER DEFAULT NULL,
+    p_amount     IN NUMBER
+)
+IS
+    balance NUMBER;
+BEGIN
+    IF (p_balance IS NULL AND p_customerID IS NULL) THEN
+        RAISE_APPLICATION_ERROR
+            (-20010, 'Must provide balance or customer');
+    ELSIF (p_balance IS NOT NULL AND p_customerID IS NOT NULL) THEN
+        RAISE_APPLICATION_ERROR
+            (-20020,'Must provide balance or customer, not both');
+    ELSIF (p_balance IS NULL) THEN
+        balance := getCustomerBalance(p_customerID);
+    ELSE
+        balance := p_balance;
+    END IF;
+
+    IF (p_amount > balance) THEN
+        RAISE_APPLICATION_ERROR
+            (-20030, 'Balance insufficient');
+    END IF;
+END;
+```
+
+You can omit nontrailing argument values when you call this procedure only by using named notation. When using positional notation, only trailing arguments are allowed to default. You can call this procedure with the following arguments:
+
+```sql
+check_balance(p_customerID => 10, p_amount => 500.00);
+
+check_balance(p_balance => 1000.00, p_amount => 500.00);
+```
+
+You can use a combination of positional and named notation, referred to as *mixed* notation, to specify parameters. This example shows using mixed parameter notation:
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_info (
+    p_deptno IN     NUMBER,
+    p_empno  IN OUT NUMBER,
+    p_ename  IN OUT VARCHAR2
+)
+IS
+BEGIN
+    dbms_output.put_line('Department Number =' || p_deptno);
+    dbms_output.put_line('Employee Number =' || p_empno);
+    dbms_output.put_line('Employee Name =' || p_ename);
+END;
+```
+
+You can call the procedure using mixed notation:
+
+```sql
+emp_info(30, p_ename =>'Clark', p_empno=>7455);
+```
+
+When using mixed notation, named arguments can't precede positional arguments.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx
new file mode 100644
index 00000000000..80fa19364bd
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx
@@ -0,0 +1,32 @@
+---
+title: "Parameter modes"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+A parameter has one of three possible modes: `IN`, `OUT`, or `IN OUT`. The following characteristics of a formal parameter depend on its mode:
+
+- Its initial value when the procedure or function is called
+- Whether the called procedure or function can modify the formal parameter
+- How the actual parameter value is passed from the calling program to the called program
+- What happens to the formal parameter value when an unhandled exception occurs in the called program
+
+The following table summarizes the behavior of parameters according to their mode.
+
+| Mode property                                                                    | IN                                                | IN OUT                                            | OUT                                               |
+| -------------------------------------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- |
+| Formal parameter initialized to                                                  | Actual parameter value                            | Actual parameter value                            | Actual parameter value                            |
+| Formal parameter modifiable by the called program?                               | No                                                | Yes                                               | Yes                                               |
+| Actual parameter contains (after normal called program termination)              | Original actual parameter value prior to the call | Last value of the formal parameter                | Last value of the formal parameter                |
+| Actual parameter contains (after a handled exception in the called program)      | Original actual parameter value prior to the call | Last value of the formal parameter                | Last value of the formal parameter                |
+| Actual parameter contains (after an unhandled exception in the called program)   | Original actual parameter value prior to the call | Original actual parameter value prior to the call | Original actual parameter value prior to the call |
+
+As shown by the table:
+
+- The `IN` formal parameter is initialized to the actual parameter with which it's called unless it was explicitly initialized with a default value. You can reference the `IN` parameter in the called program. However, the called program can't assign a new value to the `IN` parameter. After control returns to the calling program, the actual parameter always contains the same value that it had prior to the call.
+
+- Like an `IN` parameter, an `IN OUT` formal parameter is initialized to the actual parameter with which it's called. Like an `OUT` parameter, an `IN OUT` formal parameter can be modified by the called program. The last value in the formal parameter is passed to the calling program’s actual parameter if the called program ends without an exception. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call.
+
+- The `OUT` formal parameter is initialized to the actual parameter with which it's called. The called program can reference and assign new values to the formal parameter. If the called program ends without an exception, the actual parameter takes on the value last set in the formal parameter. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call.
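+
+The following minimal sketch (the `mode_demo` procedure and variable names are illustrative, not one of the documented examples) shows each mode in a normal, exception-free call:
+
+```sql
+-- Illustrative only: one parameter of each mode.
+CREATE OR REPLACE PROCEDURE mode_demo (
+    p_in     IN     NUMBER,
+    p_in_out IN OUT NUMBER,
+    p_out    OUT    NUMBER
+)
+IS
+BEGIN
+    -- p_in can be read but not reassigned.
+    p_in_out := p_in_out + p_in;   -- initialized to the actual parameter value
+    p_out    := p_in * 2;          -- hands a value back to the caller
+END mode_demo;
+
+DECLARE
+    v_a NUMBER := 10;
+    v_b NUMBER := 5;
+    v_c NUMBER;
+BEGIN
+    mode_demo(v_a, v_b, v_c);
+    DBMS_OUTPUT.PUT_LINE(v_b);   -- 15, the last value of the IN OUT formal parameter
+    DBMS_OUTPUT.PUT_LINE(v_c);   -- 20, the last value of the OUT formal parameter
+END;
+```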
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx
new file mode 100644
index 00000000000..3351c854b72
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx
@@ -0,0 +1,75 @@
+---
+title: "Using default values in parameters"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can set a default value for a formal parameter by including the `DEFAULT` clause or using the assignment operator (`:=`) in the `CREATE PROCEDURE` or `CREATE FUNCTION` statement.
+
+## Syntax
+
+The general form of a formal parameter declaration is:
+
+```text
+(<name> [ IN | OUT | IN OUT ] <data_type> [ { DEFAULT | := } <expr> ])
+```
+
+`name` is an identifier assigned to the parameter.
+
+`IN|OUT|IN OUT` specifies the parameter mode.
+
+`data_type` is the data type assigned to the variable.
+
+`expr` is the default value assigned to the parameter. If you don't include a `DEFAULT` clause, the caller must provide a value for the parameter.
+
+The default value is evaluated every time you invoke the function or procedure. For example, assigning `SYSDATE` to a parameter of type `DATE` causes the parameter to have the time of the current invocation, not the time when the procedure or function was created.
+
+## Example
+
+This example uses the assignment operator to assign a default value of `SYSDATE` to the parameter `p_hiredate`:
+
+```sql
+CREATE OR REPLACE PROCEDURE hire_emp (
+    p_empno    NUMBER,
+    p_ename    VARCHAR2,
+    p_hiredate DATE := SYSDATE
+)
+IS
+BEGIN
+    INSERT INTO emp(empno, ename, hiredate)
+      VALUES(p_empno, p_ename, p_hiredate);
+
+    DBMS_OUTPUT.PUT_LINE('Hired!');
+END hire_emp;
+```
+
+If the parameter declaration includes a default value, you can omit the parameter from the actual parameter list when you call the procedure. Calls to the sample procedure `hire_emp` must include two arguments: the employee number (`p_empno`) and employee name (`p_ename`). The third parameter (`p_hiredate`) defaults to the value of `SYSDATE`:
+
+```sql
+hire_emp(7575, 'Clark');
+```
+
+If you do include a value for the actual parameter when you call the procedure, that value takes precedence over the default value. This command adds an employee with a hire date of February 15, 2010, regardless of the current value of `SYSDATE`:
+
+```sql
+hire_emp(7575, 'Clark', '15-FEB-2010');
+```
+
+You can write the same procedure by substituting the `DEFAULT` keyword for the assignment operator:
+
+```sql
+CREATE OR REPLACE PROCEDURE hire_emp (
+    p_empno    NUMBER,
+    p_ename    VARCHAR2,
+    p_hiredate DATE DEFAULT SYSDATE
+)
+IS
+BEGIN
+    INSERT INTO emp(empno, ename, hiredate)
+      VALUES(p_empno, p_ename, p_hiredate);
+
+    DBMS_OUTPUT.PUT_LINE('Hired!');
+END hire_emp;
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx
new file mode 100644
index 00000000000..a84ced4e434
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx
@@ -0,0 +1,87 @@
+---
+title: "Declaring parameters"
+---
+
+Declare parameters in the procedure or function definition, and enclose them in parentheses following the procedure or function name. Parameters declared in the procedure or function definition are known as *formal parameters*. When you invoke the procedure or function, the calling program supplies the actual data to use in the called program’s processing as well as the variables that receive the results of the called program’s processing. The data and variables supplied by the calling program when the procedure or function is called are referred to as the *actual parameters*.
+
+The following is the general format of a formal parameter declaration:
+
+```text
+(<name> [ IN | OUT | IN OUT ] <data_type> [ DEFAULT <value> ])
+```
+
+- `name` is an identifier assigned to the formal parameter.
+- Whether a parameter is `IN`, `OUT`, or `IN OUT` is referred to as the parameter’s *mode*. If specified, `IN` defines the parameter for receiving input data into the procedure or function. An `IN` parameter can also be initialized to a default value. If specified, `OUT` defines the parameter for returning data from the procedure or function. If specified, `IN OUT` allows the parameter to be used for both input and output. If all of `IN`, `OUT`, and `IN OUT` are omitted, then the parameter acts as if it were defined as `IN` by default.
+- `data_type` defines the data type of the parameter.
+- `value` is a default value assigned to an `IN` parameter in the called program if you don't specify an actual parameter in the call.
+ +This example shows a procedure that takes parameters: + +```sql +CREATE OR REPLACE PROCEDURE emp_query ( + p_deptno IN NUMBER, + p_empno IN OUT NUMBER, + p_ename IN OUT VARCHAR2, + p_job OUT VARCHAR2, + p_hiredate OUT DATE, + p_sal OUT NUMBER +) +IS +BEGIN + SELECT empno, ename, job, hiredate, sal + INTO p_empno, p_ename, p_job, p_hiredate, p_sal + FROM emp + WHERE deptno = p_deptno + AND (empno = p_empno + OR ename = UPPER(p_ename)); +END; +``` + +In this example, `p_deptno` is an `IN` formal parameter, `p_empno` and `p_ename` are `IN OUT` formal parameters, and `p_job, p_hiredate` and `p_sal` are `OUT` formal parameters. + +!!! Note + In the example, no maximum length was specified on the `VARCHAR2` parameters, and no precision and scale were specified on the `NUMBER` parameters. It's illegal to specify a length, precision, scale, or other constraints on parameter declarations. These constraints are inherited from the actual parameters that are used when the procedure or function is called. + +The `emp_query` procedure can be called by another program, passing it the actual parameters. This example is another SPL program that calls `emp_query`: + +```sql +DECLARE + v_deptno NUMBER(2); + v_empno NUMBER(4); + v_ename VARCHAR2(10); + v_job VARCHAR2(9); + v_hiredate DATE; + v_sal NUMBER; +BEGIN + v_deptno := 30; + v_empno := 7900; + v_ename := ''; + emp_query(v_deptno, v_empno, v_ename, v_job, v_hiredate, v_sal); + DBMS_OUTPUT.PUT_LINE('Department : ' || v_deptno); + DBMS_OUTPUT.PUT_LINE('Employee No: ' || v_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); +END; +``` + +In this example, `v_deptno`, `v_empno`, `v_ename`, `v_job`, `v_hiredate`, and `v_sal` are the actual parameters. + +The output from the example is: + +```sql +__OUTPUT__ +Department : 30 +Employee No: 7900 +Name : JAMES +Job : CLERK +Hire Date : 03-DEC-81 +Salary : 950 +``` + +
+positional_vs_named_parameter_notation
+parameter_modes
+using_default_values_in_parameters
+
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx
new file mode 100644
index 00000000000..ff33188fd88
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx
@@ -0,0 +1,20 @@
+---
+title: "Procedure and function parameters"
+indexCards: simple
+navigation:
+- declaring_parameters
+- 01_positional_vs_named_parameter_notation
+- 02_parameter_modes
+- 03_using_default_values_in_parameters
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.054.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.148.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+An important aspect of using procedures and functions is the capability to pass data from the calling program to the procedure or function and to receive data back from the procedure or function. You do this by using *parameters*.
+
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx
new file mode 100644
index 00000000000..52099985964
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx
@@ -0,0 +1,128 @@
+---
+title: "Creating a subprocedure"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The `PROCEDURE` clause specified in the declaration section defines and names a subprocedure local to that block.
+
+- The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods.
+
+- The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block. Therefore, the SPL code can access it in the executable section and optional exception section of that block.
+
+## Declaring subprocedures
+
+You can declare subprocedures only after all the other variable, cursor, and type declarations included in the declaration section. Subprograms must be the last set of declarations.
+
+```sql
+PROCEDURE <name> [ (<parameters>) ]
+{ IS | AS }
+    [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+    [ <declarations> ]
+  BEGIN
+    <statements>
+  END [ <name> ];
+```
+
+Where:
+
+- `name` is the identifier of the subprocedure.
+ +- `parameters` is a list of formal parameters. + +- `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the subprocedure as an autonomous transaction. + +- `declarations` are variable, cursor, type, or subprogram declarations. If subprogram declarations are included, you must declare them after all other variable, cursor, and type declarations. + +- `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section. + +## Example: Subprocedure in an anonymous block + +This example is a subprocedure in an anonymous block: + +```sql +DECLARE + PROCEDURE list_emp + IS + v_empno NUMBER(4); + v_ename VARCHAR2(10); + CURSOR emp_cur IS + SELECT empno, ename FROM emp ORDER BY empno; + BEGIN + OPEN emp_cur; + DBMS_OUTPUT.PUT_LINE('Subprocedure list_emp:'); + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH emp_cur INTO v_empno, v_ename; + EXIT WHEN emp_cur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || v_ename); + END LOOP; + CLOSE emp_cur; + END; +BEGIN + list_emp; +END; +``` + +Invoking this anonymous block produces the following output: + +```sql +__OUTPUT__ +Subprocedure list_emp: +EMPNO ENAME +----- ------- +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +7876 ADAMS +7900 JAMES +7902 FORD +7934 MILLER +``` + +## Example: Subprocedure in a trigger + +This example is a subprocedure in a trigger: + +```sql +CREATE OR REPLACE TRIGGER dept_audit_trig + AFTER INSERT OR UPDATE OR DELETE ON dept +DECLARE + v_action VARCHAR2(24); + PROCEDURE display_action ( + p_action IN VARCHAR2 + ) + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('User ' || USER || ' ' || p_action || + ' dept on ' || TO_CHAR(SYSDATE,'YYYY-MM-DD')); + END display_action; +BEGIN + IF INSERTING THEN + v_action := 'added'; + ELSIF UPDATING THEN + v_action := 'updated'; + ELSIF DELETING THEN + v_action := 'deleted'; + END IF; + display_action(v_action); +END; +``` + +Invoking this trigger produces the following output: + +```sql +INSERT INTO dept VALUES (50,'HR','DENVER'); +__OUTPUT__ +User enterprisedb added dept on 2016-07-26 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx new file mode 100644 index 00000000000..0f2a0b069bd --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx @@ -0,0 +1,75 @@ +--- +title: "Creating a subfunction" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `FUNCTION` clause specified in the declaration section defines and names a subfunction local to that block. + +- The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods. 
+
+- The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block and is therefore accessible by the SPL code in the executable section and optional exception section of that block.
+
+## Declaring a subfunction
+
+```sql
+FUNCTION <name> [ (<parameters>) ]
+RETURN <data_type>
+{ IS | AS }
+    [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+    [ <declarations> ]
+  BEGIN
+    <statements>
+  END [ <name> ];
+```
+
+Where:
+
+- `name` is the identifier of the subfunction.
+
+- `parameters` is a list of formal parameters.
+
+- `data_type` is the data type of the value returned by the function’s `RETURN` statement.
+
+- `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the subfunction as an autonomous transaction.
+
+- `declarations` are variable, cursor, type, or subprogram declarations. If subprogram declarations are included, they must be declared after all other variable, cursor, and type declarations.
+
+- `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section.
+
+## Example: Recursive subfunction
+
+This example shows the use of a recursive subfunction:
+
+```sql
+DECLARE
+    FUNCTION factorial (
+        n BINARY_INTEGER
+    ) RETURN BINARY_INTEGER
+    IS
+    BEGIN
+        IF n = 1 THEN
+            RETURN n;
+        ELSE
+            RETURN n * factorial(n-1);
+        END IF;
+    END factorial;
+BEGIN
+    FOR i IN 1..5 LOOP
+        DBMS_OUTPUT.PUT_LINE(i || '! = ' || factorial(i));
+    END LOOP;
+END;
+```
+
+The following is the output:
+
+```sql
+__OUTPUT__
+1! = 1
+2! = 2
+3! = 6
+4! = 24
+5! = 120
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx
new file mode 100644
index 00000000000..6ba631fead5
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx
@@ -0,0 +1,81 @@
+---
+title: "Declaring block relationships"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can declare the relationship between blocks in an SPL program. The ability to invoke subprograms and access identifiers declared in a block depends on this relationship.
+
+## About block relationships
+
+The following are the basic terms:
+
+- A *block* is the basic SPL structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks implement standalone procedure and function programs, anonymous blocks, triggers, packages, and subprocedures and subfunctions.
+- An identifier (variable, cursor, type, or subprogram) *local to a block* means that it's declared in the declaration section of the given block. You can access such local identifiers from the executable section and optional exception section of the block.
+- The *parent block* contains the declaration of another block, that is, the *child block*.
+- *Descendent blocks* are the set of blocks forming the child relationship starting from a given parent block.
+- *Ancestor blocks* are the set of blocks forming the parental relationship starting from a given child block.
+- The set of descendent (ancestor) blocks form a *hierarchy*.
+- The *level* is an ordinal number of a given block from the highest ancestor block. For example, given a standalone procedure, the subprograms declared in the declaration section of this procedure are all at the same level, such as level 1. Additional subprograms in the declaration section of the subprograms declared in the standalone procedure are at the next level, that is, level 2. +- The *sibling blocks* are the set of blocks that have the same parent block, that is, they are all locally declared in the same block. Sibling blocks are at the same level relative to each other. + +## Example + +The following schematic of a set of procedure declaration sections provides an example of a set of blocks and their relationships to their surrounding blocks. + +The two vertical lines on the left-hand side of the blocks indicate there are two pairs of sibling blocks. `block_1a` and `block_1b` is one pair, and `block_2a` and `block_2b` is the second pair. + +The relationship of each block with its ancestors is shown on the right-hand side of the blocks. Three hierarchical paths are formed when progressing up the hierarchy from the lowest-level child blocks. The first consists of `block_0`, `block_1a`, `block_2a`, and `block_3`. The second is `block_0`, `block_1a`, and `block_2b`. The third is `block_0`, `block_1b`, and `block_2b`. + +```sql +CREATE PROCEDURE block_0 +IS + . + +---- PROCEDURE block_1a ------- Local to block_0 + | IS + | . | + | . | + | . | + | +-- PROCEDURE block_2a ---- Local to block_1a and descendant + | | IS of block_0 + | | . | + | | . | + | | . | + | | PROCEDURE block_3 -- Local to block_2a and descendant + | | IS of block_1a, and block_0 + | Siblings . | + | | . | + | | . | + | | END block_3; | + | | END block_2a; | + | +-- PROCEDURE block_2b ---- Local to block_1a and descendant + | | IS of block_0 + Siblings | , | + | | . | + | | . | + | +-- END block_2b; | + | | + | END block_1a; ---------+ + +---- PROCEDURE block_1b; ------- Local to block_0 + | IS + | . | + | . | + | . | + | PROCEDURE block_2b ---- Local to block_1b and descendant + | IS of block_0 + | . | + | . | + | . | + | END block_2b; | + | | + +---- END block_1b; ---------+ +BEGIN + . + . + . +END block_0; +``` + +The rules for invoking subprograms based on block location are described starting with [Invoking subprograms](04_invoking_subprograms/#invoking_subprograms). The rules for accessing variables based on block location are described in [Accessing subprogram variables](07_accessing_subprogram_variables/#accessing_subprogram_variables). diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx new file mode 100644 index 00000000000..eb240543652 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx @@ -0,0 +1,355 @@ +--- +title: "Invoking subprograms" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Invoke a subprogram in the same manner as a standalone procedure or function by specifying its name and any actual parameters. 
+
+You can invoke the subprogram with zero, one, or more qualifiers. Qualifiers are the names of the parent subprograms or labeled anonymous blocks forming the ancestor hierarchy from which the subprogram was declared.
+
+## Overview of subprograms
+
+Invoke the subprogram using a dot-separated list of qualifiers ending with the subprogram name and any of its arguments:
+
+```text
+[<qualifier_1>.[<qualifier_2>...].]<subprog> [(<arguments>)]
+```
+
+### Specifying qualifiers
+
+If specified, `qualifier_n` is the subprogram in which `subprog` was declared in its declaration section. The preceding list of qualifiers must reside in a continuous path up the hierarchy from `qualifier_n` to `qualifier_1`. `qualifier_1` can be any ancestor subprogram in the path as well as any of the following:
+
+- Standalone procedure name containing the subprogram
+- Standalone function name containing the subprogram
+- Package name containing the subprogram
+- Object type name containing the subprogram in an object type method
+- An anonymous block label included prior to the `DECLARE` keyword if a declaration section exists, or prior to the `BEGIN` keyword if there is no declaration section
+
+!!! Note
+    `qualifier_1` can't be a schema name. If it is, an error is thrown when invoking the subprogram. This EDB Postgres Advanced Server restriction isn't compatible with Oracle databases, which allow the use of the schema name as a qualifier.
+
+`arguments` is the list of actual parameters to pass to the subprocedure or subfunction.
+
+### Searching for subprograms
+
+When you invoke the subprogram, the search for the subprogram occurs as follows:
+
+- The invoked subprogram name, its type (that is, subprocedure or subfunction), and any qualifiers in the specified order (referred to as the *invocation list*) are used to find a matching set of blocks residing in the same hierarchical order. The search begins in the block hierarchy where the lowest level is the block from where the subprogram is invoked. The declaration of the subprogram must be in the SPL code prior to the code line where it's invoked when the code is observed from top to bottom. (You can achieve an exception to this requirement using a forward declaration. See [Using forward declarations](05_using_forward_declarations/#using_forward_declarations).)
+- If the invocation list doesn't match the hierarchy of blocks starting from the block where the subprogram is invoked, a comparison is made by matching the invocation list starting with the parent of the previous starting block. In other words, the comparison progresses up the hierarchy.
+- If there are sibling blocks of the ancestors, the invocation list comparison also includes the hierarchy of the sibling blocks but always comparing in an upward level. It doesn't compare the descendants of the sibling blocks.
+- This comparison process continues up the hierarchies until the first complete match is found, in which case the located subprogram is invoked. The formal parameter list of the matched subprogram must comply with the actual parameter list specified for the invoked subprogram. Otherwise an error occurs when invoking the subprogram.
+- If no match is found after searching up to the standalone program, then an error is thrown when invoking the subprogram.
+
+!!! Note
+    The EDB Postgres Advanced Server search algorithm for subprogram invocation isn't completely compatible with Oracle databases. For Oracle, the search looks for the first match of the first qualifier (that is, `qualifier_1`).
When such a match is found, all remaining qualifiers, the subprogram name, subprogram type, and arguments of the invocation must match the hierarchy content where the matching first qualifier is found. Otherwise an error is thrown. For EDB Postgres Advanced Server, a match isn't found unless all qualifiers, the subprogram name, and the subprogram type of the invocation match the hierarchy content. If such an exact match isn't initially found, EDB Postgres Advanced Server continues the search progressing up the hierarchy. + +You can access the location of subprograms relative to the block from where the invocation is made as follows: + +- You can invoke subprograms declared in the local block from the executable section or the exception section of the same block. +- You can invoke subprograms declared in the parent or other ancestor blocks from the child block of the parent or other ancestors. +- You can call subprograms declared in sibling blocks from a sibling block or from any descendent block of the sibling. + +However, you can't access the following locations of subprograms relative to the block from where the invocation is made: + +- Subprograms declared in blocks that are descendants of the block from where the invocation is attempted +- Subprograms declared in blocks that are descendants of a sibling block from where the invocation is attempted + +## Invoking locally declared subprograms + +This example contains a single hierarchy of blocks contained in the standalone procedure `level_0`. In the executable section of the procedure `level_1a`, the means of invoking the local procedure `level_2a` are shown, with and without qualifiers. + +Access to the descendant of the local procedure `level_2a`, which is the procedure `level_3a`, isn't permitted, with or without qualifiers. These calls are commented out in the example. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + PROCEDURE level_1a + IS + PROCEDURE level_2a + IS + PROCEDURE level_3a + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('........ BLOCK level_3a'); + DBMS_OUTPUT.PUT_LINE('........ END BLOCK level_3a'); + END level_3a; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + level_2a; -- Local block called + level_1a.level_2a; -- Qualified local block called + level_0.level_1a.level_2a; -- Double qualified local block called +-- level_3a; -- Error - Descendant of local block +-- level_2a.level_3a; -- Error - Descendant of local block + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + level_1a; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +When the standalone procedure is invoked, the output is the following. This output indicates that the procedure `level_2a` is successfully invoked from the calls in the executable section of the procedure `level_1a`: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. BLOCK level_1a +...... BLOCK level_2a +...... END BLOCK level_2a +...... BLOCK level_2a +...... END BLOCK level_2a +...... BLOCK level_2a +...... END BLOCK level_2a +.. END BLOCK level_1a +END BLOCK level_0 +``` + +If you try to run the procedure `level_0` with any of the calls to the descendent block uncommented, then an error occurs. 
+ +## Invoking subprograms declared in ancestor blocks + +This example shows how to invoke subprograms that are declared in parent and other ancestor blocks relative to the block where the invocation is made. + +In this example, the executable section of procedure `level_3a` invokes the procedure `level_2a`, which is its parent block. `v_cnt` is used to avoid an infinite loop. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_cnt NUMBER(2) := 0; + PROCEDURE level_1a + IS + PROCEDURE level_2a + IS + PROCEDURE level_3a + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('........ BLOCK level_3a'); + v_cnt := v_cnt + 1; + IF v_cnt < 2 THEN + level_2a; -- Parent block called + END IF; + DBMS_OUTPUT.PUT_LINE('........ END BLOCK level_3a'); + END level_3a; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + level_3a; -- Local block called + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + level_2a; -- Local block called + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + level_1a; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. BLOCK level_1a +...... BLOCK level_2a +........ BLOCK level_3a +...... BLOCK level_2a +........ BLOCK level_3a +........ END BLOCK level_3a +...... END BLOCK level_2a +........ END BLOCK level_3a +...... END BLOCK level_2a +.. END BLOCK level_1a +END BLOCK level_0 +``` + +In a similar example, the executable section of the procedure `level_3a` invokes the procedure `level_1a`, which is further up the ancestor hierarchy. `v_cnt` is again used to avoid an infinite loop. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_cnt NUMBER(2) := 0; + PROCEDURE level_1a + IS + PROCEDURE level_2a + IS + PROCEDURE level_3a + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('........ BLOCK level_3a'); + v_cnt := v_cnt + 1; + IF v_cnt < 2 THEN + level_1a; -- Ancestor block called + END IF; + DBMS_OUTPUT.PUT_LINE('........ END BLOCK level_3a'); + END level_3a; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + level_3a; -- Local block called + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + level_2a; -- Local block called + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + level_1a; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. BLOCK level_1a +...... BLOCK level_2a +........ BLOCK level_3a +.. BLOCK level_1a +...... BLOCK level_2a +........ BLOCK level_3a +........ END BLOCK level_3a +...... END BLOCK level_2a +.. END BLOCK level_1a +........ END BLOCK level_3a +...... END BLOCK level_2a +.. END BLOCK level_1a +END BLOCK level_0 +``` + +## Invoking subprograms declared in sibling blocks + +These examples show how you can invoke subprograms that are declared in a sibling block relative to the local, parent, or other ancestor blocks from where the invocation of the subprogram is made. + +In this example, the executable section of the procedure `level_1b` invokes the procedure `level_1a`, which is its sibling block. Both are local to the standalone procedure `level_0`. 
+ +Invoking `level_2a` or, equivalently, `level_1a.level_2a` from the procedure `level_1b` is commented out as this call results in an error. Invoking a descendent subprogram (`level_2a`) of a sibling block (`level_1a`) isn't permitted. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_cnt NUMBER(2) := 0; + PROCEDURE level_1a + IS + PROCEDURE level_2a + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; + PROCEDURE level_1b + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); + level_1a; -- Sibling block called +-- level_2a; -- Error – Descendant of sibling block +-- level_1a.level_2a; -- Error - Descendant of sibling block + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); + END level_1b; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + level_1b; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. BLOCK level_1b +.. BLOCK level_1a +.. END BLOCK level_1a +.. END BLOCK level_1b +END BLOCK level_0 +``` + +In this example, the procedure `level_1a` is successfully invoked. It's the sibling of the procedure `level_1b`, which is an ancestor of the procedure `level_3b`. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + PROCEDURE level_1a + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; + PROCEDURE level_1b + IS + PROCEDURE level_2b + IS + PROCEDURE level_3b + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('........ BLOCK level_3b'); + level_1a; -- Ancestor's sibling block called + level_0.level_1a; -- Qualified ancestor's sibling block + DBMS_OUTPUT.PUT_LINE('........ END BLOCK level_3b'); + END level_3b; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2b'); + level_3b; -- Local block called + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2b'); + END level_2b; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); + level_2b; -- Local block called + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); + END level_1b; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + level_1b; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. BLOCK level_1b +...... BLOCK level_2b +........ BLOCK level_3b +.. BLOCK level_1a +.. END BLOCK level_1a +.. BLOCK level_1a +.. END BLOCK level_1a +........ END BLOCK level_3b +...... END BLOCK level_2b +.. 
END BLOCK level_1b
+END BLOCK level_0
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/05_using_forward_declarations.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/05_using_forward_declarations.mdx
new file mode 100644
index 00000000000..b594c2db4b9
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/05_using_forward_declarations.mdx
@@ -0,0 +1,57 @@
+---
+title: "Using forward declarations"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/05_using_forward_declarations/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+When you want to invoke a subprogram, you must declare it in the hierarchy of blocks in the standalone program prior to where you invoke it. In other words, when scanning the SPL code from beginning to end, the subprogram declaration must appear before its invocation.
+
+However, you can construct the SPL code so that the full declaration of the subprogram appears after the point where it's invoked. (The full declaration includes its optional declaration section, its mandatory executable section, and optional exception section.)
+
+You can do this by inserting a *forward declaration* in the SPL code prior to its invocation. The forward declaration is the specification of a subprocedure or subfunction name, formal parameters, and return type if it's a subfunction.
+
+You must specify the full subprogram, consisting of the optional declaration section, the executable section, and the optional exception section, in the same declaration section as the forward declaration. However, it can appear after other subprogram declarations that invoke this subprogram through the forward declaration.
+
+Typical use of a forward declaration is when two subprograms invoke each other:
+
+```sql
+DECLARE
+    FUNCTION add_one (
+        p_add       IN NUMBER
+    ) RETURN NUMBER;
+    FUNCTION test_max (
+        p_test      IN NUMBER)
+    RETURN NUMBER
+    IS
+    BEGIN
+        IF p_test < 5 THEN
+            RETURN add_one(p_test);
+        END IF;
+        DBMS_OUTPUT.PUT('Final value is ');
+        RETURN p_test;
+    END;
+    FUNCTION add_one (
+        p_add       IN NUMBER)
+    RETURN NUMBER
+    IS
+    BEGIN
+        DBMS_OUTPUT.PUT_LINE('Increase by 1');
+        RETURN test_max(p_add + 1);
+    END;
+BEGIN
+    DBMS_OUTPUT.PUT_LINE(test_max(3));
+END;
+```
+
+Subfunction `test_max` invokes subfunction `add_one`, which also invokes subfunction `test_max`. A forward declaration is required for one of the subprograms, which is implemented for `add_one` at the beginning of the anonymous block declaration section.
+
+The resulting output from the anonymous block is as follows:
+
+```sql
+__OUTPUT__
+Increase by 1
+Increase by 1
+Final value is 5
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx
new file mode 100644
index 00000000000..9d629226023
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx
@@ -0,0 +1,165 @@
+---
+title: "Overloading subprograms"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Generally, subprograms of the same type (subprocedure or subfunction) with the same name and same formal parameter specification can appear multiple times in the same standalone program as long as they aren't sibling blocks (that is, the subprograms aren't declared in the same local block).
+
+You can invoke each subprogram individually depending on the use of qualifiers and the location where the subprogram invocation is made.
+
+However, it's possible to declare subprograms, even as siblings, that are of the same subprogram type and name as long as certain aspects of the formal parameters differ. These characteristics (subprogram type, name, and formal parameter specification) are generally known as a program’s *signature*.
+
+The declaration of multiple subprograms where the signatures are identical except for certain aspects of the formal parameter specification is referred to as subprogram *overloading*.
+
+## Requirements
+
+The particular overloaded subprogram to invoke is determined by a match of the actual parameters specified by the subprogram invocation and the formal parameter lists of the overloaded subprograms.
+
+Any of the following differences permit overloaded subprograms:
+
+- The number of formal parameters is different.
+- At least one pair of data types of the corresponding formal parameters (that is, compared according to the same order of appearance in the formal parameter list) is different, and the two data types aren't aliases of each other.
+
+The following differences alone don't permit overloaded subprograms:
+
+- Different formal parameter names
+- Different parameter modes (`IN`, `IN OUT`, `OUT`) for the corresponding formal parameters
+- For subfunctions, different data types in the `RETURN` clause
+
+Because one of the differences that permits overloading is a difference in formal parameter data types, data type aliases affect the comparison, as described next.
+
+## Using aliases
+
+Certain data types have alternative names, referred to as *aliases*, that you can use in a table definition.
+
+For example, you can specify fixed-length character data types as `CHAR` or `CHARACTER`. You can specify variable-length character data types as `CHAR VARYING`, `CHARACTER VARYING`, `VARCHAR`, or `VARCHAR2`. For integers, there are `BINARY_INTEGER`, `PLS_INTEGER`, and `INTEGER` data types. For numbers, there are `NUMBER`, `NUMERIC`, `DEC`, and `DECIMAL` data types.
+
+For detailed information about the data types supported by EDB Postgres Advanced Server, see [Data types](../../../../reference/sql_reference/02_data_types/).
+
+Thus, when attempting to create overloaded subprograms, the formal parameter data types aren't considered different if the specified data types are aliases of each other.
+
+You can determine whether certain data types are aliases of other types by displaying the definition of a table containing columns of those data types.
+
+## Example: Data types and aliases
+
+The following table definition contains some data types and their aliases:
+
+```sql
+CREATE TABLE data_type_aliases (
+    dt_BLOB             BLOB,
+    dt_LONG_RAW         LONG RAW,
+    dt_RAW              RAW(4),
+    dt_BYTEA            BYTEA,
+    dt_INTEGER          INTEGER,
+    dt_BINARY_INTEGER   BINARY_INTEGER,
+    dt_PLS_INTEGER      PLS_INTEGER,
+    dt_REAL             REAL,
+    dt_DOUBLE_PRECISION DOUBLE PRECISION,
+    dt_FLOAT            FLOAT,
+    dt_NUMBER           NUMBER,
+    dt_DECIMAL          DECIMAL,
+    dt_NUMERIC          NUMERIC,
+    dt_CHAR             CHAR,
+    dt_CHARACTER        CHARACTER,
+    dt_VARCHAR2         VARCHAR2(4),
+    dt_CHAR_VARYING     CHAR VARYING(4),
+    dt_VARCHAR          VARCHAR(4)
+);
+```
+
+When you use the PSQL `\d` command to display the table definition, the Type column shows the data type internally assigned to each column based on the data type in the table definition:
+
+```sql
+\d data_type_aliases
+        Column       |         Type         | Modifiers
+---------------------+----------------------+-----------
+ dt_blob             | bytea                |
+ dt_long_raw         | bytea                |
+ dt_raw              | bytea(4)             |
+ dt_bytea            | bytea                |
+ dt_integer          | integer              |
+ dt_binary_integer   | integer              |
+ dt_pls_integer      | integer              |
+ dt_real             | real                 |
+ dt_double_precision | double precision     |
+ dt_float            | double precision     |
+ dt_number           | numeric              |
+ dt_decimal          | numeric              |
+ dt_numeric          | numeric              |
+ dt_char             | character(1)         |
+ dt_character        | character(1)         |
+ dt_varchar2         | character varying(4) |
+ dt_char_varying     | character varying(4) |
+ dt_varchar          | character varying(4) |
+```
+
+In the example, the base data types are `bytea`, `integer`, `real`, `double precision`, `numeric`, `character`, and `character varying`.
+
+When attempting to declare overloaded subprograms, a pair of formal parameter data types that are aliases of each other isn't enough to allow subprogram overloading. Thus, parameters with data types `INTEGER` and `PLS_INTEGER` can't overload a pair of subprograms. However, data types `INTEGER` and `REAL`, `INTEGER` and `FLOAT`, or `INTEGER` and `NUMBER` can overload the subprograms.
+
+!!! Note
+    The overloading rules based on formal parameter data types aren't compatible with Oracle databases. Generally, the EDB Postgres Advanced Server rules are more flexible, and certain combinations are allowed in EDB Postgres Advanced Server that result in an error when attempting to create the procedure or function in Oracle databases.
+
+For certain pairs of data types used for overloading, you might need to cast the arguments specified by the subprogram invocation to avoid an error encountered during runtime of the subprogram. The invocation of a subprogram must include an actual parameter list that specifically identifies the data types. Certain pairs of overloaded data types might require the `CAST` function to explicitly identify data types. For example, pairs of overloaded data types that might require casting during the invocation are `CHAR` and `VARCHAR2`, or `NUMBER` and `REAL`.
+
+## Example: Overloaded subfunctions
+
+This example shows a group of overloaded subfunctions invoked from an anonymous block. The executable section of the anonymous block contains the use of the `CAST` function to invoke overloaded functions with certain data types.
+
+```sql
+DECLARE
+    FUNCTION add_it (
+        p_add_1     IN BINARY_INTEGER,
+        p_add_2     IN BINARY_INTEGER
+    ) RETURN VARCHAR2
+    IS
+    BEGIN
+        RETURN 'add_it BINARY_INTEGER: ' || TO_CHAR(p_add_1 + p_add_2,'9999.9999');
+    END add_it;
+    FUNCTION add_it (
+        p_add_1     IN NUMBER,
+        p_add_2     IN NUMBER
+    ) RETURN VARCHAR2
+    IS
+    BEGIN
+        RETURN 'add_it NUMBER: ' || TO_CHAR(p_add_1 + p_add_2,'999.9999');
+    END add_it;
+    FUNCTION add_it (
+        p_add_1     IN REAL,
+        p_add_2     IN REAL
+    ) RETURN VARCHAR2
+    IS
+    BEGIN
+        RETURN 'add_it REAL: ' || TO_CHAR(p_add_1 + p_add_2,'9999.9999');
+    END add_it;
+    FUNCTION add_it (
+        p_add_1     IN DOUBLE PRECISION,
+        p_add_2     IN DOUBLE PRECISION
+    ) RETURN VARCHAR2
+    IS
+    BEGIN
+        RETURN 'add_it DOUBLE PRECISION: ' || TO_CHAR(p_add_1 + p_add_2,'9999.9999');
+    END add_it;
+BEGIN
+    DBMS_OUTPUT.PUT_LINE(add_it (25, 50));
+    DBMS_OUTPUT.PUT_LINE(add_it (25.3333, 50.3333));
+    DBMS_OUTPUT.PUT_LINE(add_it (TO_NUMBER(25.3333), TO_NUMBER(50.3333)));
+    DBMS_OUTPUT.PUT_LINE(add_it (CAST('25.3333' AS REAL), CAST('50.3333' AS REAL)));
+    DBMS_OUTPUT.PUT_LINE(add_it (CAST('25.3333' AS DOUBLE PRECISION),
+                                 CAST('50.3333' AS DOUBLE PRECISION)));
+END;
+```
+
+The following is the output displayed from the anonymous block:
+
+```sql
+__OUTPUT__
+add_it BINARY_INTEGER: 75.0000
+add_it NUMBER: 75.6666
+add_it NUMBER: 75.6666
+add_it REAL: 75.6666
+add_it DOUBLE PRECISION: 75.6666
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx
new file mode 100644
index 00000000000..35712c77b2d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx
@@ -0,0 +1,411 @@
+---
+title: "Accessing subprogram variables"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can access variables declared in blocks, such as subprograms or anonymous blocks, from the executable section or the exception section of other blocks depending on their relative location.
+
+Accessing a variable means being able to reference it in a SQL statement or an SPL statement as you can with any local variable.
+
+!!! Note
+    If the subprogram signature contains formal parameters, you can access these in the same way as local variables of the subprogram. All discussion related to variables of a subprogram also applies to formal parameters of the subprogram.
+
+Accessible variables include not only those defined as a data type but also others such as record types, collection types, and cursors.
+
+You can use at most one qualifier when accessing a variable. The qualifier is the name of the subprogram or labeled anonymous block in which the variable was locally declared.
+
+## Syntax
+
+The syntax to reference a variable is:
+
+```text
+[<qualifier>.]<variable>
+```
+
+If specified, `qualifier` is the subprogram or labeled anonymous block in which `variable` was declared in its declaration section (that is, it's a local variable).
+
+!!! Note
+    In EDB Postgres Advanced Server, in only one circumstance are two qualifiers permitted.
This scenario is for accessing public variables of packages where you can specify the reference in the following format: + +```text +schema_name.package_name.public_variable_name +``` + +For more information about supported package syntax, see [Built-in packages](../../../../reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/). + +## Requirements + +You can access variables in the following ways: + +- Variables can be accessed as long as the block in which the variable was locally declared is in the ancestor hierarchical path starting from the block containing the reference to the variable. Such variables declared in ancestor blocks are referred to as *global variables*. +- If a reference to an unqualified variable is made, the first attempt is to locate a local variable of that name. If such a local variable doesn't exist, then the search for the variable is made in the parent of the current block, and so forth, proceeding up the ancestor hierarchy. If such a variable isn't found, then an error occurs when the subprogram is invoked. +- If a reference to a qualified variable is made, the same search process is performed but searching for the first match of the subprogram or labeled anonymous block that contains the local variable. The search proceeds up the ancestor hierarchy until a match is found. If such a match isn't found, then an error occurs when the subprogram is invoked. + +You can't access the following location of variables relative to the block from where the reference to the variable is made: + +- Variables declared in a descendent block +- Variables declared in a sibling block, a sibling block of an ancestor block, or any descendants within the sibling block + +!!! Note + The EDB Postgres Advanced Server process for accessing variables isn't compatible with Oracle databases. For Oracle, you can specify any number of qualifiers, and the search is based on the first match of the first qualifier in a similar manner to the Oracle matching algorithm for invoking subprograms. + +## Accessing variables with the same name + +This example shows similar access attempts when all variables in all blocks have the same name: + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_common VARCHAR2(20) := 'Value from level_0'; + PROCEDURE level_1a + IS + v_common VARCHAR2(20) := 'Value from level_1a'; + PROCEDURE level_2a + IS + v_common VARCHAR2(20) := 'Value from level_2a'; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + DBMS_OUTPUT.PUT_LINE('........ v_common: ' || v_common); + DBMS_OUTPUT.PUT_LINE('........ level_2a.v_common: ' || level_2a.v_common); + DBMS_OUTPUT.PUT_LINE('........ level_1a.v_common: ' || level_1a.v_common); + DBMS_OUTPUT.PUT_LINE('........ level_0.v_common: ' || level_0.v_common); + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + DBMS_OUTPUT.PUT_LINE('.... v_common: ' || v_common); + DBMS_OUTPUT.PUT_LINE('.... level_0.v_common: ' || level_0.v_common); + level_2a; + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; + PROCEDURE level_1b + IS + v_common VARCHAR2(20) := 'Value from level_1b'; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); + DBMS_OUTPUT.PUT_LINE('.... v_common: ' || v_common); + DBMS_OUTPUT.PUT_LINE('.... level_0.v_common : ' || level_0.v_common); + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); + END level_1b; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + DBMS_OUTPUT.PUT_LINE('.. 
v_common: ' || v_common);
+    level_1a;
+    level_1b;
+    DBMS_OUTPUT.PUT_LINE('END BLOCK level_0');
+END level_0;
+```
+
+The following is the output showing the content of each variable when the procedure is invoked:
+
+```sql
+BEGIN
+    level_0;
+END;
+__OUTPUT__
+BLOCK level_0
+.. v_common: Value from level_0
+.. BLOCK level_1a
+.... v_common: Value from level_1a
+.... level_0.v_common: Value from level_0
+...... BLOCK level_2a
+........ v_common: Value from level_2a
+........ level_2a.v_common: Value from level_2a
+........ level_1a.v_common: Value from level_1a
+........ level_0.v_common: Value from level_0
+...... END BLOCK level_2a
+.. END BLOCK level_1a
+.. BLOCK level_1b
+.... v_common: Value from level_1b
+.... level_0.v_common : Value from level_0
+.. END BLOCK level_1b
+END BLOCK level_0
+```
+
+## Using labels to qualify access to variables
+
+You can also use the labels on anonymous blocks to qualify access to variables. This example shows variable access in a set of nested anonymous blocks:
+
+```sql
+DECLARE
+    v_common        VARCHAR2(20) := 'Value from level_0';
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('BLOCK level_0');
+    DBMS_OUTPUT.PUT_LINE('.. v_common: ' || v_common);
+    <<level_1a>>
+    DECLARE
+        v_common        VARCHAR2(20) := 'Value from level_1a';
+    BEGIN
+        DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a');
+        DBMS_OUTPUT.PUT_LINE('.... v_common: ' || v_common);
+        <<level_2a>>
+        DECLARE
+            v_common        VARCHAR2(20) := 'Value from level_2a';
+        BEGIN
+            DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a');
+            DBMS_OUTPUT.PUT_LINE('........ v_common: ' || v_common);
+            DBMS_OUTPUT.PUT_LINE('........ level_1a.v_common: ' || level_1a.v_common);
+            DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a');
+        END;
+        DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a');
+    END;
+    <<level_1b>>
+    DECLARE
+        v_common        VARCHAR2(20) := 'Value from level_1b';
+    BEGIN
+        DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b');
+        DBMS_OUTPUT.PUT_LINE('.... v_common: ' || v_common);
+        DBMS_OUTPUT.PUT_LINE('.... level_1b.v_common: ' || level_1b.v_common);
+        DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b');
+    END;
+    DBMS_OUTPUT.PUT_LINE('END BLOCK level_0');
+END;
+```
+
+The following is the output showing the content of each variable when the anonymous block is invoked:
+
+```sql
+__OUTPUT__
+BLOCK level_0
+.. v_common: Value from level_0
+.. BLOCK level_1a
+.... v_common: Value from level_1a
+...... BLOCK level_2a
+........ v_common: Value from level_2a
+........ level_1a.v_common: Value from level_1a
+...... END BLOCK level_2a
+.. END BLOCK level_1a
+.. BLOCK level_1b
+.... v_common: Value from level_1b
+.... level_1b.v_common: Value from level_1b
+.. END BLOCK level_1b
+END BLOCK level_0
+```
+
+## Examples
+
+### Example: Accessing record types in parent blocks
+
+This example is an object type whose object type method, `display_emp`, contains the record type `emp_typ` and the subprocedure `emp_sal_query`. The record variable `r_emp` declared locally to `emp_sal_query` can access the record type `emp_typ` declared in the parent block `display_emp`.
+ +```sql +CREATE OR REPLACE TYPE emp_pay_obj_typ AS OBJECT +( + empno NUMBER(4), + MEMBER PROCEDURE display_emp(SELF IN OUT emp_pay_obj_typ) +); + +CREATE OR REPLACE TYPE BODY emp_pay_obj_typ AS + MEMBER PROCEDURE display_emp (SELF IN OUT emp_pay_obj_typ) + IS + TYPE emp_typ IS RECORD ( + ename emp.ename%TYPE, + job emp.job%TYPE, + hiredate emp.hiredate%TYPE, + sal emp.sal%TYPE, + deptno emp.deptno%TYPE + ); + PROCEDURE emp_sal_query ( + p_empno IN emp.empno%TYPE + ) + IS + r_emp emp_typ; + v_avgsal emp.sal%TYPE; + BEGIN + SELECT ename, job, hiredate, sal, deptno + INTO r_emp.ename, r_emp.job, r_emp.hiredate, r_emp.sal, r_emp.deptno + FROM emp WHERE empno = p_empno; + DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || r_emp.ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || r_emp.job); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || r_emp.hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || r_emp.sal); + DBMS_OUTPUT.PUT_LINE('Dept # : ' || r_emp.deptno); + + SELECT AVG(sal) INTO v_avgsal + FROM emp WHERE deptno = r_emp.deptno; + IF r_emp.sal > v_avgsal THEN + DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the ' + || 'department average of ' || v_avgsal); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the ' + || 'department average of ' || v_avgsal); + END IF; + END; + BEGIN + emp_sal_query(SELF.empno); + END; +END; +``` + +The following is the output displayed when an instance of the object type is created and the procedure `display_emp` is invoked: + +```sql +DECLARE + v_emp EMP_PAY_OBJ_TYP; +BEGIN + v_emp := emp_pay_obj_typ(7900); + v_emp.display_emp; +END; +__OUTPUT__ +Employee # : 7900 +Name : JAMES +Job : CLERK +Hire Date : 03-DEC-81 00:00:00 +Salary : 950.00 +Dept # : 30 +Employee's salary does not exceed the department average of 1566.67 +``` +### Example: Accessing an upper-level procedure + +This example is a package with three levels of subprocedures. A record type, collection type, and cursor type declared in the upper-level procedure can be accessed by the descendent subprocedure. + +```sql +CREATE OR REPLACE PACKAGE emp_dept_pkg +IS + PROCEDURE display_emp ( + p_deptno NUMBER + ); +END; + +CREATE OR REPLACE PACKAGE BODY emp_dept_pkg +IS + PROCEDURE display_emp ( + p_deptno NUMBER + ) + IS + TYPE emp_rec_typ IS RECORD ( + empno emp.empno%TYPE, + ename emp.ename%TYPE + ); + TYPE emp_arr_typ IS TABLE OF emp_rec_typ INDEX BY BINARY_INTEGER; + TYPE emp_cur_type IS REF CURSOR RETURN emp_rec_typ; + PROCEDURE emp_by_dept ( + p_deptno emp.deptno%TYPE + ) + IS + emp_arr emp_arr_typ; + emp_refcur emp_cur_type; + i BINARY_INTEGER := 0; + PROCEDURE display_emp_arr + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + FOR j IN emp_arr.FIRST .. 
emp_arr.LAST LOOP + DBMS_OUTPUT.PUT_LINE(emp_arr(j).empno || ' ' || + emp_arr(j).ename); + END LOOP; + END display_emp_arr; + BEGIN + OPEN emp_refcur FOR SELECT empno, ename FROM emp WHERE deptno = p_deptno; + LOOP + i := i + 1; + FETCH emp_refcur INTO emp_arr(i).empno, emp_arr(i).ename; + EXIT WHEN emp_refcur%NOTFOUND; + END LOOP; + CLOSE emp_refcur; + display_emp_arr; + END emp_by_dept; + BEGIN + emp_by_dept(p_deptno); + END; +END; +``` + +The following is the output displayed when the top-level package procedure is invoked: + +```sql +BEGIN + emp_dept_pkg.display_emp(20); +END; +__OUTPUT__ +EMPNO ENAME +----- ------- +7369 SMITH +7566 JONES +7788 SCOTT +7876 ADAMS +7902 FORD +``` + +### Example: Accessing variables in blocks + +This example shows how variables in various blocks are accessed, with and without qualifiers. The lines that are commented out show attempts to access variables that result in an error. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_level_0 VARCHAR2(20) := 'Value from level_0'; + PROCEDURE level_1a + IS + v_level_1a VARCHAR2(20) := 'Value from level_1a'; + PROCEDURE level_2a + IS + v_level_2a VARCHAR2(20) := 'Value from level_2a'; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + DBMS_OUTPUT.PUT_LINE('........ v_level_2a: ' || v_level_2a); + DBMS_OUTPUT.PUT_LINE('........ v_level_1a: ' || v_level_1a); + DBMS_OUTPUT.PUT_LINE('........ level_1a.v_level_1a: ' || level_1a.v_level_1a); + DBMS_OUTPUT.PUT_LINE('........ v_level_0: ' || v_level_0); + DBMS_OUTPUT.PUT_LINE('........ level_0.v_level_0: ' || level_0.v_level_0); + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + level_2a; +-- DBMS_OUTPUT.PUT_LINE('.... v_level_2a: ' || v_level_2a); +-- Error - Descendent block ----^ +-- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); +-- Error - Descendent block ---------------^ + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; + PROCEDURE level_1b + IS + v_level_1b VARCHAR2(20) := 'Value from level_1b'; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); + DBMS_OUTPUT.PUT_LINE('.... v_level_1b: ' || v_level_1b); + DBMS_OUTPUT.PUT_LINE('.... v_level_0 : ' || v_level_0); +-- DBMS_OUTPUT.PUT_LINE('.... level_1a.v_level_1a: ' || level_1a.v_level_1a); +-- Error - Sibling block -----------------^ +-- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); +-- Error - Sibling block descendant ------^ + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); + END level_1b; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + DBMS_OUTPUT.PUT_LINE('.. v_level_0: ' || v_level_0); + level_1a; + level_1b; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output showing the content of each variable when the procedure is invoked: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. v_level_0: Value from level_0 +.. BLOCK level_1a +...... BLOCK level_2a +........ v_level_2a: Value from level_2a +........ v_level_1a: Value from level_1a +........ level_1a.v_level_1a: Value from level_1a +........ v_level_0: Value from level_0 +........ level_0.v_level_0: Value from level_0 +...... END BLOCK level_2a +.. END BLOCK level_1a +.. BLOCK level_1b +.... v_level_1b: Value from level_1b +.... v_level_0 : Value from level_0 +.. 
END BLOCK level_1b
+END BLOCK level_0
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx
new file mode 100644
index 00000000000..9e41462e83f
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx
@@ -0,0 +1,34 @@
+---
+title: "Subprograms: subprocedures and subfunctions"
+indexCards: simple
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can use the capability and functionality of SPL procedure and function programs to your advantage to build well-structured and maintainable programs by organizing the SPL code into subprocedures and subfunctions.
+
+You can invoke the same SPL code multiple times from different locations in a relatively large SPL program by declaring subprocedures and subfunctions in the SPL program.
+
+Subprocedures and subfunctions have the following characteristics:
+
+- The syntax, structure, and functionality of subprocedures and subfunctions are almost identical to standalone procedures and functions. The major difference is the use of the keyword `PROCEDURE` or `FUNCTION` instead of `CREATE PROCEDURE` or `CREATE FUNCTION` to declare the subprogram.
+- Subprocedures and subfunctions provide isolation for the identifiers (that is, variables, cursors, types, and other subprograms) declared within them. That is, you can't access or alter these identifiers from the upper, parent-level SPL programs or subprograms outside of the subprocedure or subfunction. This ensures that the subprocedure and subfunction results are reliable and predictable.
+- The declaration section of subprocedures and subfunctions can include its own subprocedures and subfunctions. Thus, a multi-level hierarchy of subprograms can exist in the standalone program. In the hierarchy, a subprogram can access the identifiers of upper-level parent subprograms and also invoke upper-level parent subprograms. However, the same access to identifiers and invocation can't be done for lower-level child subprograms in the hierarchy.
+
+You can declare and invoke subprocedures and subfunctions from any of the following types of SPL programs (see the sketch after this list):
+
+- Standalone procedures and functions
+- Anonymous blocks
+- Triggers
+- Packages
+- Procedure and function methods of an object type body
+- Subprocedures and subfunctions declared in any of the preceding programs
+
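+The following minimal sketch (hypothetical names) shows the declaration pattern: a subfunction declared with the `FUNCTION` keyword, rather than `CREATE FUNCTION`, in the declaration section of a standalone procedure and invoked from its executable section:
+
+```sql
+CREATE OR REPLACE PROCEDURE show_marked_up_price (
+    p_price         NUMBER
+)
+IS
+    -- Subfunction: declared with FUNCTION, not CREATE FUNCTION
+    FUNCTION mark_up (
+        p_amount    NUMBER
+    ) RETURN NUMBER
+    IS
+    BEGIN
+        RETURN p_amount * 1.10;
+    END mark_up;
+BEGIN
+    -- The subfunction is visible only inside show_marked_up_price
+    DBMS_OUTPUT.PUT_LINE('Marked-up price: ' || mark_up(p_price));
+END show_marked_up_price;
+```
+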
+<div class="toctree" maxdepth="3">
+
+creating_a_subprocedure
+creating_a_subfunction
+block_relationships
+invoking_subprograms
+using_forward_declarations
+overloading_subprograms
+accessing_subprogram_variables
+
+</div>
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx
new file mode 100644
index 00000000000..7264839644d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx
@@ -0,0 +1,74 @@
+---
+title: "Compilation errors in procedures and functions"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.055.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.149.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+When the EDB Postgres Advanced Server parsers compile a procedure or function, they confirm that both the `CREATE` statement and the program body (the portion of the program that follows the `AS` keyword) conform to the grammar rules for SPL and SQL constructs. By default, the server stops compiling if a parser detects an error. The parsers detect syntax errors in expressions, but they don't detect semantic errors. Semantic errors include an expression referencing a nonexistent column, table, or function, or a value of incorrect type.
+
+## Setting an error count compilation limit
+
+`spl.max_error_count` instructs the server to stop parsing if it encounters the specified number of errors in SPL code or when it encounters an error in SQL code. The default value of `spl.max_error_count` is `10`. The maximum value is `1000`. Setting `spl.max_error_count` to a value of `1` instructs the server to stop parsing when it encounters the first error in either SPL or SQL code.
+
+You can use the `SET` command to specify a value for `spl.max_error_count` for your current session. The syntax is:
+
+```sql
+SET spl.max_error_count = <number_of_errors>
+```
+
+Where `number_of_errors` specifies the number of SPL errors that can occur before the server stops compiling. For example:
+
+```sql
+SET spl.max_error_count = 6
+```
+
+## Example
+
+This setting instructs the server to continue past the first five SPL errors it encounters. When the server encounters the sixth error, it stops validating and prints six detailed error messages and one error summary.
+
+To save time when developing new code or when importing code from another source, you might want to set the `spl.max_error_count` configuration parameter to a relatively high number of errors.
+
+If you instruct the server to continue parsing in spite of errors in the SPL code in a program body, and the parser encounters an error in a segment of SQL code, errors in any SPL or SQL code that follows the incorrect SQL code might go unreported.
For example, the following function results in two errors:
+
+```sql
+CREATE FUNCTION computeBonus(baseSalary number) RETURN number AS
+BEGIN
+
+    bonus := baseSalary * 1.10;
+    total := bonus + 100;
+
+    RETURN bonus;
+END;
+
+ERROR: "bonus" is not a known variable
+LINE 4:     bonus := baseSalary * 1.10;
+            ^
+ERROR: "total" is not a known variable
+LINE 5:     total := bonus + 100;
+            ^
+ERROR: compilation of SPL function/procedure "computebonus" failed due to 2 errors
+```
+
+This example adds a `SELECT` statement to the previous example. The error in the `SELECT` statement masks the other errors that follow.
+
+```sql
+CREATE FUNCTION computeBonus(employeeName number) RETURN number AS
+BEGIN
+    SELECT salary INTO baseSalary FROM emp
+      WHERE ename = employeeName;
+
+    bonus := baseSalary * 1.10;
+    total := bonus + 100;
+
+    RETURN bonus;
+
+END;
+
+ERROR: "basesalary" is not a known variable
+LINE 3:     SELECT salary INTO baseSalary FROM emp WHERE ename = emp...
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/01_execute_privilege.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/01_execute_privilege.mdx
new file mode 100644
index 00000000000..0a39e05f954
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/01_execute_privilege.mdx
@@ -0,0 +1,41 @@
+---
+title: "EXECUTE privilege"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/01_execute_privilege/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+An SPL program (function, procedure, or package) can begin execution only if any of the following are true:
+
+- The current user is a superuser.
+- The current user was granted `EXECUTE` privilege on the SPL program.
+- The current user inherits `EXECUTE` privilege on the SPL program by virtue of being a member of a group that has this privilege.
+- `EXECUTE` privilege was granted to the `PUBLIC` group.
+
+Whenever an SPL program is created in EDB Postgres Advanced Server, `EXECUTE` privilege is granted to the `PUBLIC` group by default. Therefore, any user can immediately execute the program.
+
+You can remove this default privilege by using the `REVOKE EXECUTE` command. For example:
+
+```sql
+REVOKE EXECUTE ON PROCEDURE list_emp FROM PUBLIC;
+```
+
+You can then grant explicit `EXECUTE` privilege on the program to individual users or groups.
+
+```sql
+GRANT EXECUTE ON PROCEDURE list_emp TO john;
+```
+
+Now, user `john` can execute the `list_emp` program. Other users who don't meet any of the required conditions can't.
+
+Once a program begins to execute, the next aspect of security is the privilege checks that occur if the program attempts to perform an action on any database object, including:
+
+- Reading or modifying table or view data
+- Creating, modifying, or deleting a database object such as a table, view, index, or sequence
+- Obtaining the current or next value from a sequence
+- Calling another program (function, procedure, or package)
+
+Each such action is allowed or disallowed based on the privileges granted to the current user on the referenced database object.
+
+It's possible for a database to have more than one object of the same type with the same name, but each such object belongs to a different schema in the database. If this is the case, which object is being referenced by an SPL program?
For more information, see [Database object name resolution](02_database_object_name_resolution). \ No newline at end of file diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/02_database_object_name_resolution.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/02_database_object_name_resolution.mdx new file mode 100644 index 00000000000..b705bb8e5e9 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/02_database_object_name_resolution.mdx @@ -0,0 +1,27 @@ +--- +title: "Database object name resolution" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/02_database_object_name_resolution/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can reference a database object inside an SPL program either by its qualified name or by an unqualified name. A qualified name is in the form of `schema.name`, where `schema` is the name of the schema under which the database object with identifier `name` exists. An unqualified name doesn't have the `schema.` portion. When a reference is made to a qualified name, there is no ambiguity as to which database object is intended. It either does or doesn't exist in the specified schema. + +Locating an object with an unqualified name, however, requires the use of the current user’s search path. When a user becomes the current user of a session, a default search path is always associated with that user. The search path consists of a list of schemas that are searched in left-to-right order for locating an unqualified database object reference. The object is considered nonexistent if it can’t be found in any of the schemas in the search path. You can display the default search path in PSQL using the `SHOW search_path` command: + +```sql +edb=# SHOW search_path; +__OUTPUT__ + search_path +----------------- + "$user", public +(1 row) +``` + +`$user` in the search path is a generic placeholder that refers to the current user. So if the current user of this session is `enterprisedb`, an unqualified database object is searched for in the following schemas in this order: first in `enterprisedb`, and then in `public`. + +Once an unqualified name is resolved in the search path, you can determine if the current user has the appropriate privilege to perform the desired action on that specific object. + +!!! Note + The concept of the search path isn't compatible with Oracle databases. For an unqualified reference, Oracle looks in the schema of the current user for the named database object. Also, in Oracle, a user and their schema is the same entity. In EDB Postgres Advanced Server, a user and a schema are two distinct objects. 
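+
+The following minimal sketch (assuming a hypothetical user `alice` and a table `accounts` that exists in both the `alice` and `public` schemas) illustrates how the search path affects name resolution:
+
+```sql
+-- The default search path: the current user's schema, then public
+SET search_path TO "$user", public;
+
+-- Unqualified reference: searched for in the alice schema first,
+-- then in public; resolves to alice.accounts
+SELECT * FROM accounts;
+
+-- Qualified reference: no search path lookup; always public.accounts
+SELECT * FROM public.accounts;
+```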
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/03_database_object_privileges.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/03_database_object_privileges.mdx new file mode 100644 index 00000000000..683e24c2f78 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/03_database_object_privileges.mdx @@ -0,0 +1,9 @@ +--- +title: "Database object privileges" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/03_database_object_privileges/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Once an SPL program begins execution, any attempt to access a database object from the program results in a check. This check ensures that the current user is authorized to perform the intended action against the referenced object. Privileges on database objects are added and removed using the `GRANT` and `REVOKE` commands. If the current user attempts unauthorized access on a database object, then the program throws an exception. See [Exception handling](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/07_exception_handling/#exception_handling). diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx new file mode 100644 index 00000000000..3f93904ddb3 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx @@ -0,0 +1,23 @@ +--- +title: "About definer and invoker rights" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +When an SPL program is about to begin executing, a determination is made as to the user to associate with this process. This user is referred to as the *current user*. The current user’s database object privileges are used to determine whether access to database objects referenced in the program is permitted. The current prevailing search path in effect when the program is invoked is used to resolve any unqualified object references. + +The selection of the current user is influenced by whether the SPL program was created with definer’s right or invoker’s rights. The `AUTHID` clause determines that selection. Appearance of the clause `AUTHID DEFINER` gives the program definer’s rights. This is also the default if the `AUTHID` clause is omitted. Use of the clause `AUTHID CURRENT_USER` gives the program invoker’s rights. The difference between the two is summarized as follows: + +- If a program has *definer’s rights*, then the owner of the program becomes the current user when program execution begins. The program owner’s database object privileges are used to determine if access to a referenced object is permitted. In a definer’s rights program, the user who actually invoked the program is irrelevant. +- If a program has *invoker’s rights*, then the current user at the time the program is called remains the current user while the program (but not necessarily the subprogram) is executing. 
When an invoker’s rights program is invoked, the current user is typically the user that started the session (made the database connection). You can change the current user after the session started using the `SET ROLE` command. In an invoker’s rights program, who actually owns the program is irrelevant.
+
+From the previous definitions, the following observations can be made:
+
+- If a definer’s rights program calls a definer’s rights program, the current user changes from the owner of the calling program to the owner of the called program while the called program executes.
+- If a definer’s rights program calls an invoker’s rights program, the owner of the calling program remains the current user while both the calling and called programs execute.
+- If an invoker’s rights program calls an invoker’s rights program, the current user of the calling program remains the current user while the called program executes.
+- If an invoker’s rights program calls a definer’s rights program, the current user switches to the owner of the definer’s rights program while the called program executes.
+
+The same principles apply if the called program in turn calls another program in these cases.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx
new file mode 100644
index 00000000000..0c0a5e0f820
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx
@@ -0,0 +1,281 @@
+---
+title: "Security example"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/05_security_example/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+
+In this example, a new database is created along with two users:
+- `hr_mgr`, who owns a copy of the entire sample application in schema `hr_mgr`
+- `sales_mgr`, who owns a schema named `sales_mgr` that has a copy of the `emp` table containing only the employees who work in sales
+
+The procedure `list_emp`, function `hire_clerk`, and package `emp_admin` are used in this example. All of the default privileges that are granted upon installation of the sample application are removed and then explicitly regranted so as to present a more secure environment.
+
+Programs `list_emp` and `hire_clerk` are changed from the default of definer’s rights to invoker’s rights. Then, when `sales_mgr` runs these programs, they act on the `emp` table in the `sales_mgr` schema since the `sales_mgr` search path and privileges are used for name resolution and authorization checking.
+
+Programs `get_dept_name` and `hire_emp` in the `emp_admin` package are then executed by `sales_mgr`. In this case, the `dept` table and `emp` table in the `hr_mgr` schema are accessed, as `hr_mgr` is the owner of the `emp_admin` package, which uses definer’s rights. Since the default search path is in effect with the `$user` placeholder, the schema matching the user (in this case, `hr_mgr`) is used to find the tables.
+
+## Step 1: Create database and users
+
+As user `enterprisedb`, create the `hr` database:
+
+```sql
+CREATE DATABASE hr;
+```
+
+Switch to the `hr` database and create the users:
+
+```sql
+\c hr enterprisedb
+CREATE USER hr_mgr IDENTIFIED BY password;
+CREATE USER sales_mgr IDENTIFIED BY password;
+```
+
+## Step 2: Create the sample application
+
+Create the entire sample application, owned by `hr_mgr`, in the `hr_mgr` schema.
+
+```sql
+\c - hr_mgr
+\i /usr/edb/as17/share/edb-sample.sql
+
+BEGIN
+CREATE TABLE
+CREATE TABLE
+CREATE TABLE
+CREATE VIEW
+CREATE SEQUENCE
+ .
+ .
+ .
+CREATE PACKAGE
+CREATE PACKAGE BODY
+COMMIT
+```
+
+## Step 3: Create the emp table in schema sales_mgr
+
+Create a subset of the `emp` table owned by `sales_mgr` in the `sales_mgr` schema.
+
+```sql
+\c - hr_mgr
+GRANT USAGE ON SCHEMA hr_mgr TO sales_mgr;
+\c - sales_mgr
+CREATE TABLE emp AS SELECT * FROM hr_mgr.emp WHERE job = 'SALESMAN';
+```
+
+The `GRANT USAGE ON SCHEMA` command allows `sales_mgr` access into `hr_mgr`’s schema to make a copy of the `hr_mgr` `emp` table. This step is required in EDB Postgres Advanced Server and isn't compatible with Oracle databases. Oracle doesn't have the concept of a schema that's distinct from its user.
+
+## Step 4: Remove default privileges
+
+Remove all privileges to later illustrate the minimum required privileges needed.
+
+```sql
+\c - hr_mgr
+REVOKE USAGE ON SCHEMA hr_mgr FROM sales_mgr;
+REVOKE ALL ON dept FROM PUBLIC;
+REVOKE ALL ON emp FROM PUBLIC;
+REVOKE ALL ON next_empno FROM PUBLIC;
+REVOKE EXECUTE ON FUNCTION new_empno() FROM PUBLIC;
+REVOKE EXECUTE ON PROCEDURE list_emp FROM PUBLIC;
+REVOKE EXECUTE ON FUNCTION hire_clerk(VARCHAR2,NUMBER) FROM PUBLIC;
+REVOKE EXECUTE ON PACKAGE emp_admin FROM PUBLIC;
+```
+
+## Step 5: Change list_emp to invoker’s rights
+
+While connected as user `hr_mgr`, add the `AUTHID CURRENT_USER` clause to the `list_emp` program and resave it in EDB Postgres Advanced Server. When performing this step, be sure you're logged in as `hr_mgr`. Otherwise the modified program might end up in the `public` schema instead of in the `hr_mgr` schema.
+
+```sql
+CREATE OR REPLACE PROCEDURE list_emp
+AUTHID CURRENT_USER
+IS
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    CURSOR emp_cur IS
+        SELECT empno, ename FROM emp ORDER BY empno;
+BEGIN
+    OPEN emp_cur;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_cur INTO v_empno, v_ename;
+        EXIT WHEN emp_cur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    CLOSE emp_cur;
+END;
+```
+
+## Step 6: Change hire_clerk to invoker’s rights and qualify call to new_empno
+
+While connected as user `hr_mgr`, add the `AUTHID CURRENT_USER` clause to the `hire_clerk` program.
+
+Also, after the `BEGIN` statement, fully qualify the reference `new_empno` to `hr_mgr.new_empno` to ensure the `hire_clerk` function call to the `new_empno` function resolves to the `hr_mgr` schema.
+
+When resaving the program, be sure you're logged in as `hr_mgr`. Otherwise the modified program might end up in the `public` schema instead of in the `hr_mgr` schema.
+
+```sql
+CREATE OR REPLACE FUNCTION hire_clerk (
+    p_ename         VARCHAR2,
+    p_deptno        NUMBER
+) RETURN NUMBER
+AUTHID CURRENT_USER
+IS
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    v_job           VARCHAR2(9);
+    v_mgr           NUMBER(4);
+    v_hiredate      DATE;
+    v_sal           NUMBER(7,2);
+    v_comm          NUMBER(7,2);
+    v_deptno        NUMBER(2);
+BEGIN
+    v_empno := hr_mgr.new_empno;
+    INSERT INTO emp VALUES (v_empno, p_ename, 'CLERK', 7782,
+        TRUNC(SYSDATE), 950.00, NULL, p_deptno);
+    SELECT empno, ename, job, mgr, hiredate, sal, comm, deptno INTO
+        v_empno, v_ename, v_job, v_mgr, v_hiredate, v_sal, v_comm, v_deptno
+        FROM emp WHERE empno = v_empno;
+    DBMS_OUTPUT.PUT_LINE('Department : ' || v_deptno);
+    DBMS_OUTPUT.PUT_LINE('Employee No: ' || v_empno);
+    DBMS_OUTPUT.PUT_LINE('Name       : ' || v_ename);
+    DBMS_OUTPUT.PUT_LINE('Job        : ' || v_job);
+    DBMS_OUTPUT.PUT_LINE('Manager    : ' || v_mgr);
+    DBMS_OUTPUT.PUT_LINE('Hire Date  : ' || v_hiredate);
+    DBMS_OUTPUT.PUT_LINE('Salary     : ' || v_sal);
+    DBMS_OUTPUT.PUT_LINE('Commission : ' || v_comm);
+    RETURN v_empno;
+EXCEPTION
+    WHEN OTHERS THEN
+        DBMS_OUTPUT.PUT_LINE('The following is SQLERRM:');
+        DBMS_OUTPUT.PUT_LINE(SQLERRM);
+        DBMS_OUTPUT.PUT_LINE('The following is SQLCODE:');
+        DBMS_OUTPUT.PUT_LINE(SQLCODE);
+        RETURN -1;
+END;
+```
+
+## Step 7: Grant required privileges
+
+While connected as user `hr_mgr`, grant the privileges needed so `sales_mgr` can execute the `list_emp` procedure, `hire_clerk` function, and `emp_admin` package. The only data object `sales_mgr` has access to is the `emp` table in the `sales_mgr` schema. `sales_mgr` has no privileges on any table in the `hr_mgr` schema.
+
+```sql
+GRANT USAGE ON SCHEMA hr_mgr TO sales_mgr;
+GRANT EXECUTE ON PROCEDURE list_emp TO sales_mgr;
+GRANT EXECUTE ON FUNCTION hire_clerk(VARCHAR2,NUMBER) TO sales_mgr;
+GRANT EXECUTE ON FUNCTION new_empno() TO sales_mgr;
+GRANT EXECUTE ON PACKAGE emp_admin TO sales_mgr;
+```
+
+## Step 8: Run programs list_emp and hire_clerk
+
+Connect as user `sales_mgr`, and run the following anonymous block:
+
+```sql
+\c - sales_mgr
+DECLARE
+    v_empno         NUMBER(4);
+BEGIN
+    hr_mgr.list_emp;
+    DBMS_OUTPUT.PUT_LINE('*** Adding new employee ***');
+    v_empno := hr_mgr.hire_clerk('JONES',40);
+    DBMS_OUTPUT.PUT_LINE('*** After new employee added ***');
+    hr_mgr.list_emp;
+END;
+
+EMPNO    ENAME
+-----    -------
+7499     ALLEN
+7521     WARD
+7654     MARTIN
+7844     TURNER
+*** Adding new employee ***
+Department : 40
+Employee No: 8000
+Name       : JONES
+Job        : CLERK
+Manager    : 7782
+Hire Date  : 08-NOV-07 00:00:00
+Salary     : 950.00
+*** After new employee added ***
+EMPNO    ENAME
+-----    -------
+7499     ALLEN
+7521     WARD
+7654     MARTIN
+7844     TURNER
+8000     JONES
+```
+
+The table and sequence accessed by the programs of the anonymous block are shown in the following diagram. The gray ovals represent the schemas of `sales_mgr` and `hr_mgr`. The current user during each program execution is shown in parentheses in bold red font.
+
+![Invokers Rights Programs](../../images/invokers_rights_programs.png)
+
Fig. 1: Invokers Rights Programs
+ +Selecting from the `sales_mgr` `emp` table shows that the update was made in this table. + +```sql +SELECT empno, ename, hiredate, sal, deptno, +hr_mgr.emp_admin.get_dept_name(deptno) FROM sales_mgr.emp; +__OUTPUT__ + empno | ename | hiredate | sal | deptno | get_dept_name +-------+--------+--------------------+---------+--------+--------------- + 7499 | ALLEN | 20-FEB-81 00:00:00 | 1600.00 | 30 | SALES + 7521 | WARD | 22-FEB-81 00:00:00 | 1250.00 | 30 | SALES + 7654 | MARTIN | 28-SEP-81 00:00:00 | 1250.00 | 30 | SALES + 7844 | TURNER | 08-SEP-81 00:00:00 | 1500.00 | 30 | SALES + 8000 | JONES | 08-NOV-07 00:00:00 | 950.00 | 40 | OPERATIONS +(5 rows) +``` + +The following diagram shows that the `SELECT` command references the `emp` table in the `sales_mgr` schema. However, the `dept` table referenced by the `get_dept_name` function in the `emp_admin` package is from the `hr_mgr` schema since the `emp_admin` package has definer’s rights and is owned by `hr_mgr`. The default search path setting with the `$user` placeholder resolves the access by user `hr_mgr` to the `dept` table in the `hr_mgr` schema. + +![Definer's Rights Package](../../images/definers_rights_package.png) + +
Fig. 2: Definer's Rights Package
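+
+The `$user` entry in the default search path refers to the user that's current when each statement executes, so inside a definer's rights program it resolves to the schema of the program's owner. As a brief, hedged illustration (the exact value reported depends on your server configuration), you can inspect the setting itself:
+
+```sql
+-- Illustrative only: a stock configuration lists "$user" first, so
+-- unqualified names resolve to the current user's own schema first.
+SHOW search_path;
+__OUTPUT__
+   search_path
+-----------------
+ "$user", public
+(1 row)
+```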
+ +## Step 9: Run program hire_emp in the emp_admin package + +While connected as user `sales_mgr`, run the `hire_emp` procedure in the `emp_admin` package. + +```sql +EXEC hr_mgr.emp_admin.hire_emp(9001, +'ALICE','SALESMAN',8000,TRUNC(SYSDATE),1000,7369,40); +``` + +This diagram shows that the `hire_emp` procedure in the `emp_admin` definer’s rights package updates the `emp` table belonging to `hr_mgr`. The object privileges of `hr_mgr` are used and the default search path setting with the `$user` placeholder resolves to the schema of `hr_mgr`. + +![Definer's Rights Package](../../images/definer's_rights_package.png) + +
Fig. 3: Definer's Rights Package
+
+Now connect as user `hr_mgr`. The following `SELECT` command verifies that the new employee was added to the `hr_mgr` `emp` table since the `emp_admin` package has definer’s rights and `hr_mgr` is the `emp_admin` owner.
+
+```sql
+\c - hr_mgr
+SELECT empno, ename, hiredate, sal, deptno,
+hr_mgr.emp_admin.get_dept_name(deptno) FROM hr_mgr.emp;
+__OUTPUT__
+ empno | ename  |      hiredate      |   sal   | deptno | get_dept_name
+-------+--------+--------------------+---------+--------+---------------
+  7369 | SMITH  | 17-DEC-80 00:00:00 |  800.00 |     20 | RESEARCH
+  7499 | ALLEN  | 20-FEB-81 00:00:00 | 1600.00 |     30 | SALES
+  7521 | WARD   | 22-FEB-81 00:00:00 | 1250.00 |     30 | SALES
+  7566 | JONES  | 02-APR-81 00:00:00 | 2975.00 |     20 | RESEARCH
+  7654 | MARTIN | 28-SEP-81 00:00:00 | 1250.00 |     30 | SALES
+  7698 | BLAKE  | 01-MAY-81 00:00:00 | 2850.00 |     30 | SALES
+  7782 | CLARK  | 09-JUN-81 00:00:00 | 2450.00 |     10 | ACCOUNTING
+  7788 | SCOTT  | 19-APR-87 00:00:00 | 3000.00 |     20 | RESEARCH
+  7839 | KING   | 17-NOV-81 00:00:00 | 5000.00 |     10 | ACCOUNTING
+  7844 | TURNER | 08-SEP-81 00:00:00 | 1500.00 |     30 | SALES
+  7876 | ADAMS  | 23-MAY-87 00:00:00 | 1100.00 |     20 | RESEARCH
+  7900 | JAMES  | 03-DEC-81 00:00:00 |  950.00 |     30 | SALES
+  7902 | FORD   | 03-DEC-81 00:00:00 | 3000.00 |     20 | RESEARCH
+  7934 | MILLER | 23-JAN-82 00:00:00 | 1300.00 |     10 | ACCOUNTING
+  9001 | ALICE  | 08-NOV-07 00:00:00 | 8000.00 |     40 | OPERATIONS
+(15 rows)
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx
new file mode 100644
index 00000000000..337591d753c
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx
@@ -0,0 +1,24 @@
+---
+title: "Program security"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.056.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.150.html"
+redirects:
+  - /epas/latest/epas_compat_spl/02_spl_programs/09_program_security/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can control whether a user has the privilege to execute an SPL program. You can also control the database objects an SPL program can access for any given user executing the program. These are controlled by the following:
+
+- Privilege to execute a program
+- Privileges granted on the database objects (including other SPL programs) that a program attempts to access
+- Whether the program is defined with definer’s rights or invoker’s rights
+
+
+ +execute_privilege database_object_name_resolution database_object_privileges definers_vs_invokers_rights security_example + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/index.mdx new file mode 100644 index 00000000000..dc41bbca5ff --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/02_spl_programs/index.mdx @@ -0,0 +1,22 @@ +--- +title: "Types of SPL programs" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.049.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.143.html" +redirects: + - /epas/latest/epas_compat_spl/02_spl_programs/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +SPL is a procedural, block-structured language. You can create four different types of programs using SPL: *procedures*, *functions*, *triggers*, and *packages*. + +In addition, you can use SPL to create subprograms. A *subprogram* refers to a *subprocedure* or a *subfunction*. These are nearly identical in appearance to procedures and functions. They differ in that procedures and functions are *standalone programs*. They are stored individually in the database, and you can invoke them from other SPL programs or from PSQL. You can invoke subprograms only from the standalone program where they were created. + +
+ +basic_spl_elements spl_block_structure anonymous_blocks procedures_overview functions_overview procedure_and_function_parameters subprograms_subprocedures_and_subfunctions compilation_errors_in_procedures_and_functions program_security + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx new file mode 100644 index 00000000000..a5c1a3686fc --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx @@ -0,0 +1,55 @@ +--- +title: "Declaring a variable" +redirects: + - /epas/latest/epas_compat_spl/03_variable_declarations/01_declaring_a_variable/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Generally, you must declare all variables used in a block in the declaration section of the block. A variable declaration consists of a name that's assigned to the variable and its data type. Optionally, you can initialize the variable to a default value in the variable declaration. + +## Syntax + +The general syntax of a variable declaration is: + +```sql + [ { := | DEFAULT } { | NULL } ]; +``` + +`name` is an identifier assigned to the variable. + +`type` is the data type assigned to the variable. + +`[ := expression ]`, if given, specifies the initial value assigned to the variable when the block is entered. If the clause isn't given then the variable is initialized to the SQL `NULL` value. + +The default value is evaluated every time the block is entered. So, for example, assigning `SYSDATE` to a variable of type `DATE` causes the variable to have the time of the current invocation, not the time when the procedure or function was precompiled. + +## Example: Variable declarations that use defaults + +This procedure shows some variable declarations that use defaults consisting of string and numeric expressions: + +```sql +CREATE OR REPLACE PROCEDURE dept_salary_rpt ( + p_deptno NUMBER +) +IS + todays_date DATE := SYSDATE; + rpt_title VARCHAR2(60) := 'Report For Department # ' || p_deptno + || ' on ' || todays_date; + base_sal INTEGER := 35525; + base_comm_rate NUMBER := 1.33333; + base_annual NUMBER := ROUND(base_sal * base_comm_rate, 2); +BEGIN + DBMS_OUTPUT.PUT_LINE(rpt_title); + DBMS_OUTPUT.PUT_LINE('Base Annual Salary: ' || base_annual); +END; +``` + +The following output of the procedure shows that default values in the variable declarations are assigned to the variables: + +```sql +EXEC dept_salary_rpt(20); +__OUTPUT__ +Report For Department # 20 on 10-JUL-07 16:44:45 +Base Annual Salary: 47366.55 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx new file mode 100644 index 00000000000..f319d7a4748 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx @@ -0,0 +1,128 @@ +--- +title: "Using %TYPE in variable declarations" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.058.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.152.html" +redirects: + - /epas/latest/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Often, variables are declared in SPL programs that are used to hold values from tables in the database. To ensure compatibility between the table columns and the SPL variables, make sure their data types are the same. + +However, often a change is made to the table definition. If the data type of the column is changed, you might need to make the corresponding change to the variable in the SPL program. + +Instead of coding the specific column data type into the variable declaration, you can use the column attribute `%TYPE`. Specify a qualified column name in dot notation or the name of a previously declared variable as a prefix to `%TYPE`. The data type of the column or variable prefixed to `%TYPE` is assigned to the variable being declared. If the data type of the given column or variable changes, the new data type is associated with the variable, and you don't need to modify the declaration code. + +!!! Note + You can use the `%TYPE` attribute with formal parameter declarations as well. + +## Syntax + +```sql + { { | }. | }%TYPE; +``` + +- `name` is the identifier assigned to the variable or formal parameter that's being declared. +- `column` is the name of a column in `table` or `view`. +- `variable` is the name of a variable that was declared prior to the variable identified by `name`. + +!!! Note + The variable doesn't inherit any of the column’s other attributes that you specify on the column with the `NOT NULL` clause or the `DEFAULT` clause. 
+ +## Example: Defining parameters using %TYPE + +In this example, a procedure: + +- Queries the `emp` table using an employee number +- Displays the employee’s data +- Finds the average salary of all employees in the department to which the employee belongs +- Compares the chosen employee’s salary with the department average + +```sql +CREATE OR REPLACE PROCEDURE emp_sal_query ( + p_empno IN NUMBER +) +IS + v_ename VARCHAR2(10); + v_job VARCHAR2(9); + v_hiredate DATE; + v_sal NUMBER(7,2); + v_deptno NUMBER(2); + v_avgsal NUMBER(7,2); +BEGIN + SELECT ename, job, hiredate, sal, deptno + INTO v_ename, v_job, v_hiredate, v_sal, v_deptno + FROM emp WHERE empno = p_empno; + DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); + DBMS_OUTPUT.PUT_LINE('Dept # : ' || v_deptno); + + SELECT AVG(sal) INTO v_avgsal + FROM emp WHERE deptno = v_deptno; + IF v_sal > v_avgsal THEN + DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the ' + || 'department average of ' || v_avgsal); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the ' + || 'department average of ' || v_avgsal); + END IF; +END; +``` + +Alternatively, you can write the procedure without explicitly coding the `emp` table data types into the declaration section of the procedure: + +```sql +CREATE OR REPLACE PROCEDURE emp_sal_query ( + p_empno IN emp.empno%TYPE +) +IS + v_ename emp.ename%TYPE; + v_job emp.job%TYPE; + v_hiredate emp.hiredate%TYPE; + v_sal emp.sal%TYPE; + v_deptno emp.deptno%TYPE; + v_avgsal v_sal%TYPE; +BEGIN + SELECT ename, job, hiredate, sal, deptno + INTO v_ename, v_job, v_hiredate, v_sal, v_deptno + FROM emp WHERE empno = p_empno; + DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); + DBMS_OUTPUT.PUT_LINE('Dept # : ' || v_deptno); + + SELECT AVG(sal) INTO v_avgsal + FROM emp WHERE deptno = v_deptno; + IF v_sal > v_avgsal THEN + DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the ' + || 'department average of ' || v_avgsal); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the ' + || 'department average of ' || v_avgsal); + END IF; +END; +``` + +`p_empno` shows an example of a formal parameter defined using `%TYPE`. `v_avgsal` shows the use of `%TYPE` referring to another variable instead of a table column. 
+
+The following is sample output from executing this procedure:
+
+```sql
+EXEC emp_sal_query(7698);
+__OUTPUT__
+Employee # : 7698
+Name       : BLAKE
+Job        : MANAGER
+Hire Date  : 01-MAY-81 00:00:00
+Salary     : 2850.00
+Dept #     : 30
+Employee's salary is more than the department average of 1566.67
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx
new file mode 100644
index 00000000000..3133ce5a563
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx
@@ -0,0 +1,59 @@
+---
+title: "Using %ROWTYPE in record declarations"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.059.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.153.html"
+redirects:
+  - /epas/latest/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The `%TYPE` attribute provides an easy way to create a variable that depends on a column's data type. Using the `%ROWTYPE` attribute, you can define a record that contains fields that correspond to all columns of a given table. Each field takes on the data type of its corresponding column. The fields in the record don't inherit any of the columns' other attributes like those specified with the `NOT NULL` clause or the `DEFAULT` clause.
+
+A *record* is a named, ordered collection of fields. A *field* is similar to a variable. It has an identifier and data type but has the additional property of belonging to a record. You must reference it using dot notation with the record name as its qualifier.
+
+## Syntax
+
+You can use the `%ROWTYPE` attribute to declare a record. The `%ROWTYPE` attribute is prefixed by a table name. Each column in the named table defines an identically named field in the record with the same data type as the column.
+
+```text
+<record> <table>%ROWTYPE;
+```
+
+- `record` is an identifier assigned to the record.
+- `table` is the name of a table or view whose columns define the fields in the record.
+
+## Example
+
+This example shows how you can modify the `emp_sal_query` procedure from [Using %TYPE in variable declarations](02_using__type_in_variable_declarations) to use `emp%ROWTYPE` to create a record named `r_emp` instead of declaring individual variables for the columns in `emp`:
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_sal_query (
+    p_empno         IN emp.empno%TYPE
+)
+IS
+    r_emp           emp%ROWTYPE;
+    v_avgsal        emp.sal%TYPE;
+BEGIN
+    SELECT ename, job, hiredate, sal, deptno
+        INTO r_emp.ename, r_emp.job, r_emp.hiredate, r_emp.sal, r_emp.deptno
+        FROM emp WHERE empno = p_empno;
+    DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno);
+    DBMS_OUTPUT.PUT_LINE('Name       : ' || r_emp.ename);
+    DBMS_OUTPUT.PUT_LINE('Job        : ' || r_emp.job);
+    DBMS_OUTPUT.PUT_LINE('Hire Date  : ' || r_emp.hiredate);
+    DBMS_OUTPUT.PUT_LINE('Salary     : ' || r_emp.sal);
+    DBMS_OUTPUT.PUT_LINE('Dept #     : ' || r_emp.deptno);
+    SELECT AVG(sal) INTO v_avgsal
+        FROM emp WHERE deptno = r_emp.deptno;
+    IF r_emp.sal > v_avgsal THEN
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the '
+            || 'department average of ' || v_avgsal);
+    ELSE
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the '
+            || 'department average of ' || v_avgsal);
+    END IF;
+END;
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx
new file mode 100644
index 00000000000..55669da0581
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx
@@ -0,0 +1,111 @@
+---
+title: "User-defined record types and record variables"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.060.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.154.html"
+redirects:
+  - /epas/latest/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can declare records based on a table definition using the `%ROWTYPE` attribute, as shown in [Using %ROWTYPE in record declarations](03_using__row_type_in_record_declarations). You can also define a new record structure that isn't tied to a particular table definition.
+
+You use the `TYPE IS RECORD` statement to create the definition of a record type. A *record type* is a definition of a record made up of one or more identifiers and their corresponding data types. You can't use a record type by itself to manipulate data.
+
+## Syntax
+
+The syntax for a `TYPE IS RECORD` statement is:
+
+```sql
+TYPE <rec_type> IS RECORD ( <fields> )
+```
+
+Where `fields` is a comma-separated list of one or more field definitions of the following form:
+
+```sql
+<field_name> <data_type> [NOT NULL][ {:= | DEFAULT} <default_value> ]
+```
+
+Where:
+
+- `rec_type` is an identifier assigned to the record type.
+- `field_name` is the identifier assigned to the field of the record type.
+- `data_type` specifies the data type of `field_name`.
+- The `DEFAULT` clause assigns a default data value for the corresponding field. The data type of the default expression must match the data type of the column. If you don't specify a default, then the default is `NULL`.
+
+A *record variable* or *record* is an instance of a record type. A record is declared from a record type. The properties of the record such as its field names and types are inherited from the record type.
+
+The following is the syntax for a record declaration:
+
+```text
+<record> <rectype>;
+```
+
+`record` is an identifier assigned to the record variable. `rectype` is the identifier of a previously defined record type. Once declared, you can then use the record to hold data.
+
+Use dot notation to reference the fields in the record:
+
+```text
+<record>.<field>
+```
+
+`record` is a previously declared record variable and `field` is the identifier of a field belonging to the record type from which `record` is defined.
+
+## Example
+
+This `emp_sal_query` procedure uses a user-defined record type and record variable:
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_sal_query (
+    p_empno         IN emp.empno%TYPE
+)
+IS
+    TYPE emp_typ IS RECORD (
+        ename           emp.ename%TYPE,
+        job             emp.job%TYPE,
+        hiredate        emp.hiredate%TYPE,
+        sal             emp.sal%TYPE,
+        deptno          emp.deptno%TYPE
+    );
+    r_emp           emp_typ;
+    v_avgsal        emp.sal%TYPE;
+BEGIN
+    SELECT ename, job, hiredate, sal, deptno
+        INTO r_emp.ename, r_emp.job, r_emp.hiredate, r_emp.sal, r_emp.deptno
+        FROM emp WHERE empno = p_empno;
+    DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno);
+    DBMS_OUTPUT.PUT_LINE('Name       : ' || r_emp.ename);
+    DBMS_OUTPUT.PUT_LINE('Job        : ' || r_emp.job);
+    DBMS_OUTPUT.PUT_LINE('Hire Date  : ' || r_emp.hiredate);
+    DBMS_OUTPUT.PUT_LINE('Salary     : ' || r_emp.sal);
+    DBMS_OUTPUT.PUT_LINE('Dept #     : ' || r_emp.deptno);
+
+    SELECT AVG(sal) INTO v_avgsal
+        FROM emp WHERE deptno = r_emp.deptno;
+    IF r_emp.sal > v_avgsal THEN
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the '
+            || 'department average of ' || v_avgsal);
+    ELSE
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the '
+            || 'department average of ' || v_avgsal);
+    END IF;
+END;
+```
+
+Instead of specifying data type names, you can use the `%TYPE` attribute for the field data types in the record type definition.
+
+The following is the output from executing this stored procedure:
+
+```sql
+EXEC emp_sal_query(7698);
+__OUTPUT__
+Employee # : 7698
+Name       : BLAKE
+Job        : MANAGER
+Hire Date  : 01-MAY-81 00:00:00
+Salary     : 2850.00
+Dept #     : 30
+Employee's salary is more than the department average of 1566.67
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/index.mdx
new file mode 100644
index 00000000000..4005c2a5b6f
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/03_variable_declarations/index.mdx
@@ -0,0 +1,20 @@
+---
+title: "Using variable declarations"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.057.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.151.html" +redirects: + - /epas/latest/epas_compat_spl/03_variable_declarations/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +SPL is a block-structured language. The first section that can appear in a block is the declaration. The declaration contains the definition of variables, cursors, and other types that you can use in SPL statements contained in the block. + +
+ +declaring_a_variable using\_%\_type_in_variable_declarations using\_%\_row_type_in_record_declarations user_defined_record_types_and_record_variables + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/01_commit.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/01_commit.mdx new file mode 100644 index 00000000000..c9a78de11d4 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/01_commit.mdx @@ -0,0 +1,66 @@ +--- +title: "COMMIT" +redirects: + - /epas/latest/epas_compat_spl/06_transaction_control/01_commit/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `COMMIT` command makes all database updates from the current transaction permanent and ends the current transaction. + +```sql +COMMIT [ WORK ]; +``` + +You can use the `COMMIT` command in anonymous blocks, stored procedures, or functions. In an SPL program, it can appear in the executable section and the exception section. + +In this example, the third `INSERT` command in the anonymous block results in an error. The effect of the first two `INSERT` commands is retained as shown by the first `SELECT` command. Even after issuing a `ROLLBACK` command, the two rows remain in the table, as shown by the second `SELECT` command verifying that they were indeed committed. + +!!! Note + You can set the `edb_stmt_level_tx` configuration parameter shown in the example for the entire database using the `ALTER DATABASE` command. Alternatively, you can set it for the entire database server by changing it in the `postgresql.conf` file. + +```sql +\set AUTOCOMMIT off +SET edb_stmt_level_tx TO on; + +BEGIN + INSERT INTO dept VALUES (50, 'FINANCE', 'DALLAS'); + INSERT INTO dept VALUES (60, 'MARKETING', 'CHICAGO'); + COMMIT; + INSERT INTO dept VALUES (70, 'HUMAN RESOURCES', 'CHICAGO'); +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE); +END; + +SQLERRM: value too long for type character varying(14) +SQLCODE: 22001 + +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON + 50 | FINANCE | DALLAS + 60 | MARKETING | CHICAGO +(6 rows) +``` +```sql +ROLLBACK; + +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON + 50 | FINANCE | DALLAS + 60 | MARKETING | CHICAGO +(6 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/02_rollback.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/02_rollback.mdx new file mode 100644 index 00000000000..75dd37430c5 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/02_rollback.mdx @@ -0,0 +1,183 @@ +--- +title: "ROLLBACK" +redirects: + - /epas/latest/epas_compat_spl/06_transaction_control/02_rollback/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `ROLLBACK` command undoes all database updates made during the current transaction and ends the current transaction. + +```sql +ROLLBACK [ WORK ]; +``` + +You can use the `ROLLBACK` command in anonymous blocks, stored procedures, or functions. In an SPL program, it can appear in the executable section and the exception section. + +In this example, the exception section contains a `ROLLBACK` command. 
Even though the first two `INSERT` commands execute successfully, the third causes an exception that results in the rollback of all the `INSERT` commands in the anonymous block. + +```sql +\set AUTOCOMMIT off +SET edb_stmt_level_tx TO on; + +BEGIN + INSERT INTO dept VALUES (50, 'FINANCE', 'DALLAS'); + INSERT INTO dept VALUES (60, 'MARKETING', 'CHICAGO'); + INSERT INTO dept VALUES (70, 'HUMAN RESOURCES', 'CHICAGO'); +EXCEPTION + WHEN OTHERS THEN + ROLLBACK; + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE); +END; + +SQLERRM: value too long for type character varying(14) +SQLCODE: 22001 + +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON +(4 rows) +``` + +This example uses both `COMMIT` and `ROLLBACK`. First, the following stored procedure is created. It inserts a new employee. + +```sql +\set AUTOCOMMIT off +SET edb_stmt_level_tx TO on; + +CREATE OR REPLACE PROCEDURE emp_insert ( + p_empno IN emp.empno%TYPE, + p_ename IN emp.ename%TYPE, + p_job IN emp.job%TYPE, + p_mgr IN emp.mgr%TYPE, + p_hiredate IN emp.hiredate%TYPE, + p_sal IN emp.sal%TYPE, + p_comm IN emp.comm%TYPE, + p_deptno IN emp.deptno%TYPE +) +IS +BEGIN + INSERT INTO emp VALUES ( + p_empno, + p_ename, + p_job, + p_mgr, + p_hiredate, + p_sal, + p_comm, + p_deptno); + + DBMS_OUTPUT.PUT_LINE('Added employee...'); + DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || p_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || p_job); + DBMS_OUTPUT.PUT_LINE('Manager : ' || p_mgr); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || p_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || p_sal); + DBMS_OUTPUT.PUT_LINE('Commission : ' || p_comm); + DBMS_OUTPUT.PUT_LINE('Dept # : ' || p_deptno); + DBMS_OUTPUT.PUT_LINE('----------------------'); +END; +``` + +This procedure has no exception section. Any errors are propagated up to the calling program. + +Then the following anonymous block runs. The `COMMIT` command is used after all calls to the `emp_insert` procedure and the `ROLLBACK` command in the exception section. + +```sql +BEGIN + emp_insert(9601,'FARRELL','ANALYST',7902,'03-MAR-08',5000,NULL,40); + emp_insert(9602,'TYLER','ANALYST',7900,'25-JAN-08',4800,NULL,40); + COMMIT; +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('An error occurred - roll back inserts'); + ROLLBACK; +END; +__OUTPUT__ +Added employee... +Employee # : 9601 +Name : FARRELL +Job : ANALYST +Manager : 7902 +Hire Date : 03-MAR-08 00:00:00 +Salary : 5000 +Commission : +Dept # : 40 +---------------------- +Added employee... 
+Employee # : 9602 +Name : TYLER +Job : ANALYST +Manager : 7900 +Hire Date : 25-JAN-08 00:00:00 +Salary : 4800 +Commission : +Dept # : 40 +---------------------- +``` + +The following `SELECT` command shows that employees Farrell and Tyler were successfully added: + +```sql +SELECT * FROM emp WHERE empno > 9600; +__OUTPUT__ +empno | ename | job | mgr | hiredate | sal | comm | deptno +------+--------+--------+-----+--------------------+---------+------+-------- + 9601| FARRELL| ANALYST|7902 | 03-MAR-08 00:00:00 | 5000.00 | | 40 + 9602| TYLER | ANALYST|7900 | 25-JAN-08 00:00:00 | 4800.00 | | 40 +(2 rows) +``` + +Next, execute the following anonymous block: + +```sql +BEGIN + emp_insert(9603,'HARRISON','SALESMAN',7902,'13-DEC-07',5000,3000,20); + emp_insert(9604,'JARVIS','SALESMAN',7902,'05-MAY-08',4800,4100,11); + COMMIT; +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('An error occurred - roll back inserts'); + ROLLBACK; +END; +__OUTPUT__ +Added employee... +Employee # : 9603 +Name : HARRISON +Job : SALESMAN +Manager : 7902 +Hire Date : 13-DEC-07 00:00:00 +Salary : 5000 +Commission : 3000 +Dept # : 20 +---------------------- +SQLERRM: insert or update on table "emp" violates foreign key constraint +"emp_ref_dept_fk" +An error occurred - roll back inserts +``` + +A `SELECT` command run against the table produces the following: + +```sql +SELECT * FROM emp WHERE empno > 9600; +__OUTPUT__ +empno | ename | job | mgr | hiredate | sal | comm | deptno +------+--------+--------+-----+--------------------+---------+------+-------- + 9601| FARRELL| ANALYST|7902 | 03-MAR-08 00:00:00 | 5000.00 | | 40 + 9602| TYLER | ANALYST|7900 | 25-JAN-08 00:00:00 | 4800.00 | | 40 +(2 rows) +``` + +The `ROLLBACK` command in the exception section successfully undoes the insert of employee Harrison. Employees Farrell and Tyler are still in the table as their inserts were made permanent by the `COMMIT` command in the first anonymous block. + +!!! Note + Executing a `COMMIT` or `ROLLBACK` in a plpgsql procedure throws an error if an Oracle-style SPL procedure is on the runtime stack. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx new file mode 100644 index 00000000000..e26d2edddf1 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx @@ -0,0 +1,419 @@ +--- +title: "PRAGMA AUTONOMOUS_TRANSACTION" +redirects: + - /epas/latest/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A stored procedural language (SPL) program can be declared as an *autonomous transaction* by specifying the following directive in the declaration section of the SPL block. An autonomous transaction is an independent transaction started by a calling program. + +```sql +PRAGMA AUTONOMOUS_TRANSACTION; +``` + +A commit or rollback of SQL commands in the autonomous transaction has no effect on the commit or rollback in any transaction of the calling program. A commit or rollback in the calling program has no effect on the commit or rollback of SQL commands in the autonomous transaction. 
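+
+For instance, in the following minimal sketch, the `msg_log` table and `log_msg` procedure are illustrative and aren't part of the sample application. The row written by `log_msg` stays committed even if the caller later rolls back:
+
+```sql
+CREATE TABLE msg_log (
+    logged_at       DATE,
+    message         VARCHAR2(100)
+);
+
+CREATE OR REPLACE PROCEDURE log_msg (
+    p_message       VARCHAR2
+)
+IS
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    INSERT INTO msg_log VALUES (SYSDATE, p_message);
+    COMMIT;    -- made permanent regardless of what the caller does next
+END;
+```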
+
+## Requirements and restrictions
+
+The following SPL programs can include `PRAGMA AUTONOMOUS_TRANSACTION`:
+
+- Standalone procedures and functions
+- Anonymous blocks
+- Procedures and functions declared as subprograms in packages and other calling procedures, functions, and anonymous blocks
+- Triggers
+- Object type methods
+
+The following are issues and restrictions related to autonomous transactions:
+
+- Each autonomous transaction consumes a connection slot for as long as it's in progress. In some cases, this might mean that you need to raise the `max_connections` parameter in the `postgresql.conf` file.
+- In most respects, an autonomous transaction behaves as if it were a completely separate session, but GUCs (settings established with `SET`) are a deliberate exception. Autonomous transactions absorb the surrounding values and can propagate values they commit to the outer transaction.
+- Autonomous transactions can be nested, but there is a limit of 16 levels of autonomous transactions in a single session.
+- Parallel query isn't supported in autonomous transactions.
+- The EDB Postgres Advanced Server implementation of autonomous transactions isn't entirely compatible with Oracle databases. The EDB Postgres Advanced Server autonomous transaction doesn't produce an error if there's an uncommitted transaction at the end of an SPL block.
+
+## About the examples
+
+The following set of examples uses autonomous transactions. This first set of scenarios shows the default behavior when there are no autonomous transactions.
+
+Before each scenario, the `dept` table is reset to the following initial values:
+
+```sql
+SELECT * FROM dept;
+__OUTPUT__
+ deptno |   dname    |   loc
+--------+------------+----------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+(4 rows)
+```
+
+## Scenario 1a: No autonomous transactions with only a final COMMIT
+
+This first set of scenarios shows the insertion of three rows:
+
+- Starting just after the initial `BEGIN` command of the transaction
+- From an anonymous block in the started transaction
+- From a stored procedure executed from the anonymous block
+
+The stored procedure is the following:
+
+```sql
+CREATE OR REPLACE PROCEDURE insert_dept_70 IS
+BEGIN
+    INSERT INTO dept VALUES (70,'MARKETING','LOS ANGELES');
+END;
+```
+
+The PSQL session is the following:
+
+```sql
+BEGIN;
+INSERT INTO dept VALUES (50,'HR','DENVER');
+BEGIN
+    INSERT INTO dept VALUES (60,'FINANCE','CHICAGO');
+    insert_dept_70;
+END;
+COMMIT;
+```
+
+After the final commit, all three rows are inserted:
+
+```sql
+SELECT * FROM dept ORDER BY 1;
+__OUTPUT__
+ deptno |   dname    |     loc
+--------+------------+-------------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+     50 | HR         | DENVER
+     60 | FINANCE    | CHICAGO
+     70 | MARKETING  | LOS ANGELES
+(7 rows)
+```
+
+## Scenario 1b: No autonomous transactions but a final ROLLBACK
+
+The next scenario shows that a final `ROLLBACK` command after all inserts results in the rollback of all three insertions:
+
+```sql
+BEGIN;
+INSERT INTO dept VALUES (50,'HR','DENVER');
+BEGIN
+    INSERT INTO dept VALUES (60,'FINANCE','CHICAGO');
+    insert_dept_70;
+END;
+ROLLBACK;
+
+SELECT * FROM dept ORDER BY 1;
+__OUTPUT__
+ deptno |   dname    |   loc
+--------+------------+----------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+(4 rows)
+```
+
+## Scenario 1c: No autonomous transactions, but 
an anonymous block ROLLBACK
+
+A `ROLLBACK` command given at the end of the anonymous block also eliminates all three prior insertions:
+
+```sql
+BEGIN;
+INSERT INTO dept VALUES (50,'HR','DENVER');
+BEGIN
+    INSERT INTO dept VALUES (60,'FINANCE','CHICAGO');
+    insert_dept_70;
+    ROLLBACK;
+END;
+COMMIT;
+
+SELECT * FROM dept ORDER BY 1;
+__OUTPUT__
+ deptno |   dname    |   loc
+--------+------------+----------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+(4 rows)
+```
+
+The next set of scenarios shows the effect of using autonomous transactions with `PRAGMA AUTONOMOUS_TRANSACTION` in various locations.
+
+## Scenario 2a: Autonomous transaction of anonymous block with COMMIT
+
+The procedure remains as initially created:
+
+```sql
+CREATE OR REPLACE PROCEDURE insert_dept_70 IS
+BEGIN
+    INSERT INTO dept VALUES (70,'MARKETING','LOS ANGELES');
+END;
+```
+
+The `PRAGMA AUTONOMOUS_TRANSACTION` is given with the anonymous block along with the `COMMIT` command at the end of the anonymous block:
+
+```sql
+BEGIN;
+INSERT INTO dept VALUES (50,'HR','DENVER');
+DECLARE
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    INSERT INTO dept VALUES (60,'FINANCE','CHICAGO');
+    insert_dept_70;
+    COMMIT;
+END;
+ROLLBACK;
+```
+
+After the `ROLLBACK` at the end of the transaction, only the first row insertion at the beginning of the transaction is discarded. The other two row insertions in the anonymous block with `PRAGMA AUTONOMOUS_TRANSACTION` were independently committed.
+
+```sql
+SELECT * FROM dept ORDER BY 1;
+__OUTPUT__
+ deptno |   dname    |     loc
+--------+------------+-------------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+     60 | FINANCE    | CHICAGO
+     70 | MARKETING  | LOS ANGELES
+(6 rows)
+```
+
+## Scenario 2b: Autonomous transaction anonymous block with COMMIT, including procedure with ROLLBACK but not an autonomous transaction procedure
+
+This procedure has the `ROLLBACK` command at the end. However, the `PRAGMA AUTONOMOUS_TRANSACTION` isn't included in this procedure.
+
+```sql
+CREATE OR REPLACE PROCEDURE insert_dept_70 IS
+BEGIN
+    INSERT INTO dept VALUES (70,'MARKETING','LOS ANGELES');
+    ROLLBACK;
+END;
+```
+
+The rollback in the procedure removes the two rows inserted in the anonymous block (`deptno` 60 and 70) before the final `COMMIT` command in the anonymous block:
+
+```sql
+BEGIN;
+INSERT INTO dept VALUES (50,'HR','DENVER');
+DECLARE
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    INSERT INTO dept VALUES (60,'FINANCE','CHICAGO');
+    insert_dept_70;
+    COMMIT;
+END;
+COMMIT;
+```
+
+After the final commit at the end of the transaction, the only row inserted is the first one from the beginning of the transaction. Since the anonymous block is an autonomous transaction, the rollback in the enclosed procedure has no effect on the insertion that occurs before the anonymous block is executed.
+
+```sql
+SELECT * FROM dept ORDER by 1;
+__OUTPUT__
+ deptno |   dname    |   loc
+--------+------------+----------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+     50 | HR         | DENVER
+(5 rows)
+```
+
+## Scenario 2c: Autonomous transaction anonymous block with COMMIT, including procedure with ROLLBACK that is also an autonomous transaction procedure
+
+The procedure with the `ROLLBACK` command at the end also has `PRAGMA AUTONOMOUS_TRANSACTION` included. This isolates the effect of the `ROLLBACK` command in the procedure.
+ +```sql +CREATE OR REPLACE PROCEDURE insert_dept_70 IS + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + INSERT INTO dept VALUES (70,'MARKETING','LOS ANGELES'); + ROLLBACK; +END; +``` + +The rollback in the procedure removes the row inserted by the procedure but not the other row inserted in the anonymous block. + +```sql +BEGIN; +INSERT INTO dept VALUES (50,'HR','DENVER'); +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + INSERT INTO dept VALUES (60,'FINANCE','CHICAGO'); + insert_dept_70; + COMMIT; +END; +COMMIT; +``` + +After the final commit at the end of the transaction, the row inserted is the first one from the beginning of the transaction as well as the row inserted at the beginning of the anonymous block. The only insertion rolled back is the one in the procedure. + +```sql +SELECT * FROM dept ORDER by 1; +__OUTPUT__ + deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON + 50 | HR | DENVER + 60 | FINANCE | CHICAGO +(6 rows) +``` + +The following examples show `PRAGMA AUTONOMOUS_TRANSACTION` in a couple of other SPL program types. + +## Autonomous transaction trigger + +This example shows the effect of declaring a trigger with `PRAGMA AUTONOMOUS_TRANSACTION`. + +The following table is created to log changes to the `emp` table: + +```sql +CREATE TABLE empauditlog ( + audit_date DATE, + audit_user VARCHAR2(20), + audit_desc VARCHAR2(20) +); +``` + +The trigger attached to the `emp` table that inserts these changes into the `empauditlog` table is the following. `PRAGMA AUTONOMOUS_TRANSACTION` is included in the declaration section. + +```sql +CREATE OR REPLACE TRIGGER emp_audit_trig + AFTER INSERT OR UPDATE OR DELETE ON emp +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + v_action VARCHAR2(20); +BEGIN + IF INSERTING THEN + v_action := 'Added employee(s)'; + ELSIF UPDATING THEN + v_action := 'Updated employee(s)'; + ELSIF DELETING THEN + v_action := 'Deleted employee(s)'; + END IF; + INSERT INTO empauditlog VALUES (SYSDATE, USER, + v_action); +END; +``` + +The following two inserts are made into the `emp` table in a transaction started by the `BEGIN` command: + +```sql +BEGIN; +INSERT INTO emp VALUES (9001,'SMITH','ANALYST',7782,SYSDATE,NULL,NULL,10); +INSERT INTO emp VALUES (9002,'JONES','CLERK',7782,SYSDATE,NULL,NULL,10); +``` + +The following shows the two new rows in the `emp` table as well as the two entries in the `empauditlog` table: + +```sql +SELECT * FROM emp WHERE empno > 9000; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+-------+---------+------+--------------------+-----+------+-------- + 9001 | SMITH | ANALYST | 7782 | 23-AUG-18 07:12:27 | | | 10 + 9002 | JONES | CLERK | 7782 | 23-AUG-18 07:12:27 | | | 10 +(2 rows) +``` +```sql +SELECT TO_CHAR(AUDIT_DATE,'DD-MON-YY HH24:MI:SS') AS "audit date", + audit_user, audit_desc FROM empauditlog ORDER BY 1 ASC; +__OUTPUT__ + audit date | audit_user | audit_desc +--------------------+--------------+------------------- + 23-AUG-18 07:12:27 | enterprisedb | Added employee(s) + 23-AUG-18 07:12:27 | enterprisedb | Added employee(s) +(2 rows) +``` + +But then the `ROLLBACK` command is given during this session. The `emp` table no longer contains the two rows, but the `empauditlog` table still contains its two entries. The trigger implicitly performed a commit, and `PRAGMA AUTONOMOUS_TRANSACTION` commits those changes independent from the rollback given in the calling transaction. 
+ +```sql +ROLLBACK; + +SELECT * FROM emp WHERE empno > 9000; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+-------+-----+-----+----------+-----+------+-------- +(0 rows) +``` +```sql +SELECT TO_CHAR(AUDIT_DATE,'DD-MON-YY HH24:MI:SS') AS "audit date", + audit_user, audit_desc FROM empauditlog ORDER BY 1 ASC; +__OUTPUT__ + audit date | audit_user | audit_desc +--------------------+--------------+------------------- + 23-AUG-18 07:12:27 | enterprisedb | Added employee(s) + 23-AUG-18 07:12:27 | enterprisedb | Added employee(s) +(2 rows) +``` + +## Autonomous transaction object type method + +This example shows the effect of declaring an object method with `PRAGMA AUTONOMOUS_TRANSACTION`. + +The following object type and object type body are created. The member procedure in the object type body contains the `PRAGMA AUTONOMOUS_TRANSACTION` in the declaration section along with `COMMIT` at the end of the procedure. + +```sql +CREATE OR REPLACE TYPE insert_dept_typ AS OBJECT ( + deptno NUMBER(2), + dname VARCHAR2(14), + loc VARCHAR2(13), + MEMBER PROCEDURE insert_dept +); + +CREATE OR REPLACE TYPE BODY insert_dept_typ AS + MEMBER PROCEDURE insert_dept + IS + PRAGMA AUTONOMOUS_TRANSACTION; + BEGIN + INSERT INTO dept VALUES (SELF.deptno,SELF.dname,SELF.loc); + COMMIT; + END; +END; +``` + +In the following anonymous block, an insert is performed into the `dept` table, followed by invoking the `insert_dept` method of the object and ending with a `ROLLBACK` command in the anonymous block. + +```sql +BEGIN; +DECLARE + v_dept INSERT_DEPT_TYP := + insert_dept_typ(60,'FINANCE','CHICAGO'); +BEGIN + INSERT INTO dept VALUES (50,'HR','DENVER'); + v_dept.insert_dept; + ROLLBACK; +END; +``` + +Since `insert_dept` was declared as an autonomous transaction, its insert of department number 60 remains in the table, but the rollback removes the insertion of department 50: + +```sql +SELECT * FROM dept ORDER BY 1; +__OUTPUT__ + deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON + 60 | FINANCE | CHICAGO +(5 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx new file mode 100644 index 00000000000..2469034108f --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx @@ -0,0 +1,17 @@ +--- +title: "About transactions" +--- + +A transaction begins when the first SQL command is encountered in the SPL program. All subsequent SQL commands are included as part of that transaction. + +The transaction ends when one of the following occurs: + +- An unhandled exception occurs. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. +- A `COMMIT` command is encountered. In this case, the effect of all database updates made during the transaction become permanent. +- A `ROLLBACK` command is encountered. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. If a new SQL command is encountered, a new transaction begins. +- Control returns to the calling application, such as Java or PSQL. 
In this case, the action of the application determines whether the transaction is committed or rolled back. The exception is when the transaction is in a block in which `PRAGMA AUTONOMOUS_TRANSACTION` was declared. In that case, the commitment or rollback of the transaction occurs independently of the calling program.
+
+!!! Note
+    Unlike Oracle, DDL commands such as `CREATE TABLE` don't implicitly occur in their own transaction. Therefore, DDL commands don't cause an immediate database commit as in Oracle, and you can roll back DDL commands just like DML commands.
+
+A transaction can span one or more `BEGIN/END` blocks, or a single `BEGIN/END` block can contain one or more transactions.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/index.mdx
new file mode 100644
index 00000000000..404c31aa24d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/06_transaction_control/index.mdx
@@ -0,0 +1,34 @@
+---
+title: "Working with transactions"
+indexCards: simple
+navigation:
+  - about_transactions
+  - 01_commit
+  - 02_rollback
+  - 03_pragma_autonomous_transaction
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.072.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.166.html"
+redirects:
+  - /epas/latest/epas_compat_spl/06_transaction_control/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+There might be times when you want all updates to a database to occur successfully or for none to occur in case of any error. A set of database updates that occur successfully as a single unit or not at all is called a *transaction*.
+
+A common example in banking is a funds transfer between two accounts. The two parts of the transaction are the withdrawal of funds from one account and the deposit of the funds in another account. Both parts of this transaction must occur for the bank’s books to balance. The deposit and withdrawal are one transaction.
+
+You can create an SPL application that uses a style of transaction control compatible with Oracle databases if the following conditions are met:
+
+- The `edb_stmt_level_tx` parameter is set to `TRUE`. This prevents all database updates in the `BEGIN/END` block from being unconditionally rolled back if any exception occurs.
+- The application isn't running in autocommit mode. If autocommit mode is on, each successful database update is immediately committed and can't be undone. The manner in which autocommit mode is turned on or off depends on the application.
+
+The three main transaction commands are `COMMIT`, `ROLLBACK`, and `PRAGMA AUTONOMOUS_TRANSACTION`.
+
+ +commit rollback pragma_autonomous_transaction + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/07_dynamic_sql.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/07_dynamic_sql.mdx
new file mode 100644
index 00000000000..07f98567f0a
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/07_dynamic_sql.mdx
@@ -0,0 +1,122 @@
+---
+title: "Using dynamic SQL"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.073.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.167.html"
+redirects:
+  - /epas/latest/epas_compat_spl/07_dynamic_sql/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+*Dynamic SQL* is a technique that lets you execute SQL commands that aren't known until the commands are about to be executed. In static SQL commands, the full command (with the exception of variables) must be known and coded into the program before the program can begin to execute. Using dynamic SQL, the executed SQL can change during program runtime.
+
+In addition, dynamic SQL is the only method by which data definition commands, such as `CREATE TABLE`, can be executed from an SPL program.
+
+However, the runtime performance of dynamic SQL is slower than that of static SQL.
+
+## Syntax
+
+The `EXECUTE IMMEDIATE` command is used to run SQL commands dynamically:
+
+```sql
+EXECUTE IMMEDIATE '<sql_expression>;'
+  [ INTO { <variable> [, ...] | <record> } ]
+  [ USING { [<bind_type>] <bind_argument> } [, ...] ];
+```
+
+Where:
+
+- `sql_expression` is a string expression containing the SQL command to dynamically execute.
+- `variable` receives the output of the result set, typically from a `SELECT` command, created as a result of executing the SQL command in `sql_expression`. The number and order of the variables must match the number and order of the fields in the result set, and their data types must be type-compatible. Alternatively, you can specify a `record` as long as the record’s fields match the number and order of, and are type-compatible with, the fields of the result set.
+
+When using the `INTO` clause, exactly one row must be returned in the result set. Otherwise, an exception occurs.
+
+When using the `USING` clause, the value of each `bind_argument` expression is passed to a *placeholder*. Placeholders appear embedded in the SQL command in `sql_expression` wherever variables can be used. Placeholders are denoted by an identifier with a colon (:) prefix, for example, `:name`. The number and order of the evaluated expressions must match the number and order of the placeholders in `sql_expression`, and their resulting data types must be compatible with the placeholders. Placeholders aren't declared anywhere in the SPL program.
They appear only in `sql_expression`.
+
+Currently, all options for `bind_type` are ignored, and `bind_argument` is treated as `IN OUT`.
+
+## Example: SQL commands as string literals
+
+This example shows basic dynamic SQL commands as string literals:
+
+```sql
+DECLARE
+    v_sql           VARCHAR2(50);
+BEGIN
+    EXECUTE IMMEDIATE 'CREATE TABLE job (jobno NUMBER(3),' ||
+        ' jname VARCHAR2(9))';
+    v_sql := 'INSERT INTO job VALUES (100, ''ANALYST'')';
+    EXECUTE IMMEDIATE v_sql;
+    v_sql := 'INSERT INTO job VALUES (200, ''CLERK'')';
+    EXECUTE IMMEDIATE v_sql;
+END;
+```
+
+This example uses the `USING` clause to pass values to placeholders in the SQL string:
+
+```sql
+DECLARE
+    v_sql           VARCHAR2(50) := 'INSERT INTO job VALUES ' ||
+                        '(:p_jobno, :p_jname)';
+    v_jobno         job.jobno%TYPE;
+    v_jname         job.jname%TYPE;
+BEGIN
+    v_jobno := 300;
+    v_jname := 'MANAGER';
+    EXECUTE IMMEDIATE v_sql USING v_jobno, v_jname;
+    v_jobno := 400;
+    v_jname := 'SALESMAN';
+    EXECUTE IMMEDIATE v_sql USING v_jobno, v_jname;
+    v_jobno := 500;
+    v_jname := 'PRESIDENT';
+    EXECUTE IMMEDIATE v_sql USING v_jobno, v_jname;
+END;
+```
+
+This example shows both the `INTO` and `USING` clauses. The last execution of the `SELECT` command returns the results into a record instead of individual variables.
+
+```sql
+DECLARE
+    v_sql           VARCHAR2(60);
+    v_jobno         job.jobno%TYPE;
+    v_jname         job.jname%TYPE;
+    r_job           job%ROWTYPE;
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('JOBNO    JNAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    v_sql := 'SELECT jobno, jname FROM job WHERE jobno = :p_jobno';
+    EXECUTE IMMEDIATE v_sql INTO v_jobno, v_jname USING 100;
+    DBMS_OUTPUT.PUT_LINE(v_jobno || '      ' || v_jname);
+    EXECUTE IMMEDIATE v_sql INTO v_jobno, v_jname USING 200;
+    DBMS_OUTPUT.PUT_LINE(v_jobno || '      ' || v_jname);
+    EXECUTE IMMEDIATE v_sql INTO v_jobno, v_jname USING 300;
+    DBMS_OUTPUT.PUT_LINE(v_jobno || '      ' || v_jname);
+    EXECUTE IMMEDIATE v_sql INTO v_jobno, v_jname USING 400;
+    DBMS_OUTPUT.PUT_LINE(v_jobno || '      ' || v_jname);
+    EXECUTE IMMEDIATE v_sql INTO r_job USING 500;
+    DBMS_OUTPUT.PUT_LINE(r_job.jobno || '      ' || r_job.jname);
+END;
+```
+
+The following is the output from this anonymous block:
+
+```sql
+__OUTPUT__
+JOBNO    JNAME
+-----    -------
+100      ANALYST
+200      CLERK
+300      MANAGER
+400      SALESMAN
+500      PRESIDENT
+```
+
+You can use the `BULK COLLECT` clause to assemble the result set from an `EXECUTE IMMEDIATE` statement into a named collection. See `EXECUTE IMMEDIATE BULK COLLECT` in [Using the BULK COLLECT clause](12_working_with_collections/04_using_the_bulk_collect_clause/#using_the_bulk_collect_clause) for more information.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/01_declaring_a_cursor.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/01_declaring_a_cursor.mdx
new file mode 100644
index 00000000000..53901cbeeea
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/01_declaring_a_cursor.mdx
@@ -0,0 +1,35 @@
+---
+title: "Declaring a cursor"
+redirects:
+  - /epas/latest/epas_compat_spl/08_static_cursors/01_declaring_a_cursor/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Before you can use a cursor, you must first declare it in the declaration section of the SPL program. A cursor declaration appears as follows:
+
+```sql
+CURSOR <name> IS <query>;
+```
+
+Where:
+
+- `name` is an identifier used to reference the cursor and its result set later in the program.
+- `query` is a SQL `SELECT` command that determines the result set retrievable by the cursor. + +!!! Note + An extension of this syntax allows the use of parameters. For details, see [Parameterized cursors](08_parameterized_cursors/#parameterized_cursors). + +This example shows some cursor declarations: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + CURSOR emp_cur_1 IS SELECT * FROM emp; + CURSOR emp_cur_2 IS SELECT empno, ename FROM emp; + CURSOR emp_cur_3 IS SELECT empno, ename FROM emp WHERE deptno = 10 + ORDER BY empno; +BEGIN + ... +END; +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx new file mode 100644 index 00000000000..d32f5673458 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx @@ -0,0 +1,28 @@ +--- +title: "Opening a cursor" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/02_opening_a_cursor/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Before you can use a cursor to retrieve rows, you must open it using the `OPEN` statement. + +```sql +OPEN ; +``` + +`name` is the identifier of a cursor that was previously declared in the declaration section of the SPL program. Don't execute the `OPEN` statement on a cursor that is already open. + +This example shows an `OPEN` statement with its corresponding cursor declaration: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + CURSOR emp_cur_3 IS SELECT empno, ename FROM emp WHERE deptno = 10 + ORDER BY empno; +BEGIN + OPEN emp_cur_3; + ... +END; +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx new file mode 100644 index 00000000000..8771c3886f0 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx @@ -0,0 +1,75 @@ +--- +title: "Fetching rows from a cursor" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Once a cursor is open, you can retrieve rows from the cursor’s result set by using the `FETCH` statement. + +## Syntax + +```sql +FETCH INTO { | [, ]... }; +``` + +Where: + +- `name` is the identifier of a previously opened cursor. +- `record` is the identifier of a previously defined record (for example, using `table%ROWTYPE`). + +`variable, variable_2...` are SPL variables that receive the field data from the fetched row. The fields in `record` or `variable, variable_2...` must match in number and order the fields returned in the `SELECT` list of the query given in the cursor declaration. The data types of the fields in the `SELECT` list must match or be implicitly convertible to the data types of the fields in `record` or the data types of `variable, variable_2...` + +!!! Note + A variation of `FETCH INTO` using the `BULK COLLECT` clause can return multiple rows at a time into a collection. See [Using the BULK COLLECT clause](../12_working_with_collections/04_using_the_bulk_collect_clause/#using_the_bulk_collect_clause) for more information on using the `BULK COLLECT` clause with the `FETCH INTO` statement. 
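+
+As a minimal sketch of that variation (the collection type `ename_tbl_typ` is illustrative, and the example assumes the sample `emp` table used throughout this chapter), a single `FETCH ... BULK COLLECT INTO` retrieves every remaining row in one operation:
+
+```sql
+DECLARE
+    TYPE ename_tbl_typ IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER;
+    ename_tbl       ename_tbl_typ;
+    CURSOR emp_cur IS SELECT ename FROM emp;
+BEGIN
+    OPEN emp_cur;
+    -- One bulk fetch instead of one FETCH per row
+    FETCH emp_cur BULK COLLECT INTO ename_tbl;
+    CLOSE emp_cur;
+    FOR i IN 1 .. ename_tbl.COUNT LOOP
+        DBMS_OUTPUT.PUT_LINE(ename_tbl(i));
+    END LOOP;
+END;
+```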
+ +## Example + +The following shows the `FETCH` statement: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_empno NUMBER(4); + v_ename VARCHAR2(10); + CURSOR emp_cur_3 IS SELECT empno, ename FROM emp WHERE deptno = 10 + ORDER BY empno; +BEGIN + OPEN emp_cur_3; + FETCH emp_cur_3 INTO v_empno, v_ename; + ... +END; +``` + +Instead of explicitly declaring the data type of a target variable, you can use `%TYPE` instead. In this way, if the data type of the database column changes, the target variable declaration in the SPL program doesn't have to change. `%TYPE` picks up the new data type of the specified column. + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; + CURSOR emp_cur_3 IS SELECT empno, ename FROM emp WHERE deptno = 10 + ORDER BY empno; +BEGIN + OPEN emp_cur_3; + FETCH emp_cur_3 INTO v_empno, v_ename; + ... +END; +``` + +If all the columns in a table are retrieved in the order defined in the table, you can use `%ROWTYPE` to define a record into which the `FETCH` statement places the retrieved data. You can then access each field in the record using dot notation: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_emp_rec emp%ROWTYPE; + CURSOR emp_cur_1 IS SELECT * FROM emp; +BEGIN + OPEN emp_cur_1; + FETCH emp_cur_1 INTO v_emp_rec; + DBMS_OUTPUT.PUT_LINE('Employee Number: ' || v_emp_rec.empno); + DBMS_OUTPUT.PUT_LINE('Employee Name : ' || v_emp_rec.ename); + ... +END; +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/04_closing_a_cursor.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/04_closing_a_cursor.mdx new file mode 100644 index 00000000000..0be146a08ca --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/04_closing_a_cursor.mdx @@ -0,0 +1,42 @@ +--- +title: "Closing a cursor" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/04_closing_a_cursor/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Once all the desired rows are retrieved from the cursor result set, close the cursor. After you close the cursor, you can no longer access the result set. + +The `CLOSE` statement appears as follows: + +```sql +CLOSE ; +``` + +`name` is the identifier of a cursor that's currently open. After you close a cursor, don't close it again. However, after you close the cursor, you can use the `OPEN` statement again on the closed cursor and rebuild the query result set. After that, the `FETCH` statement can then retrieve the rows of the new result set. + +This example uses the `CLOSE` statement: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_emp_rec emp%ROWTYPE; + CURSOR emp_cur_1 IS SELECT * FROM emp; +BEGIN + OPEN emp_cur_1; + FETCH emp_cur_1 INTO v_emp_rec; + DBMS_OUTPUT.PUT_LINE('Employee Number: ' || v_emp_rec.empno); + DBMS_OUTPUT.PUT_LINE('Employee Name : ' || v_emp_rec.ename); + CLOSE emp_cur_1; +END; +``` + +This procedure produces the following output. Employee number `7369, SMITH` is the first row of the result set. 
+ +```sql +EXEC cursor_example; +__OUTPUT__ +Employee Number: 7369 +Employee Name : SMITH +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/05_using__rowtype_with_cursors.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/05_using__rowtype_with_cursors.mdx new file mode 100644 index 00000000000..6022162be01 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/05_using__rowtype_with_cursors.mdx @@ -0,0 +1,59 @@ +--- +title: "Using %ROWTYPE with cursors" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.075.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.169.html" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/05_using__rowtype_with_cursors/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Using the `%ROWTYPE` attribute, you can define a record that contains fields corresponding to all columns fetched from a cursor or cursor variable. Each field takes on the data type of its corresponding column. The `%ROWTYPE` attribute is prefixed by a cursor name or cursor variable name. + +```text + %ROWTYPE; +``` + +`record` is an identifier assigned to the record. `cursor` is an explicitly declared cursor in the current scope. + +This example shows how you can use a cursor with `%ROWTYPE` to get information about which employee works in which department: + +```sql +CREATE OR REPLACE PROCEDURE emp_info +IS + CURSOR empcur IS SELECT ename, deptno FROM emp; + myvar empcur%ROWTYPE; +BEGIN + OPEN empcur; + LOOP + FETCH empcur INTO myvar; + EXIT WHEN empcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE( myvar.ename || ' works in department ' + || myvar.deptno ); + END LOOP; + CLOSE empcur; +END; +``` + +The following is the output from this procedure: + +```sql +EXEC emp_info; +__OUTPUT__ +SMITH works in department 20 +ALLEN works in department 30 +WARD works in department 30 +JONES works in department 20 +MARTIN works in department 30 +BLAKE works in department 30 +CLARK works in department 10 +SCOTT works in department 20 +KING works in department 10 +TURNER works in department 30 +ADAMS works in department 20 +JAMES works in department 30 +FORD works in department 20 +MILLER works in department 10 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/01_isopen.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/01_isopen.mdx new file mode 100644 index 00000000000..bb53f7f94fd --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/01_isopen.mdx @@ -0,0 +1,35 @@ +--- +title: "%ISOPEN" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/01_isopen/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Use the `%ISOPEN` attribute to test whether a cursor is open. + +```text +%ISOPEN +``` + +`cursor_name` is the name of the cursor for which a `BOOLEAN` data type of `TRUE` is returned if the cursor is open, `FALSE` otherwise. 
+ +This example uses `%ISOPEN`: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + ... + CURSOR emp_cur_1 IS SELECT * FROM emp; + ... +BEGIN + ... + IF emp_cur_1%ISOPEN THEN + NULL; + ELSE + OPEN emp_cur_1; + END IF; + FETCH emp_cur_1 INTO ... + ... +END; +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/02_found.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/02_found.mdx new file mode 100644 index 00000000000..87ede01dbe6 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/02_found.mdx @@ -0,0 +1,64 @@ +--- +title: "%FOUND" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/02_found/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `%FOUND` attribute tests whether a row is retrieved from the result set of the specified cursor after a `FETCH` on the cursor. + +```text +%FOUND +``` + +`cursor_name` is the name of the cursor for which a `BOOLEAN` data type of `TRUE` is returned if a row is retrieved from the result set of the cursor after a `FETCH`. + +After the last row of the result set is fetched, the next `FETCH` results in `%FOUND` returning `FALSE`. `FALSE` is also returned after the first `FETCH` if the result set has no rows to begin with. + +Referencing `%FOUND` on a cursor before it's opened or after it's closed results in an `INVALID_CURSOR` exception. + +`%FOUND` returns `null` if it's referenced when the cursor is open but before the first `FETCH`. + +This example uses `%FOUND`: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_emp_rec emp%ROWTYPE; + CURSOR emp_cur_1 IS SELECT * FROM emp; +BEGIN + OPEN emp_cur_1; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + FETCH emp_cur_1 INTO v_emp_rec; + WHILE emp_cur_1%FOUND LOOP + DBMS_OUTPUT.PUT_LINE(v_emp_rec.empno || ' ' || v_emp_rec.ename); + FETCH emp_cur_1 INTO v_emp_rec; + END LOOP; + CLOSE emp_cur_1; +END; +``` + +The following is the output from this example: + +```sql +EXEC cursor_example; +__OUTPUT__ +EMPNO ENAME +----- ------ +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +7876 ADAMS +7900 JAMES +7902 FORD +7934 MILLER +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/03_notfound.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/03_notfound.mdx new file mode 100644 index 00000000000..315886b85dc --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/03_notfound.mdx @@ -0,0 +1,64 @@ +--- +title: "%NOTFOUND" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/03_notfound/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `%NOTFOUND` attribute is the logical opposite of `%FOUND`. + +```text +%NOTFOUND +``` + +`cursor_name` is the name of the cursor for which a `BOOLEAN` data type of `FALSE` is returned if a row is retrieved from the result set of the cursor after a `FETCH`. + +After the last row of the result set is fetched, the next `FETCH` results in `%NOTFOUND` returning `TRUE`. `TRUE` is also returned after the first `FETCH` if the result set has no rows to begin with. 
+ +Referencing `%NOTFOUND` on a cursor before it's opened or after it's closed results in an `INVALID_CURSOR` exception. + +`%NOTFOUND` returns `null` if it's referenced when the cursor is open but before the first `FETCH`. + +This example uses `%NOTFOUND`: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_emp_rec emp%ROWTYPE; + CURSOR emp_cur_1 IS SELECT * FROM emp; +BEGIN + OPEN emp_cur_1; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH emp_cur_1 INTO v_emp_rec; + EXIT WHEN emp_cur_1%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_emp_rec.empno || ' ' || v_emp_rec.ename); + END LOOP; + CLOSE emp_cur_1; +END; +``` + +The following is the output from this example: + +```sql +EXEC cursor_example; +__OUTPUT__ +EMPNO ENAME +----- ------ +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +7876 ADAMS +7900 JAMES +7902 FORD +7934 MILLER +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/04_rowcount.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/04_rowcount.mdx new file mode 100644 index 00000000000..f57ec0a22ee --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/04_rowcount.mdx @@ -0,0 +1,66 @@ +--- +title: "%ROWCOUNT" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/04_rowcount/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `%ROWCOUNT` attribute returns an integer showing the number of rows fetched so far from the specified cursor. + +```text +%ROWCOUNT +``` + +`cursor_name` is the name of the cursor for which `%ROWCOUNT` returns the number of rows retrieved thus far. After the last row is retrieved, `%ROWCOUNT` remains set to the total number of rows returned until the cursor is closed. At that point, `%ROWCOUNT` throws an `INVALID_CURSOR` exception if referenced. + +Referencing `%ROWCOUNT` on a cursor before it's opened or after it's closed results in an `INVALID_CURSOR` exception. + +`%ROWCOUNT` returns `0` if it's referenced when the cursor is open but before the first `FETCH`. `%ROWCOUNT` also returns `0` after the first `FETCH` when the result set has no rows to begin with. 
+ +This example uses `%ROWCOUNT`: + +```sql +CREATE OR REPLACE PROCEDURE cursor_example +IS + v_emp_rec emp%ROWTYPE; + CURSOR emp_cur_1 IS SELECT * FROM emp; +BEGIN + OPEN emp_cur_1; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH emp_cur_1 INTO v_emp_rec; + EXIT WHEN emp_cur_1%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_emp_rec.empno || ' ' || v_emp_rec.ename); + END LOOP; + DBMS_OUTPUT.PUT_LINE('**********************'); + DBMS_OUTPUT.PUT_LINE(emp_cur_1%ROWCOUNT || ' rows were retrieved'); + CLOSE emp_cur_1; +END; +``` + +This procedure prints the total number of rows retrieved at the end of the employee list as follows: + +```sql +EXEC cursor_example; +__OUTPUT__ +EMPNO ENAME +----- ------- +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +7876 ADAMS +7900 JAMES +7902 FORD +7934 MILLER +********************** +14 rows were retrieved +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/05_summary_of_cursor_states_and_attributes.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/05_summary_of_cursor_states_and_attributes.mdx new file mode 100644 index 00000000000..1e823b16942 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/05_summary_of_cursor_states_and_attributes.mdx @@ -0,0 +1,18 @@ +--- +title: "Summary of cursor states and attributes" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/05_summary_of_cursor_states_and_attributes/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The table summarizes the possible cursor states and the values returned by the cursor attributes. + +| Cursor state | %ISOPEN | %FOUND | %NOTFOUND | %ROWCOUNT | +| ----------------------------------------- | ------- | -------------------------- | -------------------------- | -------------------------- | +| Before `OPEN` | False | `INVALID_CURSOR` exception | `INVALID_CURSOR` exception | `INVALID_CURSOR` Exception | +| After `OPEN` & before 1st `FETCH` | True | Null | Null | 0 | +| After 1st successful `FETCH` | True | True | False | 1 | +| After `nth` successful `FETCH` (last row) | True | True | False | n | +| After n+1st `FETCH` (after last row) | True | False | True | n | +| After `CLOSE` | False | `INVALID_CURSOR` exception | `INVALID_CURSOR` exception | `INVALID_CURSOR` Exception | diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/index.mdx new file mode 100644 index 00000000000..599fe7da13c --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/06_cursor_attributes/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Cursor attributes" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.076.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.170.html" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/06_cursor_attributes/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Each cursor has a set of attributes associated with it that allows the program to test its state. These attributes are `%ISOPEN`, `%FOUND`, `%NOTFOUND`, and `%ROWCOUNT`. + +
+ +%isopen %found %notfound %rowcount summary_of_cursor_states_and_attributes + +
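+
+As a minimal sketch of how these attributes change state over a cursor's life cycle (assuming the sample `emp` table; see the summary topic for the full state table), consider:
+
+```sql
+DECLARE
+    v_emp_rec emp%ROWTYPE;
+    CURSOR emp_cur_1 IS SELECT * FROM emp;
+BEGIN
+    -- Before OPEN: %ISOPEN is false; the other attributes raise INVALID_CURSOR
+    IF NOT emp_cur_1%ISOPEN THEN
+        OPEN emp_cur_1;
+    END IF;
+    -- After OPEN and before the first FETCH: %ROWCOUNT is 0 and %FOUND is null
+    DBMS_OUTPUT.PUT_LINE('Rows fetched so far: ' || emp_cur_1%ROWCOUNT);
+    FETCH emp_cur_1 INTO v_emp_rec;
+    -- After a successful FETCH: %FOUND is true, %NOTFOUND is false, %ROWCOUNT is 1
+    IF emp_cur_1%FOUND THEN
+        DBMS_OUTPUT.PUT_LINE('Row ' || emp_cur_1%ROWCOUNT || ': ' ||
+            v_emp_rec.ename);
+    END IF;
+    CLOSE emp_cur_1;
+    -- After CLOSE: %ISOPEN is false; the other attributes raise INVALID_CURSOR again
+END;
+```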
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/07_cursor_for_loop.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/07_cursor_for_loop.mdx
new file mode 100644
index 00000000000..c8ffd00aa7d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/07_cursor_for_loop.mdx
@@ -0,0 +1,66 @@
+---
+title: "Using a cursor FOR loop"
+redirects:
+  - /epas/latest/epas_compat_spl/08_static_cursors/07_cursor_for_loop/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+The programming logic required to process the result set of a cursor usually includes a statement to open the cursor, a loop construct to retrieve each row of the result set, a test for the end of the result set, and a statement to close the cursor. The *cursor FOR loop* is a loop construct that eliminates the need to code these statements individually.
+
+The cursor `FOR` loop opens a previously declared cursor, fetches all rows in the cursor result set, and then closes the cursor.
+
+The syntax for creating a cursor `FOR` loop is as follows:
+
+```sql
+FOR <record> IN <cursor>
+LOOP
+    <statements>
+END LOOP;
+```
+
+Where:
+
+`record` is an identifier assigned to an implicitly declared record with definition `<cursor>%ROWTYPE`.
+
+`cursor` is the name of a previously declared cursor.
+
+`statements` are one or more SPL statements. There must be at least one statement.
+
+This example uses a cursor `FOR` loop:
+
+```sql
+CREATE OR REPLACE PROCEDURE cursor_example
+IS
+    CURSOR emp_cur_1 IS SELECT * FROM emp;
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    FOR v_emp_rec IN emp_cur_1 LOOP
+        DBMS_OUTPUT.PUT_LINE(v_emp_rec.empno || '     ' || v_emp_rec.ename);
+    END LOOP;
+END;
+```
+
+The following is the output from this procedure:
+
+```sql
+EXEC cursor_example;
+__OUTPUT__
+EMPNO ENAME
+----- -------
+7369 SMITH
+7499 ALLEN
+7521 WARD
+7566 JONES
+7654 MARTIN
+7698 BLAKE
+7782 CLARK
+7788 SCOTT
+7839 KING
+7844 TURNER
+7876 ADAMS
+7900 JAMES
+7902 FORD
+7934 MILLER
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/08_parameterized_cursors.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/08_parameterized_cursors.mdx
new file mode 100644
index 00000000000..0fe84a7fa1b
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/08_parameterized_cursors.mdx
@@ -0,0 +1,39 @@
+---
+title: "Declaring parameterized cursors"
+redirects:
+  - /epas/latest/epas_compat_spl/08_static_cursors/08_parameterized_cursors/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You can declare a static cursor that accepts parameters, and you can pass values for those parameters when opening the cursor. This example creates a parameterized cursor that displays the name and salary of each employee in the `emp` table whose salary is less than a specified value, passed as a parameter:
+
+```sql
+DECLARE
+    my_record       emp%ROWTYPE;
+    CURSOR c1 (max_wage NUMBER) IS
+        SELECT * FROM emp WHERE sal < max_wage;
+BEGIN
+    OPEN c1(2000);
+    LOOP
+        FETCH c1 INTO my_record;
+        EXIT WHEN c1%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE('Name = ' || my_record.ename || ', salary = '
+            || my_record.sal);
+    END LOOP;
+    CLOSE c1;
+END;
+```
+
+For example, if you pass the value 2000 as `max_wage`, then you see the name and salary of only those employees with a salary less than 2000.
The following is the result of the above query: + +```sql +Name = SMITH, salary = 800.00 +Name = ALLEN, salary = 1600.00 +Name = WARD, salary = 1250.00 +Name = MARTIN, salary = 1250.00 +Name = TURNER, salary = 1500.00 +Name = ADAMS, salary = 1100.00 +Name = JAMES, salary = 950.00 +Name = MILLER, salary = 1300.00 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/index.mdx new file mode 100644 index 00000000000..d86d239aeaa --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/08_static_cursors/index.mdx @@ -0,0 +1,22 @@ +--- +title: "Working with static cursors" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.074.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.168.html" +redirects: + - /epas/latest/epas_compat_spl/08_static_cursors/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Rather than executing a whole query at once, you can set up a *cursor* that encapsulates the query and then read the query result set one row at a time. This approach allows you to create SPL program logic that retrieves a row from the result set, does some processing on the data in that row, and then retrieves the next row and repeats the process. + +Cursors are most often used in the context of a `FOR` or `WHILE` loop. Include a conditional test in the SPL logic that detects when the end of the result set was reached so the program can exit the loop. + +
+ +declaring_a_cursor opening_a_cursor fetching_rows_from_a_cursor closing_a_cursor using\_%\_rowtype_with_cursors cursor_attributes cursor_for_loop parameterized_cursors + +
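+
+As a minimal sketch that puts the topics in this section together (assuming the sample `emp` table; the procedure name `list_emp` is illustrative), a `WHILE` loop driven by the `%FOUND` attribute follows this pattern:
+
+```sql
+CREATE OR REPLACE PROCEDURE list_emp
+IS
+    v_ename         emp.ename%TYPE;
+    CURSOR emp_cur IS SELECT ename FROM emp;    -- declare the cursor
+BEGIN
+    OPEN emp_cur;                               -- open it
+    FETCH emp_cur INTO v_ename;                 -- fetch the first row
+    WHILE emp_cur%FOUND LOOP                    -- test for the end of the result set
+        DBMS_OUTPUT.PUT_LINE(v_ename);          -- process the row
+        FETCH emp_cur INTO v_ename;             -- fetch the next row
+    END LOOP;
+    CLOSE emp_cur;                              -- close the cursor
+END;
+```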
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/01_ref_cursor_overview.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/01_ref_cursor_overview.mdx new file mode 100644 index 00000000000..eb4cd2e25bc --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/01_ref_cursor_overview.mdx @@ -0,0 +1,13 @@ +--- +title: "REF CURSOR overview" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/01_ref_cursor_overview/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A *cursor variable* is a cursor that contains a pointer to a query result set. The result set is determined by executing the `OPEN FOR` statement using the cursor variable. + +A cursor variable isn't tied to a single query like a static cursor is. You can open the same cursor variable a number of times with `OPEN FOR` statements containing different queries. Each time, a new result set is created from that query and made available by way of the cursor variable. + +You can pass `REF CURSOR` types as parameters to or from stored procedures and functions. The return type of a function can also be a `REF CURSOR` type. This ability lets you modularize the operations on a cursor into separate programs by passing a cursor variable between programs. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/01_declaring_a_sys_refcursor_cursor_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/01_declaring_a_sys_refcursor_cursor_variable.mdx new file mode 100644 index 00000000000..59d1b2e4aa0 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/01_declaring_a_sys_refcursor_cursor_variable.mdx @@ -0,0 +1,23 @@ +--- +title: "Declaring a SYS_REFCURSOR cursor variable" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/01_declaring_a_sys_refcursor_cursor_variable/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The following is the syntax for declaring a `SYS_REFCURSOR` cursor variable: + +```sql + SYS_REFCURSOR; +``` + +Where `name` is an identifier assigned to the cursor variable. + +This example shows a `SYS_REFCURSOR` variable declaration: + +```sql +DECLARE + emp_refcur SYS_REFCURSOR; + ... 
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/02_declaring_a_user_defined_ref_cursor_type_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/02_declaring_a_user_defined_ref_cursor_type_variable.mdx
new file mode 100644
index 00000000000..b50e7b1c140
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/02_declaring_a_user_defined_ref_cursor_type_variable.mdx
@@ -0,0 +1,27 @@
+---
+title: "Declaring a user-defined REF CURSOR type variable"
+redirects:
+  - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/02_declaring_a_user_defined_ref_cursor_type_variable/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You must perform two distinct declaration steps to use a user-defined `REF CURSOR` variable:
+
+- Create a `REF CURSOR` type.
+- Declare the actual cursor variable based on that type.
+
+The following is the syntax for creating a user-defined `REF CURSOR` type:
+
+```sql
+TYPE <ctype> IS REF CURSOR [RETURN <rettype>];
+```
+
+Where `ctype` is an identifier assigned to the `REF CURSOR` type. The optional `RETURN` clause specifies a record type, `rettype`, that the cursor's result sets must match.
+
+This example shows a cursor variable declaration:
+
+```sql
+DECLARE
+    TYPE emp_cur_type IS REF CURSOR RETURN emp%ROWTYPE;
+    my_rec emp_cur_type;
+    ...
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx
new file mode 100644
index 00000000000..1f1226f1810
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx
@@ -0,0 +1,23 @@
+---
+title: "Declaring a cursor variable"
+indexCards: simple
+redirects:
+  - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+SPL supports two ways of declaring a cursor variable:
+
+- Using the `SYS_REFCURSOR` built-in data type. `SYS_REFCURSOR` is a `REF CURSOR` type that allows any result set to be associated with it. This is known as a *weakly-typed* `REF CURSOR`.
+- Creating a user-defined `REF CURSOR` type and then declaring a variable of that type.
+
+Only the declaration of `SYS_REFCURSOR` and user-defined `REF CURSOR` variables differs. The remaining usage, such as opening the cursor, selecting into the cursor, and closing the cursor, is the same for both cursor types. The examples primarily use `SYS_REFCURSOR` cursors. To make an example work with a user-defined `REF CURSOR`, change only the declaration section.
+
+!!! Note
+    A *strongly-typed* `REF CURSOR` requires the result set to conform to a declared number and order of fields with compatible data types. You declare the expected result set with the optional `RETURN` clause of the type definition.
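+
+As a minimal sketch of the difference (assuming the sample `emp` and `dept` tables), only the declaration section distinguishes the two styles:
+
+```sql
+DECLARE
+    -- Weakly typed: any result set can be associated with it
+    any_refcur      SYS_REFCURSOR;
+    -- Strongly typed: its result sets must conform to emp%ROWTYPE
+    TYPE emp_cur_type IS REF CURSOR RETURN emp%ROWTYPE;
+    emp_refcur      emp_cur_type;
+BEGIN
+    OPEN any_refcur FOR SELECT dname FROM dept;  -- any row shape is allowed
+    OPEN emp_refcur FOR SELECT * FROM emp;       -- must match emp%ROWTYPE
+    CLOSE any_refcur;
+    CLOSE emp_refcur;
+END;
+```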
+ +declaring_a_sys_refcursor_cursor_variable declaring_a_user_defined_ref_cursor_type_variable + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/03_opening_a_cursor_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/03_opening_a_cursor_variable.mdx
new file mode 100644
index 00000000000..0ebea20ae9d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/03_opening_a_cursor_variable.mdx
@@ -0,0 +1,34 @@
+---
+title: "Opening a cursor variable"
+redirects:
+  - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/03_opening_a_cursor_variable/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+Once you declare a cursor variable, you must open it with an associated `SELECT` command. The `OPEN FOR` statement specifies the `SELECT` command to use to create the result set:
+
+```sql
+OPEN <name> FOR <query>;
+```
+
+Where:
+
+`name` is the identifier of a previously declared cursor variable.
+
+`query` is a `SELECT` command that determines the result set when the statement is executed.
+
+The value of the cursor variable after the `OPEN FOR` statement is executed identifies the result set.
+
+This example shows a result set that's a list of employee numbers and names from a selected department. You can use a variable or parameter in the `SELECT` command anywhere an expression can normally appear. In this case, a parameter is used in the equality test for department number.
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_by_dept (
+    p_deptno        emp.deptno%TYPE
+)
+IS
+    emp_refcur      SYS_REFCURSOR;
+BEGIN
+    OPEN emp_refcur FOR SELECT empno, ename FROM emp WHERE deptno = p_deptno;
+    ...
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/04_fetching_rows_from_a_cursor_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/04_fetching_rows_from_a_cursor_variable.mdx
new file mode 100644
index 00000000000..c5bb3ed5601
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/04_fetching_rows_from_a_cursor_variable.mdx
@@ -0,0 +1,31 @@
+---
+title: "Fetching rows from a cursor variable"
+redirects:
+  - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/04_fetching_rows_from_a_cursor_variable/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+After you open a cursor variable, you can retrieve rows from the result set using the `FETCH` statement. For details, see [Fetching rows from a cursor](../08_static_cursors/03_fetching_rows_from_a_cursor/#fetching_rows_from_a_cursor).
+
+This example uses a `FETCH` statement to return the result set into two variables and then display them. The cursor attributes that report the state of a static cursor work with cursor variables as well. For details, see [Cursor attributes](../08_static_cursors/06_cursor_attributes/#cursor_attributes).
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_by_dept (
+    p_deptno        emp.deptno%TYPE
+)
+IS
+    emp_refcur      SYS_REFCURSOR;
+    v_empno         emp.empno%TYPE;
+    v_ename         emp.ename%TYPE;
+BEGIN
+    OPEN emp_refcur FOR SELECT empno, ename FROM emp WHERE deptno = p_deptno;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_refcur INTO v_empno, v_ename;
+        EXIT WHEN emp_refcur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    ...
+``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/05_closing_a_cursor_variable.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/05_closing_a_cursor_variable.mdx new file mode 100644 index 00000000000..8846b19ba76 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/05_closing_a_cursor_variable.mdx @@ -0,0 +1,49 @@ +--- +title: "Closing a cursor variable" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/05_closing_a_cursor_variable/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Use the `CLOSE` statement described in [Closing a cursor](../08_static_cursors/04_closing_a_cursor/#closing_a_cursor) to release the result set. + +!!! Note + Unlike static cursors, you don't have to close a cursor variable before you can reopen it. When you reopen it, the result set from the previous open is lost. + +This example includes the `CLOSE` statement: + +```sql +CREATE OR REPLACE PROCEDURE emp_by_dept ( + p_deptno emp.deptno%TYPE +) +IS + emp_refcur SYS_REFCURSOR; + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; +BEGIN + OPEN emp_refcur FOR SELECT empno, ename FROM emp WHERE deptno = p_deptno; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH emp_refcur INTO v_empno, v_ename; + EXIT WHEN emp_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || v_ename); + END LOOP; + CLOSE emp_refcur; +END; +``` + +The following is the output from this procedure: + +```sql +EXEC emp_by_dept(20) +__OUTPUT__ +EMPNO ENAME +----- ------- +7369 SMITH +7566 JONES +7788 SCOTT +7876 ADAMS +7902 FORD +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/06_usage_restrictions.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/06_usage_restrictions.mdx new file mode 100644 index 00000000000..8aa7e5e5d83 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/06_usage_restrictions.mdx @@ -0,0 +1,24 @@ +--- +title: "Usage restrictions" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/06_usage_restrictions/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The following are restrictions on cursor variable usage: + +- You can't use comparison operators to test cursor variables for equality, inequality, null, or not null. +- You can't assign null to a cursor variable. +- You can't store the value of a cursor variable in a database column. +- Static cursors and cursor variables aren't interchangeable. For example, you can't use a static cursor in an `OPEN FOR` statement. + +The table shows the permitted parameter modes for a cursor variable used as a procedure or function parameter. This use depends on the operations on the cursor variable in the procedure or function. + +| Operation | IN | IN OUT | OUT | +| --------- | ----- | ------ | ---- | +| `OPEN` | `No` | `Yes` | `No` | +| `FETCH` | `Yes` | `Yes` | `No` | +| `CLOSE` | `Yes` | `Yes` | `No` | + +For example, if a procedure performs all three operations—`OPEN FOR`, `FETCH`, and `CLOSE`—on a cursor variable declared as the procedure’s formal parameter, then that parameter must be declared with `IN OUT` mode. 
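+
+As a minimal sketch of that rule (the procedure name `emp_open_fetch_close` is illustrative, and the example assumes the sample `emp` table), a procedure that performs all three operations on its cursor variable parameter declares the parameter with `IN OUT` mode:
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_open_fetch_close (
+    p_refcur IN OUT SYS_REFCURSOR   -- IN OUT: the OPEN below requires it
+)
+IS
+    v_ename         emp.ename%TYPE;
+BEGIN
+    OPEN p_refcur FOR SELECT ename FROM emp;  -- OPEN permits only IN OUT
+    LOOP
+        FETCH p_refcur INTO v_ename;          -- FETCH permits IN or IN OUT
+        EXIT WHEN p_refcur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_ename);
+    END LOOP;
+    CLOSE p_refcur;                           -- CLOSE permits IN or IN OUT
+END;
+```
+
+Declaring `p_refcur` with `IN` mode instead would make the `OPEN` statement invalid, per the table.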
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/01_returning_a_ref_cursor_from_a_function.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/01_returning_a_ref_cursor_from_a_function.mdx new file mode 100644 index 00000000000..132fbf7c221 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/01_returning_a_ref_cursor_from_a_function.mdx @@ -0,0 +1,55 @@ +--- +title: "Returning a REF CURSOR from a function" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/01_returning_a_ref_cursor_from_a_function/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example opens the cursor variable with a query that selects employees with a given job. The cursor variable is specified in this function’s `RETURN` statement, which makes the result set available to the caller of the function. + +```sql +CREATE OR REPLACE FUNCTION emp_by_job (p_job VARCHAR2) +RETURN SYS_REFCURSOR +IS + emp_refcur SYS_REFCURSOR; +BEGIN + OPEN emp_refcur FOR SELECT empno, ename FROM emp WHERE job = p_job; + RETURN emp_refcur; +END; +``` + +This function is invoked in the following anonymous block by assigning the function’s return value to a cursor variable declared in the anonymous block’s declaration section. The result set is fetched using this cursor variable, and then it is closed. + +```sql +DECLARE + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; + v_job emp.job%TYPE := 'SALESMAN'; + v_emp_refcur SYS_REFCURSOR; +BEGIN + DBMS_OUTPUT.PUT_LINE('EMPLOYEES WITH JOB ' || v_job); + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + v_emp_refcur := emp_by_job(v_job); + LOOP + FETCH v_emp_refcur INTO v_empno, v_ename; + EXIT WHEN v_emp_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || v_ename); + END LOOP; + CLOSE v_emp_refcur; +END; +``` + +The following is the output when the anonymous block is executed: + +```sql +__OUTPUT__ +EMPLOYEES WITH JOB SALESMAN +EMPNO ENAME +----- ------- +7499 ALLEN +7521 WARD +7654 MARTIN +7844 TURNER +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/02_modularizing_cursor_operations.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/02_modularizing_cursor_operations.mdx new file mode 100644 index 00000000000..99a4c48d9d6 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/02_modularizing_cursor_operations.mdx @@ -0,0 +1,165 @@ +--- +title: "Modularizing cursor operations" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/02_modularizing_cursor_operations/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows how you can modularize the various operations on cursor variables into separate programs. 
+ +The following procedure opens the given cursor variable with a `SELECT` command that retrieves all rows: + +```sql +CREATE OR REPLACE PROCEDURE open_all_emp ( + p_emp_refcur IN OUT SYS_REFCURSOR +) +IS +BEGIN + OPEN p_emp_refcur FOR SELECT empno, ename FROM emp; +END; +``` + +This variation opens the given cursor variable with a `SELECT` command that retrieves all rows of a given department: + +```sql +CREATE OR REPLACE PROCEDURE open_emp_by_dept ( + p_emp_refcur IN OUT SYS_REFCURSOR, + p_deptno emp.deptno%TYPE +) +IS +BEGIN + OPEN p_emp_refcur FOR SELECT empno, ename FROM emp + WHERE deptno = p_deptno; +END; +``` + +This variation opens the given cursor variable with a `SELECT` command that retrieves all rows but from a different table. The function’s return value is the opened cursor variable. + +```sql +CREATE OR REPLACE FUNCTION open_dept ( + p_dept_refcur IN OUT SYS_REFCURSOR +) RETURN SYS_REFCURSOR +IS + v_dept_refcur SYS_REFCURSOR; +BEGIN + v_dept_refcur := p_dept_refcur; + OPEN v_dept_refcur FOR SELECT deptno, dname FROM dept; + RETURN v_dept_refcur; +END; +``` + +This procedure fetches and displays a cursor variable result set consisting of employee number and name: + +```sql +CREATE OR REPLACE PROCEDURE fetch_emp ( + p_emp_refcur IN OUT SYS_REFCURSOR +) +IS + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; +BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH p_emp_refcur INTO v_empno, v_ename; + EXIT WHEN p_emp_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || v_ename); + END LOOP; +END; +``` + +This procedure fetches and displays a cursor variable result set consisting of department number and name: + +```sql +CREATE OR REPLACE PROCEDURE fetch_dept ( + p_dept_refcur IN SYS_REFCURSOR +) +IS + v_deptno dept.deptno%TYPE; + v_dname dept.dname%TYPE; +BEGIN + DBMS_OUTPUT.PUT_LINE('DEPT DNAME'); + DBMS_OUTPUT.PUT_LINE('---- ---------'); + LOOP + FETCH p_dept_refcur INTO v_deptno, v_dname; + EXIT WHEN p_dept_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_deptno || ' ' || v_dname); + END LOOP; +END; +``` + +This procedure closes the given cursor variable: + +```sql +CREATE OR REPLACE PROCEDURE close_refcur ( + p_refcur IN OUT SYS_REFCURSOR +) +IS +BEGIN + CLOSE p_refcur; +END; +``` + +This anonymous block executes all the previous programs: + +```sql +DECLARE + gen_refcur SYS_REFCURSOR; +BEGIN + DBMS_OUTPUT.PUT_LINE('ALL EMPLOYEES'); + open_all_emp(gen_refcur); + fetch_emp(gen_refcur); + DBMS_OUTPUT.PUT_LINE('****************'); + + DBMS_OUTPUT.PUT_LINE('EMPLOYEES IN DEPT #10'); + open_emp_by_dept(gen_refcur, 10); + fetch_emp(gen_refcur); + DBMS_OUTPUT.PUT_LINE('****************'); + + DBMS_OUTPUT.PUT_LINE('DEPARTMENTS'); + fetch_dept(open_dept(gen_refcur)); + DBMS_OUTPUT.PUT_LINE('*****************'); + + close_refcur(gen_refcur); +END; +``` + +The following is the output from the anonymous block: + +```sql +__OUTPUT__ +ALL EMPLOYEES +EMPNO ENAME +----- ------- +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +7876 ADAMS +7900 JAMES +7902 FORD +7934 MILLER +**************** +EMPLOYEES IN DEPT #10 +EMPNO ENAME +----- ------- +7782 CLARK +7839 KING +7934 MILLER +**************** +DEPARTMENTS +DEPT DNAME +---- --------- +10 ACCOUNTING +20 RESEARCH +30 SALES +40 OPERATIONS +***************** +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx 
b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx new file mode 100644 index 00000000000..78ed4b8aea4 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx @@ -0,0 +1,16 @@ +--- +title: "Cursor variable examples" +indexCards: simple +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The examples that follow show cursor variable usage. + +
+ +returning_a_ref_cursor_from_a_function modularizing_cursor_operations + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/08_dynamic_queries_with_ref_cursors.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/08_dynamic_queries_with_ref_cursors.mdx
new file mode 100644
index 00000000000..e92536afa04
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/08_dynamic_queries_with_ref_cursors.mdx
@@ -0,0 +1,127 @@
+---
+title: "Using dynamic queries with REF CURSOR"
+redirects:
+  - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/08_dynamic_queries_with_ref_cursors/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+EDB Postgres Advanced Server supports dynamic queries by way of the `OPEN FOR USING` statement. The `SELECT` command is supplied to the `OPEN FOR USING` statement as a string literal or string variable:
+
+```sql
+OPEN <name> FOR <dynamic_string>
+  [ USING <bind_arg> [, <bind_arg_2> ] ...];
+```
+
+Where:
+
+`name` is the identifier of a previously declared cursor variable.
+
+`dynamic_string` is a string literal or string variable containing a `SELECT` command without the terminating semicolon.
+
+`bind_arg, bind_arg_2...` are bind arguments that pass variables to corresponding placeholders in the `SELECT` command when the cursor variable is opened. The placeholders are identifiers prefixed by a colon character.
+
+This example shows a dynamic query using a string literal:
+
+```sql
+CREATE OR REPLACE PROCEDURE dept_query
+IS
+    emp_refcur      SYS_REFCURSOR;
+    v_empno         emp.empno%TYPE;
+    v_ename         emp.ename%TYPE;
+BEGIN
+    OPEN emp_refcur FOR 'SELECT empno, ename FROM emp WHERE deptno = 30' ||
+        ' AND sal >= 1500';
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_refcur INTO v_empno, v_ename;
+        EXIT WHEN emp_refcur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    CLOSE emp_refcur;
+END;
+```
+
+The following is the output from this procedure:
+
+```sql
+EXEC dept_query;
+__OUTPUT__
+EMPNO ENAME
+----- -------
+7499 ALLEN
+7698 BLAKE
+7844 TURNER
+```
+
+This example query uses bind arguments to pass the query parameters:
+
+```sql
+CREATE OR REPLACE PROCEDURE dept_query (
+    p_deptno        emp.deptno%TYPE,
+    p_sal           emp.sal%TYPE
+)
+IS
+    emp_refcur      SYS_REFCURSOR;
+    v_empno         emp.empno%TYPE;
+    v_ename         emp.ename%TYPE;
+BEGIN
+    OPEN emp_refcur FOR 'SELECT empno, ename FROM emp WHERE deptno = :dept'
+        || ' AND sal >= :sal' USING p_deptno, p_sal;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_refcur INTO v_empno, v_ename;
+        EXIT WHEN emp_refcur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    CLOSE emp_refcur;
+END;
+```
+
+The following is the resulting output:
+
+```sql
+EXEC dept_query(30, 1500);
+__OUTPUT__
+EMPNO ENAME
+----- -------
+7499 ALLEN
+7698 BLAKE
+7844 TURNER
+```
+
+Finally, a string variable is used to pass the `SELECT` command, providing the most flexibility:
+
+```sql
+CREATE OR REPLACE PROCEDURE dept_query (
+    p_deptno        emp.deptno%TYPE,
+    p_sal           emp.sal%TYPE
+)
+IS
+    emp_refcur      SYS_REFCURSOR;
+    v_empno         emp.empno%TYPE;
+    v_ename         emp.ename%TYPE;
+    p_query_string  VARCHAR2(100);
+BEGIN
+    p_query_string := 'SELECT empno, ename FROM emp WHERE ' ||
+        'deptno = :dept AND sal >= :sal';
+    OPEN emp_refcur FOR p_query_string USING p_deptno, p_sal;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_refcur INTO
v_empno, v_ename; + EXIT WHEN emp_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || v_ename); + END LOOP; + CLOSE emp_refcur; +END; +EXEC dept_query(20, 1500); +__OUTPUT__ +EMPNO ENAME +----- ------- +7566 JONES +7788 SCOTT +7902 FORD +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/index.mdx new file mode 100644 index 00000000000..deb5797a616 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Working with REF CURSOR and cursor variables" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.077.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.171.html" +redirects: + - /epas/latest/epas_compat_spl/09_ref_cursors_and_cursor_variables/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `REF CURSOR` provides greater flexibility than static cursors. + +
+ +ref_cursor_overview declaring_a_cursor_variable opening_a_cursor_variable fetching_rows_from_a_cursor_variable closing_a_cursor_variable usage_restrictions examples dynamic_queries_with_ref_cursors + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx
new file mode 100644
index 00000000000..1b7d75ab1ef
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx
@@ -0,0 +1,229 @@
+---
+title: "Using associative arrays"
+redirects:
+  - /epas/latest/epas_compat_spl/10_collections/01_associative_arrays/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+An *associative array* is a type of collection that associates a unique key with a value. The key doesn't have to be numeric. It can be character data as well.
+
+## Associative array overview
+
+An associative array has the following characteristics:
+
+- You must define an *associative array type*, after which you can declare *array variables* of that array type. Data manipulation occurs using the array variable.
+- When an array variable is declared, the associative array is created, but it's empty. You can then start assigning values to keys.
+- The key can be any negative integer, positive integer, or zero if you specify `INDEX BY BINARY_INTEGER` or `PLS_INTEGER`.
+- The key can be character data if you specify `INDEX BY VARCHAR2`.
+- There's no predefined limit on the number of elements in the array. It grows dynamically as elements are added.
+- The array can be sparse. There can be gaps in the assignment of values to keys.
+- An attempt to reference an array element that hasn't been assigned a value results in an exception.
+
+## Defining an associative array
+
+Use the `TYPE IS TABLE OF ... INDEX BY` statement to define an associative array type:
+
+```sql
+TYPE <assoctype> IS TABLE OF { <datatype> | <rectype> | <objtype> }
+    INDEX BY { BINARY_INTEGER | PLS_INTEGER | VARCHAR2(<n>) };
+```
+
+Where:
+
+`assoctype` is an identifier assigned to the array type.
+
+`datatype` is a scalar data type such as `VARCHAR2` or `NUMBER`.
+
+`rectype` is a previously defined record type.
+
+`objtype` is a previously defined object type.
+
+`n` is the maximum length of a character key.
+
+## Declaring a variable
+
+To make use of the array, you must declare a *variable* with that array type. The following is the syntax for declaring an array variable:
+
+```text
+<array> <assoctype>
+```
+
+Where:
+
+`array` is an identifier assigned to the associative array.
+
+`assoctype` is the identifier of a previously defined array type.
+
+## Referencing an element of the array
+
+Reference an element of the array using the following syntax:
+
+```text
+<array>(<n>)[.<field> ]
+```
+
+Where:
+
+`array` is the identifier of a previously declared array.
+
+`n` is the key value, type-compatible with the data type given in the `INDEX BY` clause.
+
+If the array type of `array` is defined from a record type or object type, then `[.<field> ]` must reference an individual field in the record type or an attribute in the object type from which the array type is defined. Alternatively, you can reference the entire record or object by omitting `[.<field> ]`.
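+
+Because referencing a key that hasn't been assigned a value raises an exception, defensive code can test for a key first with the `EXISTS` collection method. This is a minimal sketch; the type name `int_arr_typ` and the key values are illustrative:
+
+```sql
+DECLARE
+    TYPE int_arr_typ IS TABLE OF NUMBER INDEX BY BINARY_INTEGER;
+    int_arr         int_arr_typ;
+BEGIN
+    int_arr(1)  := 100;            -- keys don't have to be contiguous
+    int_arr(10) := 200;            -- the array can be sparse
+    IF int_arr.EXISTS(5) THEN      -- guard against an unassigned key
+        DBMS_OUTPUT.PUT_LINE('Key 5 = ' || int_arr(5));
+    ELSE
+        DBMS_OUTPUT.PUT_LINE('Key 5 has no value');
+    END IF;
+END;
+```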
+ +## Examples + +This example reads the first 10 employee names from the `emp` table, stores them in an array, and then displays the results from the array: + +```sql +DECLARE + TYPE emp_arr_typ IS TABLE OF VARCHAR2(10) INDEX BY BINARY_INTEGER; + emp_arr emp_arr_typ; + CURSOR emp_cur IS SELECT ename FROM emp WHERE ROWNUM <= 10; + i INTEGER := 0; +BEGIN + FOR r_emp IN emp_cur LOOP + i := i + 1; + emp_arr(i) := r_emp.ename; + END LOOP; + FOR j IN 1..10 LOOP + DBMS_OUTPUT.PUT_LINE(emp_arr(j)); + END LOOP; +END; +``` + +This example produces the following output: + +```sql +__OUTPUT__ +SMITH +ALLEN +WARD +JONES +MARTIN +BLAKE +CLARK +SCOTT +KING +TURNER +``` + +This example uses a record type in the array definition: + +```sql +DECLARE + TYPE emp_rec_typ IS RECORD ( + empno NUMBER(4), + ename VARCHAR2(10) + ); + TYPE emp_arr_typ IS TABLE OF emp_rec_typ INDEX BY BINARY_INTEGER; + emp_arr emp_arr_typ; + CURSOR emp_cur IS SELECT empno, ename FROM emp WHERE ROWNUM <= 10; + i INTEGER := 0; +BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + FOR r_emp IN emp_cur LOOP + i := i + 1; + emp_arr(i).empno := r_emp.empno; + emp_arr(i).ename := r_emp.ename; + END LOOP; + FOR j IN 1..10 LOOP + DBMS_OUTPUT.PUT_LINE(emp_arr(j).empno || ' ' || + emp_arr(j).ename); + END LOOP; +END; +``` + +The following is the output from this anonymous block: + +```sql +__OUTPUT__ +EMPNO ENAME +----- ------- +7369 SMITH +7499 ALLEN +7521 WARD +7566 JONES +7654 MARTIN +7698 BLAKE +7782 CLARK +7788 SCOTT +7839 KING +7844 TURNER +``` + +This example uses the `emp%ROWTYPE` attribute to define `emp_arr_typ` instead of using the `emp_rec_typ` record type: + +```sql +DECLARE + TYPE emp_arr_typ IS TABLE OF emp%ROWTYPE INDEX BY BINARY_INTEGER; + emp_arr emp_arr_typ; + CURSOR emp_cur IS SELECT empno, ename FROM emp WHERE ROWNUM <= 10; + i INTEGER := 0; +BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + FOR r_emp IN emp_cur LOOP + i := i + 1; + emp_arr(i).empno := r_emp.empno; + emp_arr(i).ename := r_emp.ename; + END LOOP; + FOR j IN 1..10 LOOP + DBMS_OUTPUT.PUT_LINE(emp_arr(j).empno || ' ' || + emp_arr(j).ename); + END LOOP; +END; +``` + +The results are the same as using a record type in the array definition. 
+ +Instead of assigning each field of the record individually, you can make a record-level assignment from `r_emp` to `emp_arr`: + +```sql +DECLARE + TYPE emp_rec_typ IS RECORD ( + empno NUMBER(4), + ename VARCHAR2(10) + ); + TYPE emp_arr_typ IS TABLE OF emp_rec_typ INDEX BY BINARY_INTEGER; + emp_arr emp_arr_typ; + CURSOR emp_cur IS SELECT empno, ename FROM emp WHERE ROWNUM <= 10; + i INTEGER := 0; +BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + FOR r_emp IN emp_cur LOOP + i := i + 1; + emp_arr(i) := r_emp; + END LOOP; + FOR j IN 1..10 LOOP + DBMS_OUTPUT.PUT_LINE(emp_arr(j).empno || ' ' || + emp_arr(j).ename); + END LOOP; +END; +``` + +This example uses the key of an associative array as character data: + +```sql +DECLARE + TYPE job_arr_typ IS TABLE OF NUMBER INDEX BY VARCHAR2(9); + job_arr job_arr_typ; +BEGIN + job_arr('ANALYST') := 100; + job_arr('CLERK') := 200; + job_arr('MANAGER') := 300; + job_arr('SALESMAN') := 400; + job_arr('PRESIDENT') := 500; + DBMS_OUTPUT.PUT_LINE('ANALYST : ' || job_arr('ANALYST')); + DBMS_OUTPUT.PUT_LINE('CLERK : ' || job_arr('CLERK')); + DBMS_OUTPUT.PUT_LINE('MANAGER : ' || job_arr('MANAGER')); + DBMS_OUTPUT.PUT_LINE('SALESMAN : ' || job_arr('SALESMAN')); + DBMS_OUTPUT.PUT_LINE('PRESIDENT: ' || job_arr('PRESIDENT')); +END; + +ANALYST : 100 +CLERK : 200 +MANAGER : 300 +SALESMAN : 400 +PRESIDENT: 500 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx new file mode 100644 index 00000000000..1438206d820 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx @@ -0,0 +1,233 @@ +--- +title: "Working with nested tables" +redirects: + - /epas/latest/epas_compat_spl/10_collections/02_nested_tables/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A *nested table* is a type of collection that associates a positive integer with a value. + +## Nested tables overview + +A nested table has the following characteristics: + +- You must define a *nested table type*. After that, you can declare *nested table variables* of that nested table type. Data manipulation occurs using the nested table variable, also known simply as a table. +- When you declare a nested table variable, the nested table doesn't yet exist. It is a null collection. You must initialize the null table with a *constructor*. You can also initialize the table by using an assignment statement where the right-hand side of the assignment is an initialized table of the same type. +!!! Note + Initialization of a nested table is mandatory in Oracle but optional in SPL. +- The key is a positive integer. +- The constructor establishes the number of elements in the table. The `EXTEND` method adds elements to the table. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods). +!!! Note + Using the constructor to establish the number of elements in the table and using the `EXTEND` method to add elements to the table are mandatory in Oracle but optional in SPL. +- The table can be sparse. There can be gaps in assigning values to keys. +- An attempt to reference a table element beyond its initialized or extended size results in a `SUBSCRIPT_BEYOND_COUNT` exception. 
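+
+As a minimal sketch of the compatibility notes above (the type name `char_tbl_typ` is illustrative), this block uses the Oracle-compatible idiom. In SPL, the constructor call and the `EXTEND` call are optional, but including them keeps the code portable:
+
+```sql
+DECLARE
+    TYPE char_tbl_typ IS TABLE OF CHAR(1);
+    char_tbl        char_tbl_typ;
+BEGIN
+    char_tbl := char_tbl_typ();   -- initialize: mandatory in Oracle, optional in SPL
+    char_tbl.EXTEND(2);           -- make room for two elements: also optional in SPL
+    char_tbl(1) := 'A';
+    char_tbl(2) := 'B';
+    DBMS_OUTPUT.PUT_LINE(char_tbl(1) || char_tbl(2));
+END;
+```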
+
+## Defining a nested table
+
+Use the `TYPE IS TABLE` statement to define a nested table type in the declaration section of an SPL program:
+
+```sql
+TYPE <tbltype> IS TABLE OF { <datatype> | <rectype> | <objtype> };
+```
+
+Where:
+
+`tbltype` is an identifier assigned to the nested table type.
+
+`datatype` is a scalar data type such as `VARCHAR2` or `NUMBER`.
+
+`rectype` is a previously defined record type.
+
+`objtype` is a previously defined object type.
+
+!!! Note
+    You can use the `CREATE TYPE` command to define a nested table type that's available to all SPL programs in the database. See [SQL reference](../../../reference/oracle_compatibility_reference/epas_compat_sql/39_create_type/) for more information about the `CREATE TYPE` command.
+
+## Declaring a variable
+
+To use the table, you must declare a *variable* of that nested table type. The following is the syntax for declaring a table variable:
+
+```text
+<table> <tbltype>
+```
+
+Where:
+
+`table` is an identifier assigned to the nested table.
+
+`tbltype` is the identifier of a previously defined nested table type.
+
+## Initializing the nested table
+
+Initialize a nested table using the nested table type’s constructor:
+
+```sql
+<tbltype> ([ { <expr1> | NULL } [, { <expr2> | NULL } ] [, ...] ])
+```
+
+Where:
+
+`tbltype` is the identifier of the nested table type’s constructor, which has the same name as the nested table type.
+
+`expr1, expr2, …` are expressions that are type-compatible with the element type of the table. If you specify `NULL`, the corresponding element is set to null. If the parameter list is empty, then an empty nested table is returned, which means the table has no elements. If the table is defined from an object type, then `exprn` must return an object of that object type. The object can be the return value of a function or the object type’s constructor. Or the object can be an element of another nested table of the same type.
+
+If you apply a collection method other than `EXISTS` to an uninitialized nested table, a `COLLECTION_IS_NULL` exception is thrown. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods).
+
+This example shows a constructor for a nested table:
+
+```sql
+DECLARE
+    TYPE nested_typ IS TABLE OF CHAR(1);
+    v_nested nested_typ := nested_typ('A','B');
+```
+
+## Referencing an element of the table
+
+Reference an element of the table using the following syntax:
+
+```text
+<table>(<n>)[.<element> ]
+```
+
+Where:
+
+`table` is the identifier of a previously declared table.
+
+`n` is a positive integer.
+
+If the table type of `table` is defined from a record type or object type, then `.<element>` must reference an individual field in the record type or an attribute in the object type from which the nested table type is defined. Alternatively, you can reference the entire record or object by omitting `.<element>`.
+
+## Examples
+
+This example shows a nested table where it's known that there are four elements:
+
+```sql
+DECLARE
+    TYPE dname_tbl_typ IS TABLE OF VARCHAR2(14);
+    dname_tbl       dname_tbl_typ;
+    CURSOR dept_cur IS SELECT dname FROM dept ORDER BY dname;
+    i               INTEGER := 0;
+BEGIN
+    dname_tbl := dname_tbl_typ(NULL, NULL, NULL, NULL);
+    FOR r_dept IN dept_cur LOOP
+        i := i + 1;
+        dname_tbl(i) := r_dept.dname;
+    END LOOP;
+    DBMS_OUTPUT.PUT_LINE('DNAME');
+    DBMS_OUTPUT.PUT_LINE('----------');
+    FOR j IN 1..i LOOP
+        DBMS_OUTPUT.PUT_LINE(dname_tbl(j));
+    END LOOP;
+END;
+```
+
+The following is the output from the example:
+
+```sql
+__OUTPUT__
+DNAME
+----------
+ACCOUNTING
+OPERATIONS
+RESEARCH
+SALES
+```
+
+This example reads the first 10 employee names from the `emp` table, stores them in a nested table, and then displays the results from the table. The SPL code is written to assume that the number of employees to return isn't known beforehand.
+
+```sql
+DECLARE
+    TYPE emp_rec_typ IS RECORD (
+        empno       NUMBER(4),
+        ename       VARCHAR2(10)
+    );
+    TYPE emp_tbl_typ IS TABLE OF emp_rec_typ;
+    emp_tbl         emp_tbl_typ;
+    CURSOR emp_cur IS SELECT empno, ename FROM emp WHERE ROWNUM <= 10;
+    i               INTEGER := 0;
+BEGIN
+    emp_tbl := emp_tbl_typ();
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    FOR r_emp IN emp_cur LOOP
+        i := i + 1;
+        emp_tbl.EXTEND;
+        emp_tbl(i) := r_emp;
+    END LOOP;
+    FOR j IN 1..10 LOOP
+        DBMS_OUTPUT.PUT_LINE(emp_tbl(j).empno || '     ' ||
+            emp_tbl(j).ename);
+    END LOOP;
+END;
+```
+
+An empty table with the constructor `emp_tbl_typ()` is created as the first statement in the executable section of the anonymous block. The `EXTEND` collection method is then used to add an element to the table for each employee returned from the result set. See [Extend](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/04_extend/#extend).
+
+The following is the output:
+
+```sql
+__OUTPUT__
+EMPNO    ENAME
+-----    -------
+7369     SMITH
+7499     ALLEN
+7521     WARD
+7566     JONES
+7654     MARTIN
+7698     BLAKE
+7782     CLARK
+7788     SCOTT
+7839     KING
+7844     TURNER
+```
+
+This example shows how you can use a nested table of an object type. First, create an object type with attributes for the department name and location:
+
+```sql
+CREATE TYPE dept_obj_typ AS OBJECT (
+    dname           VARCHAR2(14),
+    loc             VARCHAR2(13)
+);
+```
+
+This anonymous block defines a nested table type whose element consists of the `dept_obj_typ` object type. A nested table variable is declared, initialized, and then populated from the `dept` table. Finally, the elements from the nested table are displayed.
+
+```sql
+DECLARE
+    TYPE dept_tbl_typ IS TABLE OF dept_obj_typ;
+    dept_tbl        dept_tbl_typ;
+    CURSOR dept_cur IS SELECT dname, loc FROM dept ORDER BY dname;
+    i               INTEGER := 0;
+BEGIN
+    dept_tbl := dept_tbl_typ(
+        dept_obj_typ(NULL,NULL),
+        dept_obj_typ(NULL,NULL),
+        dept_obj_typ(NULL,NULL),
+        dept_obj_typ(NULL,NULL)
+    );
+    FOR r_dept IN dept_cur LOOP
+        i := i + 1;
+        dept_tbl(i).dname := r_dept.dname;
+        dept_tbl(i).loc   := r_dept.loc;
+    END LOOP;
+    DBMS_OUTPUT.PUT_LINE('DNAME          LOC');
+    DBMS_OUTPUT.PUT_LINE('----------     ----------');
+    FOR j IN 1..i LOOP
+        DBMS_OUTPUT.PUT_LINE(RPAD(dept_tbl(j).dname,14) || ' ' ||
+            dept_tbl(j).loc);
+    END LOOP;
+END;
+```
+
+The parameters that make up the nested table’s constructor, `dept_tbl_typ`, are calls to the object type’s constructor, `dept_obj_typ`. The following is the output from the anonymous block:
+
+```sql
+__OUTPUT__
+DNAME          LOC
+----------     ----------
+ACCOUNTING     NEW YORK
+OPERATIONS     BOSTON
+RESEARCH       DALLAS
+SALES          CHICAGO
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/03_varrays.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/03_varrays.mdx
new file mode 100644
index 00000000000..f25b03d9e43
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/03_varrays.mdx
@@ -0,0 +1,127 @@
+---
+title: "Using varrays"
+redirects:
+  - /epas/latest/epas_compat_spl/10_collections/03_varrays/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+A *varray* or *variable-size array* is a type of collection that associates a positive integer with a value. In many respects, it's similar to a nested table.
+
+## Varray overview
+
+A varray has the following characteristics:
+
+- You must define a *varray type* with a maximum size limit. After you define the varray type, you can declare *varray variables* of that varray type. Data manipulation occurs using the varray variable, also known simply as a varray. The number of elements in the varray can't exceed the maximum size limit set in the varray type definition.
+- When you declare a varray variable, the varray at first is a null collection. You must initialize the null varray with a *constructor*. You can also initialize the varray by using an assignment statement where the right-hand side of the assignment is an initialized varray of the same type.
+- The key is a positive integer.
+- The constructor sets the number of elements in the varray, which must not exceed the maximum size limit. The `EXTEND` method can add elements to the varray up to the maximum size limit. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods).
+- Unlike a nested table, a varray can't be sparse. There are no gaps when assigning values to keys.
+- An attempt to reference a varray element beyond its initialized or extended size but within the maximum size limit results in a `SUBSCRIPT_BEYOND_COUNT` exception.
+- An attempt to reference a varray element beyond the maximum size limit or extend a varray beyond the maximum size limit results in a `SUBSCRIPT_OUTSIDE_LIMIT` exception.
+
+## Defining a varray type
+
+Use the `TYPE IS VARRAY` statement to define a varray type in the declaration section of an SPL program:
+
+```sql
+TYPE <varraytype> IS { VARRAY | VARYING ARRAY }(<maxsize>)
+    OF { <datatype> | <objtype> };
+```
+
+Where:
+
+`varraytype` is an identifier assigned to the varray type.
+
+`datatype` is a scalar data type such as `VARCHAR2` or `NUMBER`.
+
+`maxsize` is the maximum number of elements permitted in varrays of that type.
+
+`objtype` is a previously defined object type.
+
+You can use the `CREATE TYPE` command to define a varray type that's available to all SPL programs in the database. To make use of the varray, you must declare a *variable* of that varray type. The following is the syntax for declaring a varray variable:
+
+```text
+<varray> <varraytype>
+```
+
+Where:
+
+`varray` is an identifier assigned to the varray.
+
+`varraytype` is the identifier of a previously defined varray type.
+
+## Initializing a varray
+
+Initialize a varray using the varray type’s constructor:
+
+```text
+<varraytype> ([ { <expr1> | NULL } [, { <expr2> | NULL } ]
+  [, ...] ])
+```
+
+Where:
+
+`varraytype` is the identifier of the varray type’s constructor, which has the same name as the varray type.
+
+`expr1, expr2, …` are expressions that are type-compatible with the element type of the varray. If you specify `NULL`, the corresponding element is set to null. If the parameter list is empty, then an empty varray is returned, which means there are no elements in the varray. If the varray is defined from an object type, then `exprn` must return an object of that object type. The object can be the return value of a function or the return value of the object type’s constructor. The object can also be an element of another varray of the same varray type.
+
+If you apply a collection method other than `EXISTS` to an uninitialized varray, a `COLLECTION_IS_NULL` exception is thrown. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods).
+
+The following is an example of a constructor for a varray:
+
+```sql
+DECLARE
+    TYPE varray_typ IS VARRAY(2) OF CHAR(1);
+    v_varray varray_typ := varray_typ('A','B');
+```
+
+## Referencing an element of the varray
+
+Reference an element of the varray using this syntax:
+
+```text
+<varray>(<n>)[.<element> ]
+```
+
+Where:
+
+`varray` is the identifier of a previously declared varray.
+
+`n` is a positive integer.
+
+If the varray type of `varray` is defined from an object type, then `.<element>` must reference an attribute in the object type from which the varray type is defined.
Alternatively, you can reference the entire object by omitting `.<element>`.
+
+This example shows a varray where it's known that there are four elements:
+
+```sql
+DECLARE
+    TYPE dname_varray_typ IS VARRAY(4) OF VARCHAR2(14);
+    dname_varray    dname_varray_typ;
+    CURSOR dept_cur IS SELECT dname FROM dept ORDER BY dname;
+    i               INTEGER := 0;
+BEGIN
+    dname_varray := dname_varray_typ(NULL, NULL, NULL, NULL);
+    FOR r_dept IN dept_cur LOOP
+        i := i + 1;
+        dname_varray(i) := r_dept.dname;
+    END LOOP;
+    DBMS_OUTPUT.PUT_LINE('DNAME');
+    DBMS_OUTPUT.PUT_LINE('----------');
+    FOR j IN 1..i LOOP
+        DBMS_OUTPUT.PUT_LINE(dname_varray(j));
+    END LOOP;
+END;
+```
+
+The following is the output from this example:
+
+```sql
+__OUTPUT__
+DNAME
+----------
+ACCOUNTING
+OPERATIONS
+RESEARCH
+SALES
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/about_collections.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/about_collections.mdx
new file mode 100644
index 00000000000..1071875345f
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/about_collections.mdx
@@ -0,0 +1,43 @@
+---
+title: "About collection types"
+---
+
+The most commonly known type of collection is an array. In EDB Postgres Advanced Server, the supported collection types are:
+
+- [Associative arrays](01_associative_arrays) (formerly called *index-by-tables* in Oracle)
+- [Nested tables](02_nested_tables)
+- [Varrays](03_varrays)
+
+## Defining the collection type
+
+To set up a collection:
+
+1. Define a collection of the desired type. You can do this in the declaration section of an SPL program, which results in a *local type* that you can access only in that program. For nested table and varray types, you can also do this using the `CREATE TYPE` command, which creates a persistent, *standalone type* that any SPL program in the database can reference.
+2. Declare variables of the collection type. The collection associated with the declared variable is uninitialized at this point if no value assignment is made as part of the variable declaration.
+
+## Initializing a null collection
+
+- Uninitialized collections of nested tables and varrays are null. A *null collection* doesn't yet exist. Generally, a `COLLECTION_IS_NULL` exception is thrown if a collection method is invoked on a null collection.
+- To initialize a null collection, you must either make it an empty collection or assign a non-null value to it. Generally, a null collection is initialized by using its *constructor*.
+
+## Adding elements to an associative array
+
+- Uninitialized collections of associative arrays exist but have no elements. An existing collection with no elements is called an *empty collection*.
+- To add elements to an empty associative array, you can assign values to its keys. For nested tables and varrays, generally the constructor assigns the initial values, and you then use the `EXTEND` method to grow the collection beyond the initial size set by the constructor.
+
+## Limitations
+
+- Multilevel collections (that is, where the data item of a collection is another collection) aren't supported.
+
+- Columns of collection types aren't supported.
+
+  For example, you can create an array type `varchar2_t`, but you can't create a table using `varchar2_t` as a column data type.
+
+  ```sql
+  --Create an array
+  edb=# CREATE TYPE varchar2_t AS TABLE OF character varying;
+  CREATE TYPE
+  --Create a table using the array as the column data type
+  edb=# CREATE TABLE t(a varchar2_t);
+  __OUTPUT__
+  ERROR:  column "a" has collection type varchar2_t, columns of collection types are not supported.
+  ```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/index.mdx
new file mode 100644
index 00000000000..380f4a99aeb
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/10_collections/index.mdx
@@ -0,0 +1,25 @@
+---
+title: "Working with collection types"
+indexCards: simple
+navigation:
+  - about_collections
+  - 01_associative_arrays
+  - 02_nested_tables
+  - 03_varrays
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.078.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.172.html"
+redirects:
+  - /epas/latest/epas_compat_spl/10_collections/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+A *collection* is a set of ordered data items with the same data type. Generally, the data item is a scalar field. It can also be a user-defined type such as a record type or an object type. In this case, the structure and the data types that make up each field of the user-defined type must be the same for each element in the set. Reference each data item in the set by using subscript notation inside a pair of parentheses.
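+
+For instance, a minimal sketch (the type and variable names are illustrative) of subscript notation, where `commissions(3)` reads the element stored under key `3`:
+
+```sql
+DECLARE
+    TYPE commission_typ IS TABLE OF NUMBER INDEX BY BINARY_INTEGER;
+    commissions commission_typ;
+BEGIN
+    commissions(1) := 300;
+    commissions(2) := 500;
+    commissions(3) := 1400;
+    -- Subscript notation: the key goes inside the parentheses
+    DBMS_OUTPUT.PUT_LINE('Third commission: ' || commissions(3));
+END;
+```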
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx
new file mode 100644
index 00000000000..82557b489cd
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx
@@ -0,0 +1,32 @@
+---
+title: "Using the TABLE function"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/01_table/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+Use the `TABLE()` function to transform the members of an array into a set of rows. The signature is:
+
+```sql
+TABLE(<collection_value>)
+```
+
+Where `collection_value` is an expression that evaluates to a value of collection type.
+
+The `TABLE()` function expands the nested contents of a collection into a table format. You can use the `TABLE()` function anywhere you use a regular table expression.
+
+The `TABLE()` function returns a `SETOF ANYELEMENT`, which is a set of values of any type. For example, if the argument passed to this function is an array of `dates`, `TABLE()` returns a `SETOF dates`. If the argument passed to this function is an array of `paths`, `TABLE()` returns a `SETOF paths`.
+
+You can use the `TABLE()` function to expand the contents of a collection into table form:
+
+```sql
+postgres=# SELECT * FROM TABLE(monthly_balance(445.00, 980.20, 552.00));
+__OUTPUT__
+ monthly_balance
+----------------
+ 445.00
+ 980.20
+ 552.00
+(3 rows)
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx
new file mode 100644
index 00000000000..3b0c5b4aa7c
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx
@@ -0,0 +1,264 @@
+---
+title: "Using the MULTISET operators"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+The `MULTISET` operators combine two collections to form a third collection.
+
+## Syntax
+
+```sql
+<coll_1> MULTISET [ UNION | INTERSECT | EXCEPT ] [ ALL | DISTINCT | UNIQUE ] <coll_2>
+```
+
+Where `coll_1` and `coll_2` specify the names of the collections to combine.
+
+Include the `ALL` keyword to represent duplicate elements (elements that are present in both `coll_1` and `coll_2`) in the result, once for each time they're present in the original collections. This is the default behavior.
+
+Include the `DISTINCT` or `UNIQUE` keyword to include duplicate elements in the result only once. There is no difference between the `DISTINCT` and `UNIQUE` keywords.
+
+There are three forms of `MULTISET` operators:
+
+- `MULTISET UNION`
+- `MULTISET INTERSECT`
+- `MULTISET EXCEPT`
+
+## MULTISET UNION
+
+`MULTISET UNION` takes two nested tables as arguments and returns a nested table whose values come from both of the input nested tables. The two input nested tables must be of the same type, and the returned nested table is of the same type as well.
+ +This example uses the `MULTISET UNION` operator to combine `collection_1` and `collection_2` into a third collection, `collection_3`: + +```sql +DECLARE + TYPE int_arr_typ IS TABLE OF NUMBER(2); + collection_1 int_arr_typ; + collection_2 int_arr_typ; + collection_3 int_arr_typ; + v_results VARCHAR2(50); +BEGIN + collection_1 := int_arr_typ(10,20,30); + collection_2 := int_arr_typ(30,40); + collection_3 := collection_1 MULTISET UNION ALL collection_2; + DBMS_OUTPUT.PUT_LINE('COUNT: ' || collection_3.COUNT); + FOR i IN collection_3.FIRST .. collection_3.LAST LOOP + IF collection_3(i) IS NULL THEN + v_results := v_results || 'NULL '; + ELSE + v_results := v_results || collection_3(i) || ' '; + END IF; + END LOOP; + DBMS_OUTPUT.PUT_LINE('Results: ' || v_results); +END; + +COUNT: 5 +Results: 10 20 30 30 40 +``` + +The resulting collection includes one entry for each element in `collection_1` and `collection_2`. If you use the `DISTINCT` keyword, the results are as follows: + +```sql +DECLARE + TYPE int_arr_typ IS TABLE OF NUMBER(2); + collection_1 int_arr_typ; + collection_2 int_arr_typ; + collection_3 int_arr_typ; + v_results VARCHAR2(50); +BEGIN + collection_1 := int_arr_typ(10,20,30); + collection_2 := int_arr_typ(30,40); + collection_3 := collection_1 MULTISET UNION DISTINCT collection_2; + DBMS_OUTPUT.PUT_LINE('COUNT: ' || collection_3.COUNT); + FOR i IN collection_3.FIRST .. collection_3.LAST LOOP + IF collection_3(i) IS NULL THEN + v_results := v_results || 'NULL '; + ELSE + v_results := v_results || collection_3(i) || ' '; + END IF; + END LOOP; + DBMS_OUTPUT.PUT_LINE('Results: ' || v_results); +END; + +COUNT: 4 +Results: 10 20 30 40 +``` + +The resulting collection includes only those members with distinct values. + +In this example, the `MULTISET UNION DISTINCT` operator removes duplicate entries that are stored in the same collection: + +```sql +DECLARE + TYPE int_arr_typ IS TABLE OF NUMBER(2); + collection_1 int_arr_typ; + collection_2 int_arr_typ; + collection_3 int_arr_typ; + v_results VARCHAR2(50); +BEGIN + collection_1 := int_arr_typ(10,20,30,30); + collection_2 := int_arr_typ(40,50); + collection_3 := collection_1 MULTISET UNION DISTINCT collection_2; + DBMS_OUTPUT.PUT_LINE('COUNT: ' || collection_3.COUNT); + FOR i IN collection_3.FIRST .. collection_3.LAST LOOP + IF collection_3(i) IS NULL THEN + v_results := v_results || 'NULL '; + ELSE + v_results := v_results || collection_3(i) || ' '; + END IF; + END LOOP; + DBMS_OUTPUT.PUT_LINE('Results: ' || v_results); +END; + +COUNT: 5 +Results: 10 20 30 40 50 +``` + +### MULTISET INTERSECT + +`MULTISET INTERSECT` takes as arguments two nested tables and returns a nested table whose values are common in the two input nested tables. The two input nested tables must be of the same type, and the returned nested table is of the same type as well. + +This example uses the `MULTISET INTERSECT` operator to combine `color_name` and `fruit_name` into a third collection, `common_name`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Orange', 'Peach', 'Yellow', 'Peach'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach', 'Peach'); + common_name := color_name MULTISET INTERSECT UNIQUE fruit_name; + FOR i IN common_name.FIRST .. 
common_name.LAST LOOP + DBMS_OUTPUT.PUT_LINE(common_name(i)); + END LOOP; +END; +__OUTPUT__ +Orange +Peach +``` + +This example shows the use of `MULTISET INTERSECT DISTINCT`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Orange', 'Peach', 'Yellow', 'Peach'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach', 'Peach'); + common_name := color_name MULTISET INTERSECT DISTINCT fruit_name; + FOR i IN common_name.FIRST .. common_name.LAST LOOP + DBMS_OUTPUT.PUT_LINE(common_name(i)); + END LOOP; +END; +__OUTPUT__ +Orange +Peach +``` + +This example shows the use of `MULTISET INTERSECT ALL`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Orange', 'Peach', 'Yellow', 'Peach'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach', 'Peach'); + common_name := color_name MULTISET INTERSECT ALL fruit_name; + FOR i IN common_name.FIRST .. common_name.LAST LOOP + DBMS_OUTPUT.PUT_LINE(common_name(i)); + END LOOP; +END; +__OUTPUT__ +Orange +Peach +Peach +``` + +### MULTISET EXCEPT + +`MULTISET EXCEPT` takes two nested tables as arguments and returns a nested table whose elements are in the first nested table but not in the second nested table. The two input nested tables must be of the same type, and the returned nested table is of the same type as well. + +This example shows the use of `MULTISET EXCEPT UNIQUE`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Blue', 'Orange', 'Peach', 'Yellow'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach'); + common_name := color_name MULTISET EXCEPT UNIQUE fruit_name; + + FOR i IN common_name.FIRST .. common_name.LAST LOOP + DBMS_OUTPUT.PUT_LINE(common_name(i)); + END LOOP; +END; +__OUTPUT__ +Blue +Green +Red +Yellow +``` + +This example shows the use of `MULTISET EXCEPT DISTINCT`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Blue', 'Orange', 'Peach', 'Yellow'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach'); + common_name := color_name MULTISET EXCEPT DISTINCT fruit_name; + + FOR i IN common_name.FIRST .. common_name.LAST LOOP + DBMS_OUTPUT.PUT_LINE(common_name(i)); + END LOOP; +END; +__OUTPUT__ +Blue +Green +Red +Yellow +``` + +This example shows the use of `MULTISET EXCEPT ALL`: + +```sql +DECLARE + TYPE name_typ IS TABLE OF VARCHAR(50); + color_name name_typ; + fruit_name name_typ; + common_name name_typ; +BEGIN + color_name := name_typ('Red', 'Green', 'Blue', 'Blue', 'Orange', 'Peach', 'Yellow'); + fruit_name := name_typ('Mango', 'Orange', 'Grapes', 'Banana', 'Peach'); + common_name := color_name MULTISET EXCEPT ALL fruit_name; + + FOR i IN common_name.FIRST .. 
common_name.LAST LOOP
+        DBMS_OUTPUT.PUT_LINE(common_name(i));
+    END LOOP;
+END;
+__OUTPUT__
+Red
+Green
+Blue
+Blue
+Yellow
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx
new file mode 100644
index 00000000000..31e29adc603
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx
@@ -0,0 +1,220 @@
+---
+title: "Using the FORALL statement"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You can use collections to process DML commands more efficiently by passing all the values to be used for repetitive execution of an `INSERT`, `UPDATE`, `DELETE`, or `MERGE` command to the database server in one pass. The alternative is to invoke the DML command repeatedly with new values. Specify the DML command to process this way with the `FORALL` statement. In addition, provide one or more collections in the DML command where you want to substitute different values each time the command is executed.
+
+## Syntax
+
+```sql
+FORALL <index> IN <lower_bound> .. <upper_bound>
+  [SAVE EXCEPTIONS] { <insert_stmt> | <update_stmt> | <delete_stmt> | <merge_stmt> };
+```
+
+`index` is the position in the collection given in the `insert_stmt`, `update_stmt`, `delete_stmt`, or `merge_stmt` DML command that iterates from the integer value given as `lower_bound` up to and including `upper_bound`.
+
+Optionally, `SAVE EXCEPTIONS` allows a `FORALL` statement to continue even if any of the DML statements fail. When a DML statement fails, SPL doesn't raise an exception; instead, it saves information about the failure. After the `FORALL` statement completes, SPL raises a single exception for the `FORALL` statement. The error information is stored in the collection of records called `SQL%BULK_EXCEPTIONS`, where:
+
+- `SQL%BULK_EXCEPTIONS(i).ERROR_INDEX` records the iteration number of the DML statement that failed.
+- `SQL%BULK_EXCEPTIONS(i).ERROR_CODE` records the database error code for the failure.
+- `SQL%BULK_EXCEPTIONS.COUNT` records the total number of DML statements that failed.
+
+## How it works
+
+If an exception occurs during any iteration of the `FORALL` statement, all updates that occurred since the start of the execution of the `FORALL` statement are rolled back.
+
+!!! Note
+    This behavior isn't compatible with Oracle databases. Oracle allows explicit use of the `COMMIT` or `ROLLBACK` commands to control whether to commit or roll back updates that occurred prior to the exception.
+
+The `FORALL` statement creates a loop. Each iteration of the loop increments the `index` variable. You typically use the `index` in the loop to select a member of a collection. Control the number of iterations with the `lower_bound .. upper_bound` clause. The loop executes once for each integer between the `lower_bound` and `upper_bound` (inclusive), and the index increments by one with each iteration.
+
+For example:
+
+`FORALL i IN 2 .. 5`
+
+This expression creates a loop that executes four times. In the first iteration, the index `i` is set to the value `2`. In the second iteration, the index is set to the value `3`, and so on. The loop executes for the value `5` and then terminates.
+
+## Using FORALL with CREATE
+
+This example creates a table `emp_copy` that's an empty copy of the `emp` table.
The example declares a type `emp_tbl` that's an array. Each element in the array is of composite type, composed of the column definitions used to create the table `emp`. The example also creates an index on the `emp_tbl` type. + +`t_emp` is an associative array of type `emp_tbl`. The `SELECT` statement uses the `BULK COLLECT INTO` command to populate the `t_emp` array. After the `t_emp` array is populated, the `FORALL` statement iterates through the values `(i)` in the `t_emp` array index and inserts a row for each record into `emp_copy`. + +```sql +CREATE TABLE emp_copy(LIKE emp); + +DECLARE + + TYPE emp_tbl IS TABLE OF emp%ROWTYPE INDEX BY BINARY_INTEGER; + + t_emp emp_tbl; + +BEGIN + SELECT * FROM emp BULK COLLECT INTO t_emp; + + FORALL i IN t_emp.FIRST .. t_emp.LAST + INSERT INTO emp_copy VALUES t_emp(i); + +END; +``` + +## Using FORALL with UPDATE + +This example uses a `FORALL` statement to update the salary of three employees: + +```sql +DECLARE + TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER; + TYPE sal_tbl IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER; + t_empno EMPNO_TBL; + t_sal SAL_TBL; +BEGIN + t_empno(1) := 9001; + t_sal(1) := 3350.00; + t_empno(2) := 9002; + t_sal(2) := 2000.00; + t_empno(3) := 9003; + t_sal(3) := 4100.00; + FORALL i IN t_empno.FIRST..t_empno.LAST + UPDATE emp SET sal = t_sal(i) WHERE empno = t_empno(i); +END; + +SELECT * FROM emp WHERE empno > 9000; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+--------+---------+-----+----------+---------+------+-------- + 9001 | JONES | ANALYST | | | 3350.00 | | 40 + 9002 | LARSEN | CLERK | | | 2000.00 | | 40 + 9003 | WILSON | MANAGER | | | 4100.00 | | 40 +(3 rows) +``` + +## Using FORALL with DELETE + +This example deletes three employees in a `FORALL` statement: + +```sql +DECLARE + TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER; + t_empno EMPNO_TBL; +BEGIN + t_empno(1) := 9001; + t_empno(2) := 9002; + t_empno(3) := 9003; + FORALL i IN t_empno.FIRST..t_empno.LAST + DELETE FROM emp WHERE empno = t_empno(i); +END; + +SELECT * FROM emp WHERE empno > 9000; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+-------+-----+-----+----------+-----+------+-------- +(0 rows) +``` + +## Using FORALL with MERGE + +This example merges (inserts and updates) the records of test_table2 using `FORALL` statement: + +```sql +CREATE TABLE test_table1(a int, b int); +CREATE TABLE test_table2(a int, b int); + +INSERT INTO test_table1 SELECT i, i+1 from generate_series(1, 10) as i; +INSERT INTO test_table2(a) SELECT * from generate_series(1, 5); + +SELECT * from test_table2; +__OUTPUT__ +a | b +---+--- + 1 | + 2 | + 3 | + 4 | + 5 | +(5 rows) +``` + +```sql +DECLARE + TYPE type1 IS TABLE OF test_table1.a%TYPE INDEX BY BINARY_INTEGER; + TYPE type2 IS TABLE OF test_table1.b%TYPE INDEX BY BINARY_INTEGER; + rec1 type1; + rec2 type2; + BEGIN + SELECT * BULK COLLECT INTO rec1, rec2 from test_table1; + + FORALL i in rec1.FIRST..rec1.LAST + MERGE INTO test_table2 tgt USING + (SELECT rec1(i) a, rec2(i) b from dual) src ON (tgt.a = src.a) + WHEN MATCHED THEN + UPDATE SET tgt.b = src.b + WHEN NOT MATCHED THEN + INSERT (a, b) VALUES (src.a, src.b); + END; + +SELECT * from test_table2; +__OUTPUT__ +a | b +----+---- + 1 | 2 + 2 | 3 + 3 | 4 + 4 | 5 + 5 | 6 + 6 | 7 + 7 | 8 + 8 | 9 + 9 | 10 + 10 | 11 +(10 rows) +``` + +## Using FORALL with SAVE EXCEPTIONS + +This example shows how to use the `SAVE EXCEPTIONS` clause with the `FORALL` statement: + 
+```sql
+CREATE TABLE foo(id NUMBER(6) not null, name VARCHAR2(20));
+INSERT INTO foo values(1, 'Peter');
+```
+
+```sql
+DECLARE
+    TYPE namelist_t IS TABLE OF VARCHAR2 (5000);
+    names_with_errors namelist_t := namelist_t (RPAD ('ABCD', 1000, 'ABC'),'George',RPAD ('ABCD', 3000, 'ABC'),'Max');
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381);
+
+BEGIN
+    FORALL indx IN 1 .. names_with_errors.COUNT SAVE EXCEPTIONS
+        UPDATE foo SET name = names_with_errors (indx);
+
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        -- Handling exceptions
+        FOR i IN 1..SQL%BULK_EXCEPTIONS.COUNT LOOP
+            DBMS_OUTPUT.PUT_LINE('SAVE EXCEPTIONS: The Error at ' || SQL%BULK_EXCEPTIONS(i).ERROR_INDEX ||
+                ' Error Code ' || SQL%BULK_EXCEPTIONS(i).ERROR_CODE);
+        END LOOP;
+END;
+
+__OUTPUT__
+SAVE EXCEPTIONS: The Error at 1 Error Code -6502
+SAVE EXCEPTIONS: The Error at 3 Error Code -6502
+
+EDB-SPL Procedure successfully completed
+```
+
+```sql
+edb@1924=#select * from foo;
+ id | name
+----+------
+  1 | Max
+(1 row)
+```
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/01_select_bulk_collect.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/01_select_bulk_collect.mdx
new file mode 100644
index 00000000000..efabad8c21b
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/01_select_bulk_collect.mdx
@@ -0,0 +1,112 @@
+---
+title: "SELECT BULK COLLECT"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/01_select_bulk_collect/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+The following shows the syntax for the `BULK COLLECT` clause with the `SELECT INTO` statement. For details on the `SELECT INTO` statement, see [SELECT INTO](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/06_select_into/#select_into).
+
+```sql
+SELECT <select_expressions> BULK COLLECT INTO <collection>
+  [, ...] FROM ...;
+```
+
+If you specify a single collection, then `collection` can be a collection of a single field, or it can be a collection of a record type. If you specify more than one collection, then each `collection` must consist of a single field. `select_expressions` must match all fields in the target collections in number, order, and type-compatibility.
+ +This example uses the `BULK COLLECT` clause where the target collections are associative arrays consisting of a single field: + +```sql +DECLARE + TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER; + TYPE ename_tbl IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER; + TYPE job_tbl IS TABLE OF emp.job%TYPE INDEX BY BINARY_INTEGER; + TYPE hiredate_tbl IS TABLE OF emp.hiredate%TYPE INDEX BY BINARY_INTEGER; + TYPE sal_tbl IS TABLE OF emp.sal%TYPE INDEX BY BINARY_INTEGER; + TYPE comm_tbl IS TABLE OF emp.comm%TYPE INDEX BY BINARY_INTEGER; + TYPE deptno_tbl IS TABLE OF emp.deptno%TYPE INDEX BY BINARY_INTEGER; + t_empno EMPNO_TBL; + t_ename ENAME_TBL; + t_job JOB_TBL; + t_hiredate HIREDATE_TBL; + t_sal SAL_TBL; + t_comm COMM_TBL; + t_deptno DEPTNO_TBL; +BEGIN + SELECT empno, ename, job, hiredate, sal, comm, deptno BULK COLLECT + INTO t_empno, t_ename, t_job, t_hiredate, t_sal, t_comm, t_deptno + FROM emp; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME JOB HIREDATE ' || + 'SAL ' || 'COMM DEPTNO'); + DBMS_OUTPUT.PUT_LINE('----- ------- --------- --------- ' || + '-------- ' || '-------- ------'); + FOR i IN 1..t_empno.COUNT LOOP + DBMS_OUTPUT.PUT_LINE(t_empno(i) || ' ' || + RPAD(t_ename(i),8) || ' ' || + RPAD(t_job(i),10) || ' ' || + TO_CHAR(t_hiredate(i),'DD-MON-YY') || ' ' || + TO_CHAR(t_sal(i),'99,999.99') || ' ' || + TO_CHAR(NVL(t_comm(i),0),'99,999.99') || ' ' || + t_deptno(i)); + END LOOP; +END; +__OUTPUT__ +EMPNO ENAME JOB HIREDATE SAL COMM DEPTNO +----- ------- --------- --------- -------- -------- ------ +7369 SMITH CLERK 17-DEC-80 800.00 .00 20 +7499 ALLEN SALESMAN 20-FEB-81 1,600.00 300.00 30 +7521 WARD SALESMAN 22-FEB-81 1,250.00 500.00 30 +7566 JONES MANAGER 02-APR-81 2,975.00 .00 20 +7654 MARTIN SALESMAN 28-SEP-81 1,250.00 1,400.00 30 +7698 BLAKE MANAGER 01-MAY-81 2,850.00 .00 30 +7782 CLARK MANAGER 09-JUN-81 2,450.00 .00 10 +7788 SCOTT ANALYST 19-APR-87 3,000.00 .00 20 +7839 KING PRESIDENT 17-NOV-81 5,000.00 .00 10 +7844 TURNER SALESMAN 08-SEP-81 1,500.00 .00 30 +7876 ADAMS CLERK 23-MAY-87 1,100.00 .00 20 +7900 JAMES CLERK 03-DEC-81 950.00 .00 30 +7902 FORD ANALYST 03-DEC-81 3,000.00 .00 20 +7934 MILLER CLERK 23-JAN-82 1,300.00 .00 10 +``` + +This example produces the same result but uses an associative array on a record type defined with the `%ROWTYPE` attribute: + +```sql +DECLARE + TYPE emp_tbl IS TABLE OF emp%ROWTYPE INDEX BY BINARY_INTEGER; + t_emp EMP_TBL; +BEGIN + SELECT * BULK COLLECT INTO t_emp FROM emp; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME JOB HIREDATE ' || + 'SAL ' || 'COMM DEPTNO'); + DBMS_OUTPUT.PUT_LINE('----- ------- --------- --------- ' || + '-------- ' || '-------- ------'); + FOR i IN 1..t_emp.COUNT LOOP + DBMS_OUTPUT.PUT_LINE(t_emp(i).empno || ' ' || + RPAD(t_emp(i).ename,8) || ' ' || + RPAD(t_emp(i).job,10) || ' ' || + TO_CHAR(t_emp(i).hiredate,'DD-MON-YY') || ' ' || + TO_CHAR(t_emp(i).sal,'99,999.99') || ' ' || + TO_CHAR(NVL(t_emp(i).comm,0),'99,999.99') || ' ' || + t_emp(i).deptno); + END LOOP; +END; +__OUTPUT__ +EMPNO ENAME JOB HIREDATE SAL COMM DEPTNO +----- ------- --------- --------- -------- -------- ------ +7369 SMITH CLERK 17-DEC-80 800.00 .00 20 +7499 ALLEN SALESMAN 20-FEB-81 1,600.00 300.00 30 +7521 WARD SALESMAN 22-FEB-81 1,250.00 500.00 30 +7566 JONES MANAGER 02-APR-81 2,975.00 .00 20 +7654 MARTIN SALESMAN 28-SEP-81 1,250.00 1,400.00 30 +7698 BLAKE MANAGER 01-MAY-81 2,850.00 .00 30 +7782 CLARK MANAGER 09-JUN-81 2,450.00 .00 10 +7788 SCOTT ANALYST 19-APR-87 3,000.00 .00 20 +7839 KING PRESIDENT 17-NOV-81 5,000.00 .00 10 +7844 TURNER SALESMAN 
08-SEP-81  1,500.00        .00  30
+7876   ADAMS    CLERK      23-MAY-87  1,100.00        .00  20
+7900   JAMES    CLERK      03-DEC-81    950.00        .00  30
+7902   FORD     ANALYST    03-DEC-81  3,000.00        .00  20
+7934   MILLER   CLERK      23-JAN-82  1,300.00        .00  10
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/02_fetch_bulk_collect.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/02_fetch_bulk_collect.mdx
new file mode 100644
index 00000000000..4580a46afad
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/02_fetch_bulk_collect.mdx
@@ -0,0 +1,110 @@
+---
+title: "FETCH BULK COLLECT"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/02_fetch_bulk_collect/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You can use the `BULK COLLECT` clause with a `FETCH` statement. Instead of returning a single row at a time from the result set, `FETCH BULK COLLECT` returns all rows at once from the result set into one or more specified collections (of both scalar and composite types) unless restricted by the `LIMIT` clause:
+
+```sql
+FETCH <name> BULK COLLECT INTO <collection> [, ...] [ LIMIT <n> ];
+```
+
+For information on the `FETCH` statement, see [Fetching rows from a cursor](../../08_static_cursors/03_fetching_rows_from_a_cursor/#fetching_rows_from_a_cursor).
+
+If you specify a single collection, then `collection` can be a collection of a single field, or it can be a collection of a record type. If you specify more than one collection, then each `collection` must consist of a single field or a record type. The expressions in the `SELECT` list of the cursor identified by `name` must match all fields in the target collections in number, order, and type-compatibility. If you specify `LIMIT <n>`, the number of rows returned into the collection on each `FETCH` doesn't exceed `n`.
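+
+The `LIMIT` clause is useful when the result set is too large to materialize in memory at once. A minimal sketch (the batch size and column are illustrative) that drains a cursor in batches of five rows per fetch:
+
+```sql
+DECLARE
+    TYPE ename_tbl IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER;
+    t_ename         ename_tbl;
+    CURSOR emp_cur IS SELECT ename FROM emp;
+BEGIN
+    OPEN emp_cur;
+    LOOP
+        -- Each fetch replaces the collection's contents with at most 5 rows
+        FETCH emp_cur BULK COLLECT INTO t_ename LIMIT 5;
+        EXIT WHEN t_ename.COUNT = 0;   -- an empty batch means the cursor is exhausted
+        FOR i IN 1 .. t_ename.COUNT LOOP
+            DBMS_OUTPUT.PUT_LINE(t_ename(i));
+        END LOOP;
+    END LOOP;
+    CLOSE emp_cur;
+END;
+```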
+
+This example uses the `FETCH BULK COLLECT` statement to retrieve rows into an associative array:
+
+```sql
+DECLARE
+    TYPE emp_tbl IS TABLE OF emp%ROWTYPE INDEX BY BINARY_INTEGER;
+    t_emp           EMP_TBL;
+    CURSOR emp_cur IS SELECT * FROM emp;
+BEGIN
+    OPEN emp_cur;
+    FETCH emp_cur BULK COLLECT INTO t_emp;
+    CLOSE emp_cur;
+    DBMS_OUTPUT.PUT_LINE('EMPNO  ENAME    JOB        HIREDATE   ' ||
+        'SAL        ' || 'COMM      DEPTNO');
+    DBMS_OUTPUT.PUT_LINE('-----  -------  ---------  ---------  ' ||
+        '--------   ' || '--------  ------');
+    FOR i IN 1..t_emp.COUNT LOOP
+        DBMS_OUTPUT.PUT_LINE(t_emp(i).empno || '   ' ||
+            RPAD(t_emp(i).ename,8) || ' ' ||
+            RPAD(t_emp(i).job,10) || ' ' ||
+            TO_CHAR(t_emp(i).hiredate,'DD-MON-YY') || ' ' ||
+            TO_CHAR(t_emp(i).sal,'99,999.99') || ' ' ||
+            TO_CHAR(NVL(t_emp(i).comm,0),'99,999.99') || '  ' ||
+            t_emp(i).deptno);
+    END LOOP;
+END;
+__OUTPUT__
+EMPNO  ENAME    JOB        HIREDATE   SAL        COMM      DEPTNO
+-----  -------  ---------  ---------  --------   --------  ------
+7369   SMITH    CLERK      17-DEC-80    800.00        .00  20
+7499   ALLEN    SALESMAN   20-FEB-81  1,600.00     300.00  30
+7521   WARD     SALESMAN   22-FEB-81  1,250.00     500.00  30
+7566   JONES    MANAGER    02-APR-81  2,975.00        .00  20
+7654   MARTIN   SALESMAN   28-SEP-81  1,250.00   1,400.00  30
+7698   BLAKE    MANAGER    01-MAY-81  2,850.00        .00  30
+7782   CLARK    MANAGER    09-JUN-81  2,450.00        .00  10
+7788   SCOTT    ANALYST    19-APR-87  3,000.00        .00  20
+7839   KING     PRESIDENT  17-NOV-81  5,000.00        .00  10
+7844   TURNER   SALESMAN   08-SEP-81  1,500.00        .00  30
+7876   ADAMS    CLERK      23-MAY-87  1,100.00        .00  20
+7900   JAMES    CLERK      03-DEC-81    950.00        .00  30
+7902   FORD     ANALYST    03-DEC-81  3,000.00        .00  20
+7934   MILLER   CLERK      23-JAN-82  1,300.00        .00  10
+```
+
+```sql
+-- Create two object types of composite data types
+CREATE TYPE db_type1 as OBJECT(a INT, b VARCHAR2(10));
+CREATE TYPE db_type2 as OBJECT(c VARCHAR2(10), d INT);
+
+-- Create a table using the above object types
+CREATE TABLE db_tab(x DB_TYPE1, s1 NUMBER, y DB_TYPE2, s2 TIMESTAMP);
+
+-- Insert the rows into the table
+INSERT INTO db_tab values(DB_TYPE1(1, '10'), 1.1, DB_TYPE2('100',1000), '1-Jan-2021 12:30:11 PM');
+INSERT INTO db_tab values(DB_TYPE1(2, '20'), 2.2, DB_TYPE2('200',2000), '2-Feb-2022 08:40:52 AM');
+INSERT INTO db_tab values(DB_TYPE1(3, '30'), 3.3, DB_TYPE2('300',3000), '3-Mar-2023 04:20:33 PM');
+```
+
+```sql
+-- Use the FETCH BULK COLLECT INTO clause to fetch both scalar and composite types
+DECLARE
+    TYPE type1_tbl IS TABLE OF db_type1 INDEX BY BINARY_INTEGER;
+    TYPE s1_tbl IS TABLE OF number INDEX BY BINARY_INTEGER;
+    TYPE type2_tbl IS TABLE OF db_type2 INDEX BY BINARY_INTEGER;
+    TYPE s2_tbl IS TABLE OF timestamp INDEX BY BINARY_INTEGER;
+    x type1_tbl;
+    s1 s1_tbl;
+    y type2_tbl;
+    s2 s2_tbl;
+    CURSOR c1 is SELECT * FROM db_tab ORDER BY s1;
+BEGIN
+    OPEN c1;
+    FETCH c1 BULK COLLECT INTO x, s1, y, s2;
+    FOR i IN 1..x.count LOOP
+        DBMS_OUTPUT.PUT_LINE(x(i)||' '||s1(i)||' '||y(i)||' "'||s2(i)||'"');
+    END LOOP;
+    CLOSE c1;
+
+    SELECT * BULK COLLECT INTO x, s1, y, s2 FROM db_tab ORDER BY s1;
+    FOR i IN 1..x.count LOOP
+        DBMS_OUTPUT.PUT_LINE(x(i)||' '||s1(i)||' '||y(i)||' "'||s2(i)||'"');
+    END LOOP;
+END;
+__OUTPUT__
+(1,10) 1.1 (100,1000) "Fri Jan 01 12:30:11 2021"
+(2,20) 2.2 (200,2000) "Wed Feb 02 08:40:52 2022"
+(3,30) 3.3 (300,3000) "Fri Mar 03 04:20:33 2023"
+(1,10) 1.1 (100,1000) "Fri Jan 01 12:30:11 2021"
+(2,20) 2.2 (200,2000) "Wed Feb 02 08:40:52 2022"
+(3,30) 3.3 (300,3000) "Fri Mar 03 04:20:33 2023"
+```
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/03_execute_immediate_bulk_collect.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/03_execute_immediate_bulk_collect.mdx
new file mode 100644
index 00000000000..1c13762a6d4
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/03_execute_immediate_bulk_collect.mdx
@@ -0,0 +1,32 @@
+---
+title: "EXECUTE IMMEDIATE BULK COLLECT"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/03_execute_immediate_bulk_collect/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+You can use the `BULK COLLECT` clause with an `EXECUTE IMMEDIATE` statement to specify a collection to receive the returned rows:
+
+```sql
+EXECUTE IMMEDIATE '<sql_expression>'
+  BULK COLLECT INTO <collection> [, ...]
+  [USING { [<bind_type>] <bind_argument> } [, ...] ];
+```
+
+Where:
+
+`collection` specifies the name of a collection.
+
+`bind_type` specifies the parameter mode of the `bind_argument`.
+
+- A `bind_type` of `IN` specifies that the `bind_argument` contains a value that's passed to the `sql_expression`.
+- A `bind_type` of `OUT` specifies that the `bind_argument` receives a value from the `sql_expression`.
+- A `bind_type` of `IN OUT` specifies that the `bind_argument` is passed to `sql_expression` and then stores the value returned by `sql_expression`.
+
+`bind_argument` specifies a parameter that contains a value that either:
+
+- Is passed to the `sql_expression` (specified with a `bind_type` of `IN`)
+- Receives a value from the `sql_expression` (specified with a `bind_type` of `OUT`)
+- Does both (specified with a `bind_type` of `IN OUT`). Currently, `bind_type` is ignored and `bind_argument` is treated as `IN OUT`.
+
+If you specify a single collection, then `collection` can be a collection of a single field or a collection of a record type. If you specify more than one collection, each `collection` must consist of a single field.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx
new file mode 100644
index 00000000000..c46fe5daafa
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx
@@ -0,0 +1,144 @@
+---
+title: "RETURNING BULK COLLECT"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+## Syntax
+
+You can add `BULK COLLECT` to the `RETURNING INTO` clause of a `DELETE`, `INSERT`, or `UPDATE` command:
+
+```sql
+{ <insert> | <update> | <delete> }
+  RETURNING { * | <expr_1> [, <expr_2> ] ...}
+  BULK COLLECT INTO <collection> [, ...];
+```
+
+For information on the `RETURNING INTO` clause, see [Using the RETURNING INTO clause](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/05_using_the_returning_into_clause/#using_the_returning_into_clause).
`insert`, `update`, and `delete` are the same as the `INSERT`, `UPDATE`, and `DELETE` commands described in [INSERT](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/03_insert/#insert), [UPDATE](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/), and [DELETE](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/). + +If you specify a single collection, then `collection` can be a collection of a single field, or it can be a collection of a record type. If you specify more than one collection, then each `collection` must consist of a single field. The expressions following the `RETURNING` keyword must match all fields in the target collections in number, order, and type-compatibility. Specifying `*` returns all columns in the affected table. + +!!! Note + The use of `*` is an EDB Postgres Advanced Server extension and isn't compatible with Oracle databases. + +The `clerkemp` table created by copying the `emp` table is used in the examples that follow. + +```sql +CREATE TABLE clerkemp AS SELECT * FROM emp WHERE job = 'CLERK'; + +SELECT * FROM clerkemp; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+--------+-------+------+--------------------+---------+------+------- +- + 7369 | SMITH | CLERK | 7902 | 17-DEC-80 00:00:00 | 800.00 | | 20 + 7876 | ADAMS | CLERK | 7788 | 23-MAY-87 00:00:00 | 1100.00 | | 20 + 7900 | JAMES | CLERK | 7698 | 03-DEC-81 00:00:00 | 950.00 | | 30 + 7934 | MILLER | CLERK | 7782 | 23-JAN-82 00:00:00 | 1300.00 | | 10 +(4 rows) +``` + +## Examples + +This example increases all employee salaries by 1.5, stores the employees’ numbers, names, and new salaries in three associative arrays, and displays the contents of these arrays: + +```sql +DECLARE + TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER; + TYPE ename_tbl IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER; + TYPE sal_tbl IS TABLE OF emp.sal%TYPE INDEX BY BINARY_INTEGER; + t_empno EMPNO_TBL; + t_ename ENAME_TBL; + t_sal SAL_TBL; +BEGIN + UPDATE clerkemp SET sal = sal * 1.5 RETURNING empno, ename, sal + BULK COLLECT INTO t_empno, t_ename, t_sal; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME SAL '); + DBMS_OUTPUT.PUT_LINE('----- ------- -------- '); + FOR i IN 1..t_empno.COUNT LOOP + DBMS_OUTPUT.PUT_LINE(t_empno(i) || ' ' || RPAD(t_ename(i),8) || + ' ' || TO_CHAR(t_sal(i),'99,999.99')); + END LOOP; +END; +__OUTPUT__ +EMPNO ENAME SAL +----- ------- -------- +7369 SMITH 1,200.00 +7876 ADAMS 1,650.00 +7900 JAMES 1,425.00 +7934 MILLER 1,950.00 +``` + +This example uses a single collection defined with a record type to store the employees’ numbers, names, and new salaries: + +```sql +DECLARE + TYPE emp_rec IS RECORD ( + empno emp.empno%TYPE, + ename emp.ename%TYPE, + sal emp.sal%TYPE + ); + TYPE emp_tbl IS TABLE OF emp_rec INDEX BY BINARY_INTEGER; + t_emp EMP_TBL; +BEGIN + UPDATE clerkemp SET sal = sal * 1.5 RETURNING empno, ename, sal + BULK COLLECT INTO t_emp; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME SAL '); + DBMS_OUTPUT.PUT_LINE('----- ------- -------- '); + FOR i IN 1..t_emp.COUNT LOOP + DBMS_OUTPUT.PUT_LINE(t_emp(i).empno || ' ' || + RPAD(t_emp(i).ename,8) || ' ' || + TO_CHAR(t_emp(i).sal,'99,999.99')); + END LOOP; +END; +__OUTPUT__ +EMPNO ENAME SAL +----- ------- -------- +7369 SMITH 1,200.00 +7876 ADAMS 1,650.00 +7900 JAMES 1,425.00 +7934 MILLER 1,950.00 +``` + +This example deletes all rows from 
the `clerkemp` table and returns information on the deleted rows into an associative array. It then displays the array. + +```sql +DECLARE + TYPE emp_rec IS RECORD ( + empno emp.empno%TYPE, + ename emp.ename%TYPE, + job emp.job%TYPE, + hiredate emp.hiredate%TYPE, + sal emp.sal%TYPE, + comm emp.comm%TYPE, + deptno emp.deptno%TYPE + ); + TYPE emp_tbl IS TABLE OF emp_rec INDEX BY BINARY_INTEGER; + r_emp EMP_TBL; +BEGIN + DELETE FROM clerkemp RETURNING empno, ename, job, hiredate, sal, + comm, deptno BULK COLLECT INTO r_emp; + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME JOB HIREDATE ' || + 'SAL ' || 'COMM DEPTNO'); + DBMS_OUTPUT.PUT_LINE('----- ------- --------- --------- ' || + '-------- ' || '-------- ------'); + FOR i IN 1..r_emp.COUNT LOOP + DBMS_OUTPUT.PUT_LINE(r_emp(i).empno || ' ' || + RPAD(r_emp(i).ename,8) || ' ' || + RPAD(r_emp(i).job,10) || ' ' || + TO_CHAR(r_emp(i).hiredate,'DD-MON-YY') || ' ' || + TO_CHAR(r_emp(i).sal,'99,999.99') || ' ' || + TO_CHAR(NVL(r_emp(i).comm,0),'99,999.99') || ' ' || + r_emp(i).deptno); + END LOOP; +END; +__OUTPUT__ +EMPNO ENAME JOB HIREDATE SAL COMM DEPTNO +----- ------- --------- --------- -------- -------- ------ +7369 SMITH CLERK 17-DEC-80 1,200.00 .00 20 +7876 ADAMS CLERK 23-MAY-87 1,650.00 .00 20 +7900 JAMES CLERK 03-DEC-81 1,425.00 .00 30 +7934 MILLER CLERK 23-JAN-82 1,950.00 .00 10 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx new file mode 100644 index 00000000000..5b522e9e5ce --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Using the BULK COLLECT clause" +indexCards: simple +redirects: + - /epas/latest/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +SQL commands that return a result set consisting of a large number of rows might not operate efficiently. This is due to the constant context switching to transfer the entire result set that occurs between the database server and the client. + +You can mitigate this inefficiency by using a collection to gather the entire result set in memory, which the client can then access. You use the `BULK COLLECT` clause to specify the aggregation of the result set into a collection. + +You can use the `BULK COLLECT` clause with the `SELECT INTO`, `FETCH INTO`, and `EXECUTE IMMEDIATE` commands. You can also use it with the `RETURNING INTO` clause of the `DELETE`, `INSERT`, and `UPDATE` commands. + +
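+
+As a minimal illustration of the difference (the type and variable names are illustrative), the first loop below hands control back and forth once per row, while the `BULK COLLECT` version fills the collection in a single operation and then works from memory:
+
+```sql
+DECLARE
+    TYPE ename_tbl IS TABLE OF emp.ename%TYPE INDEX BY BINARY_INTEGER;
+    t_ename ename_tbl;
+BEGIN
+    -- Row at a time: the server and the SPL engine switch context for every row
+    FOR r_emp IN (SELECT ename FROM emp) LOOP
+        DBMS_OUTPUT.PUT_LINE(r_emp.ename);
+    END LOOP;
+    -- Bulk: one operation loads the whole result set into the collection
+    SELECT ename BULK COLLECT INTO t_ename FROM emp;
+    FOR i IN 1 .. t_ename.COUNT LOOP
+        DBMS_OUTPUT.PUT_LINE(t_ename(i));
+    END LOOP;
+END;
+```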
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx
new file mode 100644
index 00000000000..79753fba523
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx
@@ -0,0 +1,83 @@
+---
+title: "Errors and messages"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.081.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.175.html"
+redirects:
+  - /epas/latest/epas_compat_spl/12_working_with_collections/05_errors_and_messages/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+## Reporting messages
+
+Use the `DBMS_OUTPUT.PUT_LINE` statement to report messages:
+
+```sql
+DBMS_OUTPUT.PUT_LINE ( <message> );
+```
+
+Where `message` is any expression evaluating to a string.
+
+This example displays the message on the user’s output display:
+
+```sql
+DBMS_OUTPUT.PUT_LINE('My name is John');
+```
+
+The special variables `SQLCODE` and `SQLERRM` contain a numeric code and a text message, respectively, that describe the outcome of the last SQL command issued. If any other error occurs in the program, such as division by zero, these variables contain information pertaining to the error.
+
+## SQLCODE and SQLERRM functions
+
+The `SQLCODE` and `SQLERRM` functions are available in EDB Postgres Advanced Server.
+
+In an exception handler, the `SQLCODE` function returns the numeric code of the exception being handled. Outside an exception handler, `SQLCODE` returns `0`.
+
+The `SQLERRM` function returns the error message associated with the current `SQLCODE` value. If you pass an error code value to the `SQLERRM` function, it returns the error message associated with that error code, regardless of the current error raised.
+
+A SQL statement can't invoke the `SQLCODE` and `SQLERRM` functions.
+
+Examples:
+
+```sql
+DECLARE
+    l_var number;
+BEGIN
+    l_var := -1476;
+    dbms_output.put_line(sqlerrm(l_var::int));
+    l_var := 0;
+    dbms_output.put_line(sqlerrm(l_var::int));
+    l_var := 12;
+    dbms_output.put_line(sqlerrm(l_var::int));
+    l_var := 1403;
+    dbms_output.put_line(sqlerrm(l_var::int));
+END;
+__OUTPUT__
+division_by_zero
+normal, successful completion
+message 12 not found
+message 1403 not found
+```
+
+```sql
+DECLARE
+    Balance integer := 24;
+BEGIN
+    IF (Balance <= 100) THEN
+        Raise_Application_Error (-20343, 'The balance is too low.');
+    END IF;
+EXCEPTION
+    WHEN OTHERS THEN
+        dbms_output.put_line('sqlcode ==>'|| sqlcode);
+        dbms_output.put_line('sqlerrm ==>'|| sqlerrm);
+        dbms_output.put_line('sqlerrm(sqlcode) ==>'|| sqlerrm(sqlcode));
+END;
+__OUTPUT__
+sqlcode ==>-20343
+sqlerrm ==>EDB-20343: The balance is too low.
+sqlerrm(sqlcode) ==>EDB-20343: The balance is too low.
+``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/index.mdx new file mode 100644 index 00000000000..f51279460a1 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/12_working_with_collections/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Working with collections" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.080.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.174.html" +redirects: + - /epas/latest/epas_compat_spl/12_working_with_collections/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Collection operators allow you to transform, query, and manipulate the contents of a collection. + +
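+For example, a brief sketch of one such operator, `MULTISET UNION`, which concatenates the contents of two nested tables (the types and values here are illustrative only, not from the original text):
+
+```sql
+DECLARE
+    TYPE int_tbl IS TABLE OF NUMBER;
+    t1      int_tbl := int_tbl(1,2,3);
+    t2      int_tbl := int_tbl(3,4);
+    t3      int_tbl;
+BEGIN
+    -- Duplicates are retained; MULTISET UNION DISTINCT would drop them
+    t3 := t1 MULTISET UNION t2;
+    DBMS_OUTPUT.PUT_LINE('Element count: ' || t3.COUNT);   -- prints 5
+END;
+```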
+ +table using_the_multiset_union_operator using_the_forall_statement using_the_bulk_collect_clause errors_and_messages + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/01_overview.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/01_overview.mdx new file mode 100644 index 00000000000..8ae539a4132 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/01_overview.mdx @@ -0,0 +1,17 @@ +--- +title: "Trigger overview" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.083.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.177.html" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/01_overview/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A *trigger* is a named SPL code block that's associated with a table and stored in the database. When a specified event occurs on the associated table, the SPL code block executes. The trigger is said to be *fired* when the code block executes. + +The event that causes a trigger to fire can be any combination of an insert, update, or deletion carried out on the table, either directly or indirectly. If the table is the object of a SQL `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE` command, the trigger is directly fired when the corresponding insert, update, delete, or truncate event is defined as a *triggering event*. The events that fire the trigger are defined in the `CREATE TRIGGER` command. + +A trigger can fire indirectly if a triggering event occurs on the table as a result of an event initiated on another table. For example, suppose a trigger is defined on a table containing a foreign key defined with the `ON DELETE CASCADE` clause, and a row in the parent table is deleted. In this case, all children of the parent are deleted as well. If deletion is a triggering event on the child table, deleting the children causes the trigger to fire. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx new file mode 100644 index 00000000000..347856c8faa --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx @@ -0,0 +1,25 @@ +--- +title: "Types of triggers" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.084.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.178.html" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/02_types_of_triggers/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +EDB Postgres Advanced Server supports *row-level* and *statement-level* triggers. + +- A row-level trigger fires once for each row that's affected by a triggering event. 
For example, suppose deletion is defined as a triggering event on a table, and a single `DELETE` command is issued that deletes five rows from the table. In this case, the trigger fires five times, once for each row.
+
+- A statement-level trigger fires once per triggering statement, regardless of the number of rows affected by the triggering event. In the previous example of a single `DELETE` command deleting five rows, a statement-level trigger fires only once.
+
+For statement-level triggers, you can define whether the trigger code block executes before or after the triggering statement. For row-level triggers, you can define whether the trigger code block executes before or after each row is affected by the triggering statement.
+
+- In a *before* row-level trigger, the trigger code block executes before the triggering action is carried out on each affected row. In a *before* statement-level trigger, the trigger code block executes before the action of the triggering statement is carried out.
+
+- In an *after* row-level trigger, the trigger code block executes after the triggering action is carried out on each affected row. In an *after* statement-level trigger, the trigger code block executes after the action of the triggering statement is carried out.
+
+In a *compound trigger*, you can define a statement-level and a row-level trigger in a single trigger and fire it at more than one timing point. For details, see [Compound triggers](06_compound_triggers/#compound_triggers).
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx
new file mode 100644
index 00000000000..5c072f9fe2e
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx
@@ -0,0 +1,209 @@
+---
+title: "Creating triggers"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.085.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.179.html"
+redirects:
+  - /epas/latest/epas_compat_spl/13_triggers/03_creating_triggers/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The `CREATE TRIGGER` command defines and names a trigger that's stored in the database. You can create a simple trigger or a compound trigger.
+
+## Creating a simple trigger
+
+`CREATE TRIGGER` — Define a simple trigger.
+
+```sql
+CREATE [ OR REPLACE ] TRIGGER <name>
+  { BEFORE | AFTER | INSTEAD OF }
+  { INSERT | UPDATE | DELETE | TRUNCATE }
+      [ OR { INSERT | UPDATE | DELETE | TRUNCATE } ] [, ...]
+    ON <table>
+  [ REFERENCING { OLD AS <old> | NEW AS <new> } ...]
+  [ FOR EACH ROW ]
+  [ WHEN <condition> ]
+  [ DECLARE
+      [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+      <declaration>; [, ...] ]
+    BEGIN
+      <statement>; [, ...]
+  [ EXCEPTION
+    { WHEN <exception> [ OR <exception> ] [...] THEN
+        <statement>; [, ...] } [, ...]
+  ]
+    END
+```
+
+## Creating a compound trigger
+
+`CREATE TRIGGER` — Define a compound trigger.
+
+```sql
+CREATE [ OR REPLACE ] TRIGGER <name>
+  FOR { INSERT | UPDATE | DELETE | TRUNCATE }
+      [ OR { INSERT | UPDATE | DELETE | TRUNCATE } ] [, ...]
+    ON <table>
+  [ REFERENCING { OLD AS <old> | NEW AS <new> } ...]
+  [ WHEN <condition> ]
+  COMPOUND TRIGGER
+  [ <private_declaration>; ] ...
+  [ <procedure_or_function_definition> ] ...
+  <compound_trigger_definition>
+  END
+```
+
+Where `private_declaration` is an identifier of a private variable that can be accessed by any procedure or function. There can be zero, one, or more private variables. `private_declaration` can be any of the following:
+
+- Variable declaration
+- Record declaration
+- Collection declaration
+- `REF CURSOR` and cursor variable declaration
+- `TYPE` definitions for records, collections, and `REF CURSOR`
+- Exception
+- Object variable declaration
+
+Where `procedure_or_function_definition :=`
+
+`procedure_definition | function_definition`
+
+Where `procedure_definition :=`
+
+```sql
+PROCEDURE proc_name [ argument_list ]
+  [ options_list ]
+  { IS | AS }
+    procedure_body
+  END [ proc_name ] ;
+```
+
+Where `procedure_body :=`
+
+```sql
+[ <declaration>; ] [, ...]
+BEGIN
+  <statement>; [...]
+[ EXCEPTION
+  { WHEN <exception> [ OR <exception> ] [...] THEN <statement>; }
+  [...]
+]
+```
+
+Where `function_definition :=`
+
+```sql
+FUNCTION func_name [ argument_list ]
+  RETURN rettype [ DETERMINISTIC ]
+  [ options_list ]
+  { IS | AS }
+    function_body
+  END [ func_name ] ;
+```
+
+Where `function_body :=`
+
+```sql
+[ <declaration>; ] [, ...]
+BEGIN
+  <statement>; [...]
+[ EXCEPTION
+  { WHEN <exception> [ OR <exception> ] [...] THEN <statement>; }
+  [...]
+]
+```
+
+Where `compound_trigger_definition` is:
+
+```sql
+{ compound_trigger_event } { IS | AS }
+  compound_trigger_body
+END [ compound_trigger_event ] [ ... ]
+```
+
+Where `compound_trigger_event:=`
+
+```sql
+[ BEFORE STATEMENT | BEFORE EACH ROW | AFTER EACH ROW | AFTER STATEMENT | INSTEAD OF EACH ROW ]
+```
+
+Where `compound_trigger_body:=`
+
+```sql
+[ <declaration>; ] [, ...]
+BEGIN
+  <statement>; [...]
+[ EXCEPTION
+  { WHEN <exception> [ OR <exception> ] [...] THEN <statement>; }
+  [...]
+]
+```
+
+## Description
+
+`CREATE TRIGGER` defines a new trigger. `CREATE OR REPLACE TRIGGER` creates a new trigger or replaces an existing definition.
+
+If you're using the `CREATE TRIGGER` keywords to create a trigger, the name of the new trigger must not match any existing trigger defined on the same table. New triggers are created in the same schema as the table on which the triggering event is defined.
+
+If you're updating the definition of an existing trigger, use the `CREATE OR REPLACE TRIGGER` keywords.
+
+When you use syntax compatible with Oracle databases to create a trigger, the trigger runs as a `SECURITY DEFINER` function.
+
+## Parameters
+
+`name`
+
+ The name of the trigger to create.
+
+`BEFORE | AFTER`
+
+ Determines whether the trigger is fired before or after the triggering event.
+
+`INSTEAD OF`
+
+ Trigger that modifies an updatable view. The trigger executes to update the underlying tables appropriately. The `INSTEAD OF` trigger executes for each row of the view that's updated or modified.
+
+`INSERT | UPDATE | DELETE | TRUNCATE`
+
+ Defines the triggering event.
+
+`table`
+
+ The name of the table or view on which the triggering event occurs.
+
+`condition`
+
+ A Boolean expression that determines if the trigger actually executes. If `condition` evaluates to `TRUE`, the trigger fires.
+
+- If the simple trigger definition includes the `FOR EACH ROW` keywords, the `WHEN` clause can refer to columns of the old or new row values by writing `OLD.column_name` or `NEW.column_name` respectively. `INSERT` triggers can't refer to `OLD`, and `DELETE` triggers can't refer to `NEW`.
+
+- If the compound trigger definition includes a statement-level trigger having a `WHEN` clause, then the trigger executes without evaluating the expression in the `WHEN` clause.
Similarly, if a compound trigger definition includes a row-level trigger having a `WHEN` clause, then the trigger executes if the expression evaluates to `TRUE`.
+
+- If the trigger includes the `INSTEAD OF` keywords, it can't include a `WHEN` clause. A `WHEN` clause can't contain subqueries.
+
+`REFERENCING { OLD AS old | NEW AS new } ...`
+
+ Use the `REFERENCING` clause to reference old rows and new rows. It's restricted in that `old` can be replaced only by an identifier named `old` or any equivalent that's saved in all lowercase. Examples include `REFERENCING OLD AS old`, `REFERENCING OLD AS OLD`, or `REFERENCING OLD AS "old"`. Also, `new` can be replaced only by an identifier named `new` or any equivalent that's saved in all lowercase. Examples include `REFERENCING NEW AS new`, `REFERENCING NEW AS NEW`, or `REFERENCING NEW AS "new"`.
+
+ You can specify one or both phrases `OLD AS old` and `NEW AS new` in the `REFERENCING` clause, such as `REFERENCING NEW AS New OLD AS Old`. This clause isn't compatible with Oracle databases in that you can't use identifiers other than `old` or `new`.
+
+`FOR EACH ROW`
+
+ Determines whether to fire the trigger once for every row affected by the triggering event or once per SQL statement. If specified, the trigger is fired once for every affected row (row-level trigger). Otherwise the trigger is a statement-level trigger.
+
+`PRAGMA AUTONOMOUS_TRANSACTION`
+
+ `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the trigger as an autonomous transaction.
+
+`declaration`
+
+ A variable, type, `REF CURSOR`, or subprogram declaration. If subprogram declarations are included, you must declare them after all other variable, type, and `REF CURSOR` declarations.
+
+`statement`
+
+ An SPL program statement. A `DECLARE - BEGIN - END` block is considered an SPL statement. Thus, the trigger body can contain nested blocks.
+
+`exception`
+
+ An exception condition name such as `NO_DATA_FOUND`.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx
new file mode 100644
index 00000000000..160b7221415
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx
@@ -0,0 +1,51 @@
+---
+title: "Trigger variables"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.086.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.180.html"
+redirects:
+  - /epas/latest/epas_compat_spl/13_triggers/04_trigger_variables/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+In the trigger code block, several special variables are available for use.
+
+## NEW
+
+ `NEW` is a pseudo-record name that refers to the new table row for insert and update operations in row-level triggers. This variable doesn't apply to statement-level triggers and delete operations of row-level triggers.
+
+ Its usage is:
+
+ ```text
+ :NEW.column
+ ```
+
+ Where `column` is the name of a column in the table where the trigger is defined.
+ + The initial content of `:NEW.column` is the value in the named column of the new row to insert. Or, when used in a before row-level trigger, it's the value of the new row that replaces the old one. When used in an after row-level trigger, this value is already stored in the table since the action already occurred on the affected row. + + In the trigger code block, you can use `:NEW.column` like any other variable. If a value is assigned to `:NEW.column` in the code block of a before row-level trigger, the assigned value is used in the new inserted or updated row. + +## OLD + + `OLD` is a pseudo-record name that refers to the old table row for update and delete operations in row-level triggers. This variable doesn't apply in statement-level triggers and in insert operations of row-level triggers. + + Its usage is: `:OLD.column`, where `column` is the name of a column in the table on which the trigger is defined. + + The initial content of `:OLD.column` is the value in the named column of the row to delete or of the old row to replace with the new one when used in a before row-level trigger. When used in an after row-level trigger, this value is no longer stored in the table since the action already occurred on the affected row. + + In the trigger code block, you can use `:OLD.column` like any other variable. Assigning a value to `:OLD.column` has no effect on the action of the trigger. + +## INSERTING + + `INSERTING` is a conditional expression that returns `TRUE` if an insert operation fired the trigger. Otherwise it returns `FALSE`. + +## UPDATING + + `UPDATING` is a conditional expression that returns `TRUE` if an update operation fired the trigger. Otherwise it returns `FALSE`. + +## DELETING + + `DELETING` is a conditional expression that returns `TRUE` if a delete operation fired the trigger. Otherwise it returns `FALSE`. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/05_transactions_and_exceptions.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/05_transactions_and_exceptions.mdx new file mode 100644 index 00000000000..6b128768968 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/05_transactions_and_exceptions.mdx @@ -0,0 +1,17 @@ +--- +title: "Transactions and exceptions" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.087.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.181.html" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/05_transactions_and_exceptions/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A trigger is always executed as part of the same transaction in which the triggering statement is executing. When no exceptions occur in the trigger code block, the effects of any triggering commands in the trigger are committed only if the transaction containing the triggering statement is committed. Therefore, if the transaction is rolled back, the effects of any triggering commands in the trigger are also rolled back. 
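+A small sketch of this behavior follows (illustrative only; the `trig_log` table and `emp_log_trig` trigger are hypothetical names, and the sample `emp` table is assumed):
+
+```sql
+CREATE TABLE trig_log (msg VARCHAR2(40));
+
+CREATE OR REPLACE TRIGGER emp_log_trig
+    AFTER INSERT ON emp
+    FOR EACH ROW
+BEGIN
+    INSERT INTO trig_log VALUES ('employee added');
+END;
+
+BEGIN;
+INSERT INTO emp (empno, ename, deptno) VALUES (9100, 'TURNER', 20);
+ROLLBACK;
+
+-- Rolling back the containing transaction undoes both the insert on emp
+-- and the trigger's insert on trig_log, so this count is 0.
+SELECT COUNT(*) FROM trig_log;
+```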
+ +If an exception does occur in the trigger code block, but it is caught and handled in an exception section, the effects of any triggering commands in the trigger are still rolled back. The triggering statement, however, is rolled back only if the application forces a rollback of the containing transaction. + +If an unhandled exception occurs in the trigger code block, the transaction that contains the trigger is aborted and rolled back. Therefore, the effects of any triggering commands in the trigger and the triggering statement are all rolled back. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx new file mode 100644 index 00000000000..e7cf9d6c4d2 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx @@ -0,0 +1,74 @@ +--- +title: "Compound triggers" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/06_compound_triggers/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +EDB Postgres Advanced Server has compatible syntax to support compound triggers. + +## Compound trigger overview + +A compound trigger combines all the triggering timings under one trigger body that you can invoke at one or more *timing points*. A timing point is a point in time related to a triggering statement, which is an `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE` statement that modifies data. The supported timing points are: + +- `BEFORE STATEMENT` — Before the triggering statement executes. +- `BEFORE EACH ROW` — Before each row that the triggering statement affects. +- `AFTER EACH ROW` — After each row that the triggering statement affects. +- `AFTER STATEMENT` — After the triggering statement executes. +- `INSTEAD OF EACH ROW` — Trigger fires once for every row affected by the triggering statement. + +A compound trigger can include any combination of timing points defined in a single trigger. + +The optional declaration section in a compound trigger allows you to declare trigger-level variables and subprograms. The content of the declaration is accessible to all timing points referenced by the trigger definition. The variables and subprograms created by the declaration persist only for the duration of the triggering statement. + +## Syntax + +A compound trigger contains a declaration followed by a PL block for each timing point: + +```sql +CREATE OR REPLACE TRIGGER compound_trigger_name +FOR INSERT OR UPDATE OR DELETE ON table_name +COMPOUND TRIGGER + -- Global Declaration Section (optional) + -- Variables declared here can be used inside any timing-point blocks. + + BEFORE STATEMENT IS + BEGIN + NULL; + END BEFORE STATEMENT; + + BEFORE EACH ROW IS + BEGIN + NULL; + END BEFORE EACH ROW; + + AFTER EACH ROW IS + BEGIN + NULL; + END AFTER EACH ROW; + + + AFTER STATEMENT IS + BEGIN + NULL; + END AFTER STATEMENT; +END compound_trigger_name; +/ +Trigger created. +``` + +!!! Note + You don't have to have all the four timing blocks. You can create a compound trigger for any of the required timing points. + +## Restrictions + +A compound trigger has the following restrictions: + +- A compound trigger body is made up of a compound trigger block. +- You can define a compound trigger on a table or a view. +- You can't transfer exceptions to another timing-point section. They must be handled separately in that section only by each compound trigger block. 
+- If a `GOTO` statement is specified in a timing-point section, then the target of the `GOTO` statement must also be specified in the same timing-point section. +- `:OLD` and `:NEW` variable identifiers can't exist in the declarative section, the `BEFORE STATEMENT` section, or the `AFTER STATEMENT` section. +- `:NEW` values are modified only by the `BEFORE EACH ROW` block. +- The sequence of compound trigger timing-point execution is specific. However, if a simple trigger is in the same timing point, then the simple trigger is fired first, followed by the compound triggers. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/01_before_statement_level_trigger.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/01_before_statement_level_trigger.mdx new file mode 100644 index 00000000000..6ae46c32315 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/01_before_statement_level_trigger.mdx @@ -0,0 +1,35 @@ +--- +title: "Before statement-level trigger" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/01_before_statement_level_trigger/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows a simple before statement-level trigger that displays a message before an insert operation on the `emp` table: + +```sql +CREATE OR REPLACE TRIGGER emp_alert_trig + BEFORE INSERT ON emp +BEGIN + DBMS_OUTPUT.PUT_LINE('New employees are about to be added'); +END; +``` + +The following `INSERT` is constructed so that several new rows are inserted upon a single execution of the command. For each row that has an employee id between 7900 and 7999, a new row is inserted with an employee id incremented by 1000. The following are the results of executing the command when three new rows are inserted: + +```sql +INSERT INTO emp (empno, ename, deptno) SELECT empno + 1000, ename, 40 + FROM emp WHERE empno BETWEEN 7900 AND 7999; +New employees are about to be added + +SELECT empno, ename, deptno FROM emp WHERE empno BETWEEN 8900 AND 8999; +__OUTPUT__ + EMPNO ENAME DEPTNO +---------- ---------- ---------- + 8900 JAMES 40 + 8902 FORD 40 + 8934 MILLER 40 +``` + +The message `New employees are about to be added` is displayed once by the firing of the trigger even though the result adds three rows. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/02_after_statement_level_trigger.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/02_after_statement_level_trigger.mdx new file mode 100644 index 00000000000..57275266c9c --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/02_after_statement_level_trigger.mdx @@ -0,0 +1,60 @@ +--- +title: "After statement-level trigger" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/02_after_statement_level_trigger/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows an after statement-level trigger. When an insert, update, or delete operation occurs on the `emp` table, a row is added to the `empauditlog` table recording the date, user, and action. 
+ +```sql +CREATE TABLE empauditlog ( + audit_date DATE, + audit_user VARCHAR2(20), + audit_desc VARCHAR2(20) +); +CREATE OR REPLACE TRIGGER emp_audit_trig + AFTER INSERT OR UPDATE OR DELETE ON emp +DECLARE + v_action VARCHAR2(20); +BEGIN + IF INSERTING THEN + v_action := 'Added employee(s)'; + ELSIF UPDATING THEN + v_action := 'Updated employee(s)'; + ELSIF DELETING THEN + v_action := 'Deleted employee(s)'; + END IF; + INSERT INTO empauditlog VALUES (SYSDATE, USER, + v_action); +END; +``` + +In the following sequence of commands, two rows are inserted into the `emp` table using two `INSERT` commands. One `UPDATE` command updates the `sal` and `comm` columns of both rows. Then, one `DELETE` command deletes both rows. + +```sql +INSERT INTO emp VALUES (9001,'SMITH','ANALYST',7782,SYSDATE,NULL,NULL,10); + +INSERT INTO emp VALUES (9002,'JONES','CLERK',7782,SYSDATE,NULL,NULL,10); + +UPDATE emp SET sal = 4000.00, comm = 1200.00 WHERE empno IN (9001, 9002); + +DELETE FROM emp WHERE empno IN (9001, 9002); + +SELECT TO_CHAR(AUDIT_DATE,'DD-MON-YY HH24:MI:SS') AS "AUDIT DATE", + audit_user, audit_desc FROM empauditlog ORDER BY 1 ASC; +__OUTPUT__ +AUDIT DATE AUDIT_USER AUDIT_DESC +------------------ -------------------- -------------------- +31-MAR-05 14:59:48 SYSTEM Added employee(s) +31-MAR-05 15:00:07 SYSTEM Added employee(s) +31-MAR-05 15:00:19 SYSTEM Updated employee(s) +31-MAR-05 15:00:34 SYSTEM Deleted employee(s) +``` + +The contents of the `empauditlog` table show how many times the trigger was fired: + +- Once each for the two inserts +- Once for the update (even though two rows were changed) +- Once for the deletion (even though two rows were deleted) diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/03_before_row_level_trigger.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/03_before_row_level_trigger.mdx new file mode 100644 index 00000000000..346e3634e0c --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/03_before_row_level_trigger.mdx @@ -0,0 +1,35 @@ +--- +title: "Before row-level trigger" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/03_before_row_level_trigger/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows a before row-level trigger that calculates the commission of every new employee belonging to department 30 that's inserted into the `emp` table: + +```sql +CREATE OR REPLACE TRIGGER emp_comm_trig + BEFORE INSERT ON emp + FOR EACH ROW +BEGIN + IF :NEW.deptno = 30 THEN + :NEW.comm := :NEW.sal * .4; + END IF; +END; +``` + +The listing following the addition of the two employees shows that the trigger computed their commissions and inserted it as part of the new employee rows: + +```sql +INSERT INTO emp VALUES (9005,'ROBERS','SALESMAN',7782,SYSDATE,3000.00,NULL,30); + +INSERT INTO emp VALUES (9006,'ALLEN','SALESMAN',7782,SYSDATE,4500.00,NULL,30); + +SELECT * FROM emp WHERE empno IN (9005, 9006); +__OUTPUT__ + EMPNO ENAME JOB MGR HIREDATE SAL COMM DEPTNO +------ ------ -------- ------ ---------- --------- ---------- ------------- + 9005 ROBERS SALESMAN 7782 01-APR-05 3000 1200 30 + 9006 ALLEN SALESMAN 7782 01-APR-05 4500 1800 30 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/04_after_row_level_trigger.mdx 
b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/04_after_row_level_trigger.mdx new file mode 100644 index 00000000000..43ac9b3eba4 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/04_after_row_level_trigger.mdx @@ -0,0 +1,114 @@ +--- +title: "After row-level trigger" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/04_after_row_level_trigger/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows an after row-level trigger. When a new employee row is inserted, the trigger adds a row to the `jobhist` table for that employee. When an existing employee is updated, the trigger sets the `enddate` column of the latest `jobhist` row (assumed to be the one with a null `enddate`) to the current date and inserts a new `jobhist` row with the employee’s new information. + +Then, the trigger adds a row to the `empchglog` table with a description of the action. + +```sql +CREATE TABLE empchglog ( + chg_date DATE, + chg_desc VARCHAR2(30) +); +CREATE OR REPLACE TRIGGER emp_chg_trig + AFTER INSERT OR UPDATE OR DELETE ON emp + FOR EACH ROW +DECLARE + v_empno emp.empno%TYPE; + v_deptno emp.deptno%TYPE; + v_dname dept.dname%TYPE; + v_action VARCHAR2(7); + v_chgdesc jobhist.chgdesc%TYPE; +BEGIN + IF INSERTING THEN + v_action := 'Added'; + v_empno := :NEW.empno; + v_deptno := :NEW.deptno; + INSERT INTO jobhist VALUES (:NEW.empno, SYSDATE, NULL, + :NEW.job, :NEW.sal, :NEW.comm, :NEW.deptno, 'New Hire'); + ELSIF UPDATING THEN + v_action := 'Updated'; + v_empno := :NEW.empno; + v_deptno := :NEW.deptno; + v_chgdesc := ''; + IF NVL(:OLD.ename, '-null-') != NVL(:NEW.ename, '-null-') THEN + v_chgdesc := v_chgdesc || 'name, '; + END IF; + IF NVL(:OLD.job, '-null-') != NVL(:NEW.job, '-null-') THEN + v_chgdesc := v_chgdesc || 'job, '; + END IF; + IF NVL(:OLD.sal, -1) != NVL(:NEW.sal, -1) THEN + v_chgdesc := v_chgdesc || 'salary, '; + END IF; + IF NVL(:OLD.comm, -1) != NVL(:NEW.comm, -1) THEN + v_chgdesc := v_chgdesc || 'commission, '; + END IF; + IF NVL(:OLD.deptno, -1) != NVL(:NEW.deptno, -1) THEN + v_chgdesc := v_chgdesc || 'department, '; + END IF; + v_chgdesc := 'Changed ' || RTRIM(v_chgdesc, ', '); + UPDATE jobhist SET enddate = SYSDATE WHERE empno = :OLD.empno + AND enddate IS NULL; + INSERT INTO jobhist VALUES (:NEW.empno, SYSDATE, NULL, + :NEW.job, :NEW.sal, :NEW.comm, :NEW.deptno, v_chgdesc); + ELSIF DELETING THEN + v_action := 'Deleted'; + v_empno := :OLD.empno; + v_deptno := :OLD.deptno; + END IF; + + INSERT INTO empchglog VALUES (SYSDATE, + v_action || ' employee # ' || v_empno); +END; +``` + +In the first sequence of the following commands, two employees are added using two separate `INSERT` commands. Then both are updated using a single `UPDATE` command. The contents of the `jobhist` table show the action of the trigger for each affected row: two new-hire entries for the two new employees and two changed commission records for the updated commissions on the two employees. The `empchglog` table also shows the trigger was fired a total of four times, once for each action on the two rows. 
+ +```sql +INSERT INTO emp VALUES (9003,'PETERS','ANALYST',7782,SYSDATE,5000.00,NULL,40); + +INSERT INTO emp VALUES (9004,'AIKENS','ANALYST',7782,SYSDATE,4500.00,NULL,40); + +UPDATE emp SET comm = sal * 1.1 WHERE empno IN (9003, 9004); + +SELECT * FROM jobhist WHERE empno IN (9003, 9004); +__OUTPUT__ + EMPNO STARTDATE ENDDATE JOB SAL COMM DEPTNO CHGDESC +---------- --------- --------- --------- ---------- ---------- ---------- ------------- + 9003 31-MAR-05 31-MAR-05 ANALYST 5000 40 New Hire + 9004 31-MAR-05 31-MAR-05 ANALYST 4500 40 New Hire + 9003 31-MAR-05 ANALYST 5000 5500 40 Changed commission + 9004 31-MAR-05 ANALYST 4500 4950 40 Changed commission +``` +```sql +SELECT * FROM empchglog; +__OUTPUT__ +CHG_DATE CHG_DESC +--------- ------------------------------ +31-MAR-05 Added employee # 9003 +31-MAR-05 Added employee # 9004 +31-MAR-05 Updated employee # 9003 +31-MAR-05 Updated employee # 9004 +``` + +Then, a single `DELETE` command deletes both employees. The `empchglog` table shows the trigger was fired twice, once for each deleted employee. + +```sql +DELETE FROM emp WHERE empno IN (9003, 9004); + +SELECT * FROM empchglog; +__OUTPUT__ +CHG_DATE CHG_DESC +--------- ------------------------------ +31-MAR-05 Added employee # 9003 +31-MAR-05 Added employee # 9004 +31-MAR-05 Updated employee # 9003 +31-MAR-05 Updated employee # 9004 +31-MAR-05 Deleted employee # 9003 +31-MAR-05 Deleted employee # 9004 +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/05_instead_of_trigger.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/05_instead_of_trigger.mdx new file mode 100644 index 00000000000..1df7dac19a3 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/05_instead_of_trigger.mdx @@ -0,0 +1,63 @@ +--- +title: "INSTEAD OF trigger" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/05_instead_of_trigger/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +This example shows an `INSTEAD OF` trigger for inserting a new employee row into the `emp_vw` view. The `CREATE VIEW` statement creates the `emp_vw` view by joining the two tables. The trigger adds the corresponding new rows into the `emp` and `dept` tables, respectively, for a specific employee. + +```sql +CREATE VIEW emp_vw AS SELECT * FROM emp e JOIN dept d USING(deptno); +CREATE VIEW + +CREATE OR REPLACE TRIGGER empvw_instead_of_trig + INSTEAD OF INSERT ON emp_vw + FOR EACH ROW +DECLARE + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; + v_deptno emp.deptno%TYPE; + v_dname dept.dname%TYPE; + v_loc dept.loc%TYPE; + v_action VARCHAR2(7); +BEGIN + v_empno := :NEW.empno; + v_ename := :New.ename; + v_deptno := :NEW.deptno; + v_dname := :NEW.dname; + v_loc := :NEW.loc; + INSERT INTO emp(empno, ename, deptno) VALUES(v_empno, v_ename, v_deptno); + INSERT INTO dept(deptno, dname, loc) VALUES(v_deptno, v_dname, v_loc); +END; +CREATE TRIGGER +``` + +Next, insert the values into the `emp_vw` view. 
The insert action inserts a new row and produces the following output:
+
+```sql
+INSERT INTO emp_vw (empno, ename, deptno, dname, loc ) VALUES(1234, 'ASHTON', 50, 'IT', 'NEW JERSEY');
+__OUTPUT__
+INSERT 0 1
+```
+
+```sql
+SELECT empno, ename, deptno FROM emp WHERE deptno = 50;
+__OUTPUT__
+ empno | ename  | deptno
+-------+--------+--------
+  1234 | ASHTON |     50
+(1 row)
+```
+
+```sql
+SELECT * FROM dept WHERE deptno = 50;
+__OUTPUT__
+ deptno | dname | loc
+--------+-------+------------
+     50 | IT    | NEW JERSEY
+(1 row)
+```
+
+Similarly, if you specify an `UPDATE` or `DELETE` statement, the trigger performs the appropriate actions for `UPDATE` or `DELETE` events.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx
new file mode 100644
index 00000000000..9f267d486f1
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx
@@ -0,0 +1,260 @@
+---
+title: "Compound triggers"
+redirects:
+  - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+## Defining a compound trigger on a table
+
+This example records a change to the employee salary by defining a compound trigger named `hr_trigger` on the `emp` table.
+
+1. Create a table named `emp`:
+
+    ```sql
+    CREATE TABLE emp(EMPNO INT, ENAME TEXT, SAL INT, DEPTNO INT);
+    CREATE TABLE
+    ```
+
+2. Create a compound trigger named `hr_trigger`. The trigger uses each of the four timing points to display the running value of `var_sal` when an `INSERT`, `UPDATE`, or `DELETE` statement fires it. In the global declaration section, the initial salary is declared as `10,000`.
+
+    ```sql
+    CREATE OR REPLACE TRIGGER hr_trigger
+      FOR INSERT OR UPDATE OR DELETE ON emp
+      COMPOUND TRIGGER
+      -- Global declaration.
+      var_sal NUMBER := 10000;
+
+      BEFORE STATEMENT IS
+      BEGIN
+        var_sal := var_sal + 1000;
+        DBMS_OUTPUT.PUT_LINE('Before Statement: ' || var_sal);
+      END BEFORE STATEMENT;
+
+      BEFORE EACH ROW IS
+      BEGIN
+        var_sal := var_sal + 1000;
+        DBMS_OUTPUT.PUT_LINE('Before Each Row: ' || var_sal);
+      END BEFORE EACH ROW;
+
+      AFTER EACH ROW IS
+      BEGIN
+        var_sal := var_sal + 1000;
+        DBMS_OUTPUT.PUT_LINE('After Each Row: ' || var_sal);
+      END AFTER EACH ROW;
+
+      AFTER STATEMENT IS
+      BEGIN
+        var_sal := var_sal + 1000;
+        DBMS_OUTPUT.PUT_LINE('After Statement: ' || var_sal);
+      END AFTER STATEMENT;
+
+    END hr_trigger;
+
+    Output: Trigger created.
+    ```
+
+3. Insert the record into table `emp`:
+
+    ```sql
+    INSERT INTO emp (EMPNO, ENAME, SAL, DEPTNO) VALUES(1111,'SMITH', 10000, 20);
+    ```
+
+    The `INSERT` statement produces the following output:
+
+    ```sql
+    __OUTPUT__
+    Before Statement: 11000
+    Before Each Row: 12000
+    After Each Row: 13000
+    After Statement: 14000
+    INSERT 0 1
+    ```
+
+4. The `UPDATE` statement updates the employee salary record, setting the salary to `15000` for a specific employee number:
+
+    ```sql
+    UPDATE emp SET SAL = 15000 where EMPNO = 1111;
+    ```
+
+    The `UPDATE` statement produces the following output:
+
+    ```sql
+    Before Statement: 11000
+    Before Each Row: 12000
+    After Each Row: 13000
+    After Statement: 14000
+    UPDATE 1
+
+    SELECT * FROM emp;
+    __OUTPUT__
+     EMPNO | ENAME |  SAL  | DEPTNO
+    -------+-------+-------+--------
+      1111 | SMITH | 15000 |     20
+    (1 row)
+    ```
+
+### DELETE
+
+The `DELETE` statement deletes the employee salary record:
+
+```sql
+DELETE from emp where EMPNO = 1111;
+```
+
+The `DELETE` statement produces the following output:
+
+```sql
+Before Statement: 11000
+Before Each Row: 12000
+After Each Row: 13000
+After Statement: 14000
+DELETE 1
+
+SELECT * FROM emp;
+__OUTPUT__
+ EMPNO | ENAME | SAL | DEPTNO
+-------+-------+-----+--------
+(0 rows)
+```
+
+### TRUNCATE
+
+The `TRUNCATE` statement removes all the records from the `emp` table. Because a compound trigger can fire for `TRUNCATE` only at statement-level timing points, first re-create `hr_trigger` with only `BEFORE STATEMENT` and `AFTER STATEMENT` blocks:
+
+```sql
+CREATE OR REPLACE TRIGGER hr_trigger
+  FOR TRUNCATE ON emp
+  COMPOUND TRIGGER
+  -- Global declaration.
+  var_sal NUMBER := 10000;
+
+  BEFORE STATEMENT IS
+  BEGIN
+    var_sal := var_sal + 1000;
+    DBMS_OUTPUT.PUT_LINE('Before Statement: ' || var_sal);
+  END BEFORE STATEMENT;
+
+  AFTER STATEMENT IS
+  BEGIN
+    var_sal := var_sal + 1000;
+    DBMS_OUTPUT.PUT_LINE('After Statement: ' || var_sal);
+  END AFTER STATEMENT;
+
+END hr_trigger;
+
+Output: Trigger created.
+```
+
+The `TRUNCATE` statement produces the following output:
+
+```sql
+TRUNCATE emp;
+__OUTPUT__
+Before Statement: 11000
+After Statement: 12000
+TRUNCATE TABLE
+```
+
+!!! Note
+    You can use the `TRUNCATE` statement only at a `BEFORE STATEMENT` or `AFTER STATEMENT` timing point.
+
+## Creating a compound trigger on a table with a WHEN condition
+
+This example creates a compound trigger named `hr_trigger` on the `emp` table with a `WHEN` condition. The `WHEN` condition checks and prints the employee salary when an `INSERT`, `UPDATE`, or `DELETE` statement affects the `emp` table. The database evaluates the `WHEN` condition for a row-level trigger, and the trigger executes once per row if the `WHEN` condition evaluates to `TRUE`. The statement-level trigger executes regardless of the `WHEN` condition.
+ +```sql +CREATE OR REPLACE TRIGGER hr_trigger + FOR INSERT OR UPDATE OR DELETE ON emp + REFERENCING NEW AS new OLD AS old + WHEN (old.sal > 5000 OR new.sal < 8000) + COMPOUND TRIGGER + + BEFORE STATEMENT IS + BEGIN + DBMS_OUTPUT.PUT_LINE('Before Statement'); + END BEFORE STATEMENT; + + BEFORE EACH ROW IS + BEGIN + DBMS_OUTPUT.PUT_LINE('Before Each Row: ' || :OLD.sal ||' ' || :NEW.sal); + END BEFORE EACH ROW; + + AFTER EACH ROW IS + BEGIN + DBMS_OUTPUT.PUT_LINE('After Each Row: ' || :OLD.sal ||' ' || :NEW.sal); + END AFTER EACH ROW; + + AFTER STATEMENT IS + BEGIN + DBMS_OUTPUT.PUT_LINE('After Statement'); + END AFTER STATEMENT; + +END hr_trigger; +``` +### INSERT + +Insert the record into table `emp`: + +```sql +INSERT INTO emp(EMPNO, ENAME, SAL, DEPTNO) VALUES(1111, 'SMITH', 1600, 20); +``` + +The `INSERT` statement produces the following output: + +```sql +__OUTPUT__ +Before Statement +Before Each Row: 1600 +After Each Row: 1600 +After Statement +INSERT 0 1 +``` + +### UPDATE + +The `UPDATE` statement updates the employee salary record, setting the salary to `7500`: + +```sql +UPDATE emp SET SAL = 7500 where EMPNO = 1111; +``` + +The `UPDATE` statement produces the following output: + +```sql +Before Statement +Before Each Row: 1600 7500 +After Each Row: 1600 7500 +After Statement +UPDATE 1 + +SELECT * from emp; +__OUTPUT__ + empno | ename | sal | deptno +-------+-------+------+-------- + 1111 | SMITH | 7500 | 20 +(1 row) +``` + +### DELETE + +The `DELETE` statement deletes the employee salary record: + +```sql +DELETE from emp where EMPNO = 1111; +``` + +The `DELETE` statement produces the following output: + +```sql +Before Statement +Before Each Row: 7500 +After Each Row: 7500 +After Statement +DELETE 1 + +SELECT * from emp; +__OUTPUT__ + empno | ename | sal | deptno +-------+-------+-----+-------- +(0 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx new file mode 100644 index 00000000000..6b6a1eed179 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Trigger examples" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.088.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.182.html" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/07_trigger_examples/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The examples that follow show each type of trigger. + +
+ +before_statement_level_trigger after_statement_level_trigger before_row_level_trigger after_row_level_trigger instead_of_trigger compound_trigger + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/index.mdx new file mode 100644 index 00000000000..e54cec7267b --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/13_triggers/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Working with triggers" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.082.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.176.html" +redirects: + - /epas/latest/epas_compat_spl/13_triggers/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +As with procedures and functions, you write triggers in the SPL language. + +
+ +overview types_of_triggers creating_triggers trigger_variables transactions_and_exceptions compound_triggers trigger_examples + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/14_packages.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/14_packages.mdx
new file mode 100644
index 00000000000..96a2be851a3
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/14_packages.mdx
@@ -0,0 +1,22 @@
+---
+title: "Working with packages"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.089.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.183.html"
+redirects:
+  - /epas/latest/epas_compat_spl/14_packages/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+EDB Postgres Advanced Server provides a collection of packages that provide compatibility with Oracle packages.
+
+A *package* is a named collection of functions, procedures, variables, cursors, user-defined record types, and records that are referenced using a common qualifier, known as the package identifier. Packages have the following characteristics:
+
+- Packages provide a convenient means of organizing the functions and procedures that perform a related purpose. Permission to use the package functions and procedures depends on one privilege granted to the entire package. All of the package programs must be referenced with a common name.
+- Certain functions, procedures, variables, types, and so on in the package can be declared as *public*. Public entities are visible and can be referenced by other programs that are given `EXECUTE` privilege on the package. For public functions and procedures, only their signatures are visible, that is, the program names, parameters, if any, and return types of functions. The SPL code of these functions and procedures isn't accessible to others, therefore applications that use a package depend only on the information available in the signature and not in the procedural logic itself.
+- You can declare other functions, procedures, variables, types, and so on in the package as *private*. Private entities can be referenced and used by functions and procedures in the package but not by other external applications. Private entities are for use only by programs in the package.
+- You can overload function and procedure names in a package. Two or more functions or procedures can be defined with the same name but with different signatures. This ability lets you create identically named programs that perform the same job but on different types of input.
+
+For more information about the package support provided by EDB Postgres Advanced Server, see [Built-in packages](../../reference/oracle_compatibility_reference/epas_compat_bip_guide/).
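+As an illustration only (the `emp_admin` package and its contents are example names, and the sample `dept` table is assumed), a minimal package splits a public specification from a body that can also hold private state:
+
+```sql
+CREATE OR REPLACE PACKAGE emp_admin
+IS
+   -- Public: visible to any user granted EXECUTE on the package
+   FUNCTION get_dept_name (p_deptno NUMBER) RETURN VARCHAR2;
+END emp_admin;
+
+CREATE OR REPLACE PACKAGE BODY emp_admin
+IS
+   -- Private: referenced only by programs in this package
+   v_call_count    NUMBER := 0;
+
+   FUNCTION get_dept_name (p_deptno IN NUMBER) RETURN VARCHAR2
+   IS
+      v_dname     VARCHAR2(14);
+   BEGIN
+      v_call_count := v_call_count + 1;
+      SELECT dname INTO v_dname FROM dept WHERE deptno = p_deptno;
+      RETURN v_dname;
+   END;
+END emp_admin;
+```
+
+A caller with `EXECUTE` privilege invokes the function with the package qualifier, for example `emp_admin.get_dept_name(10)`; the private `v_call_count` variable isn't reachable from outside the package.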
\ No newline at end of file diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/01_attributes.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/01_attributes.mdx new file mode 100644 index 00000000000..92cdc00cb46 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/01_attributes.mdx @@ -0,0 +1,15 @@ +--- +title: "Attributes" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/01_attributes/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Every object type must contain at least one attribute. The data type of an attribute can be any of the following: + +- A base data type such as `NUMBER` or `VARCHAR2` +- Another object type +- A globally defined collection type (created by the `CREATE TYPE` command) such as a nested table or varray + +An attribute gets its initial value, which can be null, when an object instance is first created. Each object instance has its own set of attribute values. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx new file mode 100644 index 00000000000..4e7ba1654be --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx @@ -0,0 +1,13 @@ +--- +title: "Methods" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Methods are SPL procedures or functions defined in an object type. Methods are categorized into three general types: + +- **Member methods** — Procedures or functions that operate in the context of an object instance. Member methods have access to and can change the attributes of the object instance on which they're operating. +- **Static methods** — Procedures or functions that operate independently of any particular object instance. Static methods don't have access to and can't change the attributes of an object instance. +- **Constructor methods** — Functions used to create an instance of an object type. A default constructor method is always provided when an object type is defined. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/03_overloading_methods.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/03_overloading_methods.mdx new file mode 100644 index 00000000000..24e3dadc253 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/03_overloading_methods.mdx @@ -0,0 +1,11 @@ +--- +title: "Overloading methods" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/03_overloading_methods/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +In an object type you can define two or more identically named methods (that is, a procedure or function) of the same type but with different signatures. 
Such methods are referred to as *overloaded* methods.
+
+A method’s signature consists of the number of formal parameters, the data types of its formal parameters, and their order.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx
new file mode 100644
index 00000000000..89c4ee511b3
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx
@@ -0,0 +1,23 @@
+---
+title: "Basic object concepts"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.091.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.213.html"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+An object type is a description or definition of some entity. This definition of an object type is characterized by two components:
+
+- Attributes — Fields that describe particular characteristics of an object instance. For a person object, examples are name, address, gender, date of birth, height, weight, eye color, and occupation.
+- Methods — Programs that perform some type of function or operation on an object or that are related to an object. For a person object, examples are calculating the person’s age, displaying the person’s attributes, and changing the values assigned to the person’s attributes.
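+As a brief sketch of both components together (`person_typ` and its members are illustrative names, not from the original text):
+
+```sql
+CREATE OR REPLACE TYPE person_typ AS OBJECT
+(
+    -- Attributes
+    name            VARCHAR2(30),
+    date_of_birth   DATE,
+    -- Member method that operates on an instance's attributes
+    MEMBER FUNCTION age_in_years RETURN NUMBER
+);
+
+CREATE OR REPLACE TYPE BODY person_typ AS
+    MEMBER FUNCTION age_in_years RETURN NUMBER
+    IS
+    BEGIN
+        RETURN TRUNC(MONTHS_BETWEEN(SYSDATE, date_of_birth) / 12);
+    END;
+END;
+```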
+ +attributes methods overloading_methods + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx
new file mode 100644
index 00000000000..180f9fbf8a0
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx
@@ -0,0 +1,127 @@
+---
+title: "Object type specification syntax"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The following is the syntax of the object type specification:
+
+```sql
+CREATE [ OR REPLACE ] TYPE <name>
+  [ AUTHID { DEFINER | CURRENT_USER } ]
+  { IS | AS } OBJECT
+( { <attribute> { <datatype> | <objtype> | <collecttype> } }
+    [, ...]
+  [ <method_spec> ] [, ...]
+  [ <constructor> ] [, ...]
+) [ [ NOT ] { FINAL | INSTANTIABLE } ] ...;
+```
+
+Where `method_spec` is the following:
+
+```sql
+[ [ NOT ] { FINAL | INSTANTIABLE } ] ...
+[ OVERRIDING ]
+<subprogram_spec>
+```
+
+Where `subprogram_spec` is the following:
+
+```sql
+{ MEMBER | STATIC }
+{ PROCEDURE <proc_name>
+    [ ( [ SELF [ IN | IN OUT ] <name> ]
+        [, <parm1> [ IN | IN OUT | OUT ] <datatype1>
+            [ DEFAULT <value1> ] ]
+        [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+            [ DEFAULT <value2> ]
+        ] ...)
+    ]
+|
+  FUNCTION <func_name>
+    [ ( [ SELF [ IN | IN OUT ] <name> ]
+        [, <parm1> [ IN | IN OUT | OUT ] <datatype1>
+            [ DEFAULT <value1> ] ]
+        [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+            [ DEFAULT <value2> ]
+        ] ...)
+    ]
+  RETURN <return_type>
+}
+```
+
+Where `constructor` is the following:
+
+```sql
+CONSTRUCTOR FUNCTION <func_name>
+  [ ( [ <parm1> [ IN | IN OUT | OUT ] <datatype1>
+        [ DEFAULT <value1> ] ]
+      [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+        [ DEFAULT <value2> ]
+      ] ...)
+  ]
+RETURN <return_type>;
+```
+
+!!! Note
+    - You can't use the `OR REPLACE` option to add, delete, or modify the attributes of an existing object type. Use the `DROP TYPE` command to first delete the existing object type. You can use the `OR REPLACE` option to add, delete, or modify the methods in an existing object type.
+
+    - You can use the PostgreSQL form of the `ALTER TYPE ALTER ATTRIBUTE` command to change the data type of an attribute in an existing object type. However, the `ALTER TYPE` command can't add or delete attributes in the object type.
+
+`name` is an identifier (optionally schema-qualified) assigned to the object type.
+
+If you omit the `AUTHID` clause or specify `DEFINER`, the rights of the object type owner are used to determine access privileges to database objects. If you specify `CURRENT_USER`, the rights of the current user executing a method in the object determine access privileges.
+
+## Syntax
+
+`attribute` is an identifier assigned to an attribute of the object type.
+
+`datatype` is a base data type.
+
+`objtype` is a previously defined object type.
+
+`collecttype` is a previously defined collection type.
+
+Following the closing parenthesis of the `CREATE TYPE` definition, `[ NOT ] FINAL` specifies whether a subtype can be derived from this object type. `FINAL`, which is the default, means that no subtypes can be derived from this object type. Specify `NOT FINAL` if you want to allow subtypes to be defined under this object type.
+
+!!! Note
+    Even though the specification of `NOT FINAL` is accepted in the `CREATE TYPE` command, SPL doesn't currently support creating subtypes.
+
+Following the closing parenthesis of the `CREATE TYPE` definition, `[ NOT ] INSTANTIABLE` specifies whether an object instance of this object type can be created.
`INSTANTIABLE`, which is the default, means that an instance of this object type can be created. Specify `NOT INSTANTIABLE` if this object type is to be used only as a parent “template” from which other specialized subtypes are defined. If `NOT INSTANTIABLE` is specified, then you must specify `NOT FINAL` as well. If any method in the object type contains the `NOT INSTANTIABLE` qualifier, then the object type must be defined with `NOT INSTANTIABLE` and `NOT FINAL`. + +!!! Note + Even though specifying `NOT INSTANTIABLE` is accepted in the `CREATE TYPE` command, SPL doesn't currently support creating subtypes. + +## method_spec + +`method_spec` denotes the specification of a member method or static method. + +Before defining a method, use `[ NOT ] FINAL` to specify whether the method can be overridden in a subtype. `NOT FINAL` is the default, meaning the method can be overridden in a subtype. + +Before defining a method, specify `OVERRIDING` if the method overrides an identically named method in a supertype. The overriding method must have the same number of identically named method parameters with the same data types and parameter modes, in the same order, and with the same return type (if the method is a function) as defined in the supertype. + +Before defining a method, use `[ NOT ] INSTANTIABLE` to specify whether the object type definition provides an implementation for the method. If you specify `INSTANTIABLE`, then the `CREATE TYPE BODY` command for the object type must specify the implementation of the method. If you specify `NOT INSTANTIABLE`, then the `CREATE TYPE BODY` command for the object type must not contain the implementation of the method. In this latter case, it is assumed a subtype contains the implementation of the method, overriding the method in this object type. If there are any `NOT INSTANTIABLE` methods in the object type, then the object type definition must specify `NOT INSTANTIABLE` and `NOT FINAL` following the closing parenthesis of the object type specification. The default is `INSTANTIABLE`. + +## subprogram_spec + +`subprogram_spec` denotes the specification of a procedure or function and begins with the specification of either `MEMBER` or `STATIC`. A member subprogram must be invoked with respect to a particular object instance while a static subprogram isn't invoked with respect to any object instance. + +`proc_name` is an identifier of a procedure. If you specify the `SELF` parameter, `name` is the object type name given in the `CREATE TYPE` command. If specified, `parm1, parm2, …` are the formal parameters of the procedure. `datatype1, datatype2, …` are the data types of `parm1, parm2, …` respectively. `IN`, `IN OUT`, and `OUT` are the possible parameter modes for each formal parameter. The default is `IN`. `value1, value2, …` are default values that you can specify for `IN` parameters. + +## CONSTRUCTOR + +Include the `CONSTRUCTOR FUNCTION` keyword and function definition to define a constructor function. + +`func_name` is an identifier of a function. If specified, `parm1, parm2, …` are the formal parameters of the function. `datatype1, datatype2, …` are the data types of `parm1, parm2, …` respectively. `IN`, `IN OUT`, and `OUT` are the possible parameter modes for each formal parameter. The default is `IN`. `value1, value2, …` are default values that you can specify for `IN` parameters. `return_type` is the data type of the value the function returns. 
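+
+To make the syntax above concrete, the following is a minimal, hypothetical specification (the `account_typ` name and its members are illustrative, not taken from this chapter's examples) that declares two attributes, a member procedure, a static function, and a constructor, and marks the type `NOT FINAL`:
+
+```sql
+CREATE OR REPLACE TYPE account_typ AS OBJECT
+(
+  acct_no  NUMBER(6),
+  balance  NUMBER,
+  MEMBER PROCEDURE deposit (amount IN NUMBER),
+  STATIC FUNCTION default_balance RETURN NUMBER,
+  CONSTRUCTOR FUNCTION account_typ (acct_no NUMBER)
+    RETURN self AS RESULT
+) NOT FINAL;
+```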
+
+Note the following about an object type specification:
+
+- There must be at least one attribute defined in the object type.
+
+- There can be zero, one, or more methods defined in the object type.
+
+- A static method can't be overridden. You can't specify `OVERRIDING` and `STATIC` together in `method_spec`.
+
+- A static method must be instantiable. You can't specify `NOT INSTANTIABLE` and `STATIC` together in `method_spec`.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/02_object_type_body_syntax.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/02_object_type_body_syntax.mdx
new file mode 100644
index 00000000000..af5c54d3085
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/02_object_type_body_syntax.mdx
@@ -0,0 +1,104 @@
+---
+title: "Object type body syntax"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/02_object_type_components/02_object_type_body_syntax/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The following is the syntax of the object type body:
+
+```sql
+CREATE [ OR REPLACE ] TYPE BODY <name>
+  { IS | AS }
+  <method_spec> [...]
+  [ <constructor> ] [...]
+END;
+```
+
+Where `method_spec` is `subprogram_spec`, and `subprogram_spec` is the following:
+
+```sql
+{ MEMBER | STATIC }
+{ PROCEDURE <proc_name>
+    [ ( [ SELF [ IN | IN OUT ] <name> ]
+        [, <parm1> [ IN | IN OUT | OUT ] <datatype1>
+            [ DEFAULT <value1> ] ]
+        [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+            [ DEFAULT <value2> ]
+        ] ...)
+    ]
+{ IS | AS }
+  [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+  [ <declarations> ]
+  BEGIN
+    <statement>; ...
+[ EXCEPTION
+    WHEN <exception> ... THEN
+      <statement>; ...]
+  END;
+|
+  FUNCTION <func_name>
+    [ ( [ SELF [ IN | IN OUT ] <name> ]
+        [, <parm1> [ IN | IN OUT | OUT ] <datatype1>
+            [ DEFAULT <value1> ] ]
+        [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+            [ DEFAULT <value2> ]
+        ] ...)
+    ]
+  RETURN <return_type>
+{ IS | AS }
+  [ PRAGMA AUTONOMOUS_TRANSACTION; ]
+  [ <declarations> ]
+  BEGIN
+    <statement>; ...
+[ EXCEPTION
+    WHEN <exception> ... THEN
+      <statement>; ...]
+  END;
+```
+
+Where `constructor` is:
+
+```sql
+CONSTRUCTOR FUNCTION <func_name>
+  [ ( [ <parm1> [ IN | IN OUT | OUT ] <datatype1>
+        [ DEFAULT <value1> ] ]
+      [, <parm2> [ IN | IN OUT | OUT ] <datatype2>
+        [ DEFAULT <value2> ]
+      ] ...)
+  ]
+RETURN <return_type>
+{ IS | AS }
+[ <declarations> ]
+BEGIN
+  <statement>; ...
+[ EXCEPTION
+   WHEN <exception> ... THEN
+     <statement>; ...]
+END;
+```
+
+Where:
+
+`name` is an identifier (optionally schema-qualified) assigned to the object type.
+
+`method_spec` denotes the implementation of an instantiable method that was specified in the `CREATE TYPE` command.
+
+If `INSTANTIABLE` was specified or omitted in `method_spec` of the `CREATE TYPE` command, then there must be a `method_spec` for this method in the `CREATE TYPE BODY` command.
+
+If `NOT INSTANTIABLE` was specified in `method_spec` of the `CREATE TYPE` command, then there must be no `method_spec` for this method in the `CREATE TYPE BODY` command.
+
+`subprogram_spec` denotes the specification of a procedure or function and begins with the specification of either `MEMBER` or `STATIC`. The same qualifier must be used as specified in `subprogram_spec` of the `CREATE TYPE` command.
+
+`proc_name` is an identifier of a procedure specified in the `CREATE TYPE` command. The parameter declarations have the same meaning as described for the `CREATE TYPE` command. They must be specified in the `CREATE TYPE BODY` command in the same manner as in the `CREATE TYPE` command.
+
+Include the `CONSTRUCTOR FUNCTION` keyword and function definition to define a constructor function.
+
+`func_name` is an identifier of a function specified in the `CREATE TYPE` command.
The parameter declarations have the same meaning as described for the `CREATE TYPE` command and must be specified in the `CREATE TYPE BODY` command in the same manner as in the `CREATE TYPE` command. `return_type` is the data type of the value the function returns and must match the `return_type` given in the `CREATE TYPE` command. + +`PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the procedure or function as an autonomous transaction. + +`declarations` are variable, cursor, type, or subprogram declarations. If subprogram declarations are included, they must be declared after all other variable, cursor, and type declarations. + +`statement` is an SPL program statement. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx new file mode 100644 index 00000000000..cb64ccbfbe0 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx @@ -0,0 +1,24 @@ +--- +title: "Object type components" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.092.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.214.html" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/02_object_type_components/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Object types are created and stored in the database by using the following two constructs of the SPL language: + +- The *object type specification*. This construct is the public interface specifying the attributes and method signatures of the object type. +- The *object type body*. This construct contains the implementation of the methods specified in the object type specification. + + +
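+
+As a minimal sketch of how the two constructs pair up (the `greeter_typ` name and its method are hypothetical), a specification declares the interface and a body supplies the implementation:
+
+```sql
+-- Specification: the public interface (attributes and method signatures)
+CREATE OR REPLACE TYPE greeter_typ AS OBJECT
+(
+  pname VARCHAR2(20),
+  MEMBER PROCEDURE greet
+);
+
+-- Body: the implementation of the method declared in the specification
+CREATE OR REPLACE TYPE BODY greeter_typ AS
+  MEMBER PROCEDURE greet
+  IS
+  BEGIN
+    DBMS_OUTPUT.PUT_LINE('Hello, ' || SELF.pname);
+  END;
+END;
+```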
+ +object_type_specification_syntax object_type_body_syntax + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/01_member_methods.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/01_member_methods.mdx new file mode 100644 index 00000000000..4b8d99c2520 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/01_member_methods.mdx @@ -0,0 +1,59 @@ +--- +title: "Member methods" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/01_member_methods/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +A *member method* is a function or procedure that's defined in an object type and can be invoked only through an instance of that type. Member methods have access to, and can change the attributes of, the object instance on which they're operating. + +This object type specification creates the `emp_obj_typ` object type: + +```sql +CREATE OR REPLACE TYPE emp_obj_typ AS OBJECT +( + empno NUMBER(4), + ename VARCHAR2(20), + addr ADDR_OBJ_TYP, + MEMBER PROCEDURE display_emp(SELF IN OUT emp_obj_typ) +); +``` + +Object type `emp_obj_typ` contains a member method named `display_emp`. `display_emp` uses a `SELF` parameter, which passes the object instance on which the method is invoked. + +A `SELF` parameter is a parameter whose data type is that of the object type being defined. `SELF` always refers to the instance that's invoking the method. A `SELF` parameter is the first parameter in a member procedure or function regardless of whether it's explicitly declared in the parameter list. + +The following code defines an object type body for `emp_obj_typ`: + +```sql +CREATE OR REPLACE TYPE BODY emp_obj_typ AS + MEMBER PROCEDURE display_emp (SELF IN OUT emp_obj_typ) + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('Employee No : ' || empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || ename); + DBMS_OUTPUT.PUT_LINE('Street : ' || addr.street); + DBMS_OUTPUT.PUT_LINE('City/State/Zip: ' || addr.city || ', ' || + addr.state || ' ' || LPAD(addr.zip,5,'0')); + END; +END; +``` + +You can also use the `SELF` parameter in an object type body. Using the `SELF` parameter in the `CREATE TYPE BODY` command, you can write the same object type body as follows: + +```sql +CREATE OR REPLACE TYPE BODY emp_obj_typ AS + MEMBER PROCEDURE display_emp (SELF IN OUT emp_obj_typ) + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('Employee No : ' || SELF.empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || SELF.ename); + DBMS_OUTPUT.PUT_LINE('Street : ' || SELF.addr.street); + DBMS_OUTPUT.PUT_LINE('City/State/Zip: ' || SELF.addr.city || ', ' || + SELF.addr.state || ' ' || LPAD(SELF.addr.zip,5,'0')); + END; +END; +``` + +Both versions of the `emp_obj_typ` body are equivalent. 
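+
+As a usage sketch, an anonymous block can create an instance and invoke the member method through it. This assumes `addr_obj_typ` is defined with `street`, `city`, `state`, and `zip` attributes, as in the other examples in this chapter; the data values are illustrative:
+
+```sql
+DECLARE
+  v_emp emp_obj_typ;
+BEGIN
+  -- initialize the instance, then invoke the member method through it
+  v_emp := emp_obj_typ(7369,'SMITH',
+             addr_obj_typ('1 ELM STREET','TRENTON','NJ',08601));
+  v_emp.display_emp;
+END;
+```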
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/02_static_methods.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/02_static_methods.mdx
new file mode 100644
index 00000000000..da256e13d03
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/02_static_methods.mdx
@@ -0,0 +1,59 @@
+---
+title: "Static methods"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/02_static_methods/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Like a member method, a static method belongs to a type. A static method, however, is invoked not by an instance of the type, but by using the name of the type. For example, to invoke a static function named `get_count`, defined in the `emp_obj_type` type, you can write:
+
+```sql
+emp_obj_type.get_count();
+```
+
+A static method doesn't have access to, and can't change, the attributes of an object instance. It typically doesn't work with an instance of the type.
+
+The following object type specification includes a static function `get_dname` and a member procedure `display_dept`:
+
+```sql
+CREATE OR REPLACE TYPE dept_obj_typ AS OBJECT (
+    deptno          NUMBER(2),
+    STATIC FUNCTION get_dname(p_deptno IN NUMBER) RETURN VARCHAR2,
+    MEMBER PROCEDURE display_dept
+);
+```
+
+The object type body for `dept_obj_typ` defines a static function named `get_dname` and a member procedure named `display_dept`:
+
+```sql
+CREATE OR REPLACE TYPE BODY dept_obj_typ AS
+    STATIC FUNCTION get_dname(p_deptno IN NUMBER) RETURN VARCHAR2
+    IS
+        v_dname     VARCHAR2(14);
+    BEGIN
+        CASE p_deptno
+            WHEN 10 THEN v_dname := 'ACCOUNTING';
+            WHEN 20 THEN v_dname := 'RESEARCH';
+            WHEN 30 THEN v_dname := 'SALES';
+            WHEN 40 THEN v_dname := 'OPERATIONS';
+            ELSE v_dname := 'UNKNOWN';
+        END CASE;
+        RETURN v_dname;
+    END;
+
+    MEMBER PROCEDURE display_dept
+    IS
+    BEGIN
+        DBMS_OUTPUT.PUT_LINE('Dept No    : ' || SELF.deptno);
+        DBMS_OUTPUT.PUT_LINE('Dept Name  : ' ||
+            dept_obj_typ.get_dname(SELF.deptno));
+    END;
+END;
+```
+
+The static function `get_dname` can't reference `SELF`. Since a static function is invoked independently of any object instance, it has no implicit access to any object attribute.
+
+Member procedure `display_dept` can access the `deptno` attribute of the object instance passed in the `SELF` parameter. It isn't necessary to explicitly declare the `SELF` parameter in the `display_dept` parameter list.
+
+The last `DBMS_OUTPUT.PUT_LINE` statement in the `display_dept` procedure includes a call to the static function `get_dname`, qualified by its object type name `dept_obj_typ`.
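+
+For comparison with the member method, here's a short sketch of invoking the static function defined above. No instance is needed; the call is qualified by the type name:
+
+```sql
+BEGIN
+  -- static method: qualified by the type name, not by an instance
+  DBMS_OUTPUT.PUT_LINE(dept_obj_typ.get_dname(30));  -- SALES
+END;
+```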
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx
new file mode 100644
index 00000000000..3ce3180cc15
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx
@@ -0,0 +1,87 @@
+---
+title: "Constructor methods"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+A constructor method is a function that creates an instance of an object type, typically by assigning values to the members of the object. An object type can define several constructors to accomplish different tasks. A constructor method is a member function, invoked with a `SELF` parameter, whose name matches the name of the type.
+
+For example, if you define a type named `address`, each constructor is named `address`. You can overload a constructor by creating one or more different constructor functions with the same name but with different argument types.
+
+The SPL compiler provides a default constructor for each object type. The default constructor is a member function whose name matches the name of the type and whose argument list matches the type members in order. For example, given an object type such as:
+
+```sql
+CREATE TYPE address AS OBJECT
+(
+  street_address VARCHAR2(40),
+  postal_code    VARCHAR2(10),
+  city           VARCHAR2(40),
+  state          VARCHAR2(2)
+);
+```
+
+The SPL compiler provides a default constructor with the following signature:
+
+```sql
+CONSTRUCTOR FUNCTION address
+(
+  street_address VARCHAR2(40),
+  postal_code    VARCHAR2(10),
+  city           VARCHAR2(40),
+  state          VARCHAR2(2)
+)
+```
+
+The body of the default constructor sets each member to `NULL`.
+
+To create a custom constructor, use the `CONSTRUCTOR` keyword to declare the constructor function in the `CREATE TYPE` command, and define the constructor function in the `CREATE TYPE BODY` command. For example, you might want to create a custom constructor for the `address` type that computes the city and state given a `street_address` and `postal_code`:
+
+```sql
+CREATE TYPE address AS OBJECT
+(
+  street_address VARCHAR2(40),
+  postal_code    VARCHAR2(10),
+  city           VARCHAR2(40),
+  state          VARCHAR2(2),
+
+  CONSTRUCTOR FUNCTION address
+  (
+    street_address VARCHAR2,
+    postal_code    VARCHAR2
+  ) RETURN self AS RESULT
+);
+
+CREATE TYPE BODY address AS
+  CONSTRUCTOR FUNCTION address
+  (
+    street_address VARCHAR2,
+    postal_code    VARCHAR2
+  ) RETURN self AS RESULT
+  IS
+  BEGIN
+    self.street_address := street_address;
+    self.postal_code    := postal_code;
+    self.city           := postal_code_to_city(postal_code);
+    self.state          := postal_code_to_state(postal_code);
+    RETURN;
+  END;
+END;
+```
+
+To create an instance of an object type, you invoke one of the constructor methods for that type. For example:
+
+```sql
+DECLARE
+  cust_addr address := address('100 Main Street', '02203');
+BEGIN
+  DBMS_OUTPUT.PUT_LINE(cust_addr.city);   -- displays Boston
+  DBMS_OUTPUT.PUT_LINE(cust_addr.state);  -- displays MA
+END;
+```
+
+Custom constructor functions are:
+
+- Typically used to compute member values when given incomplete information. The example computes the values for `city` and `state` when given a postal code.
+ +- Also used to enforce business rules that restrict the state of an object. For example, if you define an object type to represent a `payment`, you can use a custom constructor to ensure that no object of type `payment` can be created with an `amount` that is `NULL`, negative, or zero. The default constructor sets `payment.amount` to `NULL`, so you must create a custom constructor whose signature matches the default constructor to prohibit `NULL` amounts. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx new file mode 100644 index 00000000000..4ffd504f1b8 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx @@ -0,0 +1,34 @@ +--- +title: "Creating object types" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.093.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.215.html" +redirects: + - /epas/latest/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can use the `CREATE TYPE` command to create an object type specification and the `CREATE TYPE BODY` command to create an object type body. The examples that follow use the `CREATE TYPE` and `CREATE TYPE BODY` commands. + +The first example creates the `addr_object_type` object type that contains only attributes and no methods: + +```sql +CREATE OR REPLACE TYPE addr_object_type AS OBJECT +( + street VARCHAR2(30), + city VARCHAR2(20), + state CHAR(2), + zip NUMBER(5) +); +``` + +Since there are no methods in this object type, an object type body isn't required. This example creates a composite type, which allows you to treat related objects as a single attribute. + +
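+
+To illustrate the composite idea (the `person_typ` name is hypothetical), another type can use `addr_object_type` as a single attribute:
+
+```sql
+CREATE OR REPLACE TYPE person_typ AS OBJECT
+(
+  pname VARCHAR2(30),
+  addr  addr_object_type  -- four related address fields carried as one attribute
+);
+```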
+ +member_methods static_methods constructor_methods + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx
new file mode 100644
index 00000000000..048058fcc80
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx
@@ -0,0 +1,76 @@
+---
+title: "Creating object instances"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.094.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.216.html"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+## Creating an instance
+
+To create an instance of an object type, you must first declare a variable of the object type and then initialize the declared object variable. The syntax for declaring an object variable is:
+
+```text
+<object> <obj_type>
+```
+
+Where:
+
+`object` is an identifier assigned to the object variable.
+
+`obj_type` is the identifier of a previously defined object type.
+
+## Invoking a constructor method
+
+After declaring the object variable, you must invoke a *constructor method* to initialize the object with values. Use the following syntax to invoke the constructor method:
+
+```sql
+[NEW] <obj_type> ({ <expr1> | NULL } [, { <expr2> | NULL } ] [, ...])
+```
+
+Where:
+
+`obj_type` is the identifier of the object type’s constructor method. The constructor method has the same name as the previously declared object type.
+
+`expr1, expr2, …` are expressions that are type-compatible with the first attribute of the object type, the second attribute of the object type, and so on. If an attribute is of an object type, then the corresponding expression can be `NULL`, an object initialization expression, or any expression that returns that object type.
+
+This anonymous block declares and initializes a variable:
+
+```sql
+DECLARE
+    v_emp           EMP_OBJ_TYP;
+BEGIN
+    v_emp := emp_obj_typ (9001,'JONES',
+      addr_obj_typ('123 MAIN STREET','EDISON','NJ',08817));
+END;
+```
+
+The variable `v_emp` is declared with a previously defined object type named `EMP_OBJ_TYP`. The body of the block initializes the variable using the `emp_obj_typ` and `addr_obj_typ` constructors.
+
+You can include the `NEW` keyword when creating a new instance of an object in the body of a block. The `NEW` keyword invokes the object constructor whose signature matches the arguments provided.
+
+## Example
+
+This example declares two variables named `mgr` and `emp`. The variables are both of type `EMP_OBJ_TYPE`. The `mgr` object is initialized in the declaration, while the `emp` object is initialized to `NULL` in the declaration and assigned a value in the body.
+
+```sql
+DECLARE
+    mgr    EMP_OBJ_TYPE := (9002,'SMITH');
+    emp    EMP_OBJ_TYPE;
+BEGIN
+    emp := NEW EMP_OBJ_TYPE (9003,'RAY');
+END;
+```
+
+!!! Note
+    In EDB Postgres Advanced Server, you can use the following alternate syntax in place of the constructor method.
+
+```sql
+[ ROW ] ({ <expr1> | NULL } [, { <expr2> | NULL } ] [, ...])
+```
+
+`ROW` is an optional keyword if two or more terms are specified in the parenthesis-enclosed, comma-delimited list. If you specify only one term, then you must specify the `ROW` keyword.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx
new file mode 100644
index 00000000000..964c3d945f7
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx
@@ -0,0 +1,137 @@
+---
+title: "Referencing an object"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.095.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.217.html"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+## Syntax
+
+After you create and initialize an object variable, you can reference individual attributes using dot notation of the form:
+
+```text
+<object>.<attribute>
+```
+
+Where:
+
+`object` is the identifier assigned to the object variable.
+
+`attribute` is the identifier of an object type attribute.
+
+If `attribute` is of an object type, then the reference must take the form:
+
+```text
+<object>.<attribute>.<attribute_inner>
+```
+
+Where `attribute_inner` is an identifier belonging to the object type that `attribute` references in the definition of `object`.
+
+## Examples
+
+This example displays the values assigned to the `emp_obj_typ` object:
+
+```sql
+DECLARE
+    v_emp          EMP_OBJ_TYP;
+BEGIN
+    v_emp := emp_obj_typ(9001,'JONES',
+      addr_obj_typ('123 MAIN STREET','EDISON','NJ',08817));
+    DBMS_OUTPUT.PUT_LINE('Employee No   : ' || v_emp.empno);
+    DBMS_OUTPUT.PUT_LINE('Name          : ' || v_emp.ename);
+    DBMS_OUTPUT.PUT_LINE('Street        : ' || v_emp.addr.street);
+    DBMS_OUTPUT.PUT_LINE('City/State/Zip: ' || v_emp.addr.city || ', ' ||
+        v_emp.addr.state || ' ' || LPAD(v_emp.addr.zip,5,'0'));
+END;
+```
+
+The following is the output from this anonymous block:
+
+```sql
+__OUTPUT__
+Employee No   : 9001
+Name          : JONES
+Street        : 123 MAIN STREET
+City/State/Zip: EDISON, NJ 08817
+```
+
+Methods are called in a manner similar to attributes.
+
+Once an object variable is created and initialized, member procedures or functions are called using dot notation of the form:
+
+```text
+<object>.<prog_name>
+```
+
+Where:
+
+`object` is the identifier assigned to the object variable.
+
+`prog_name` is the identifier of the procedure or function.
+
+Static procedures or functions aren't called using an object variable. Instead, call the procedure or function using the object type name:
+
+```text
+<object_type>.<prog_name>
+```
+
+Where:
+
+`object_type` is the identifier assigned to the object type.
+
+`prog_name` is the identifier of the procedure or function.
+
+You can duplicate the results of the previous anonymous block by calling the member procedure `display_emp`:
+
+```sql
+DECLARE
+    v_emp          EMP_OBJ_TYP;
+BEGIN
+    v_emp := emp_obj_typ(9001,'JONES',
+      addr_obj_typ('123 MAIN STREET','EDISON','NJ',08817));
+    v_emp.display_emp;
+END;
+```
+
+The following is the output from this anonymous block:
+
+```sql
+__OUTPUT__
+Employee No   : 9001
+Name          : JONES
+Street        : 123 MAIN STREET
+City/State/Zip: EDISON, NJ 08817
+```
+
+This anonymous block creates an instance of `dept_obj_typ` and calls the member procedure `display_dept`:
+
+```sql
+DECLARE
+    v_dept     DEPT_OBJ_TYP := dept_obj_typ (20);
+BEGIN
+    v_dept.display_dept;
+END;
+```
+
+The following is the output from this anonymous block:
+
+```sql
+__OUTPUT__
+Dept No    : 20
+Dept Name  : RESEARCH
+```
+
+You can call the static function defined in `dept_obj_typ` directly by qualifying it by the object type name as follows:
+
+```sql
+BEGIN
+    DBMS_OUTPUT.PUT_LINE(dept_obj_typ.get_dname(20));
+END;
+__OUTPUT__
+RESEARCH
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx
new file mode 100644
index 00000000000..53c58d7bd9e
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx
@@ -0,0 +1,47 @@
+---
+title: "Dropping an object type"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.096.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.218.html"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+## Deleting an object type
+
+The syntax for deleting an object type is as follows:
+
+```sql
+DROP TYPE <objtype>;
+```
+
+Where `objtype` is the identifier of the object type to drop. If the definition of `objtype` contains attributes that are themselves object types or collection types, you must drop these nested object types or collection types last.
+
+If an object type body is defined for the object type, the `DROP TYPE` command deletes the object type body as well as the object type specification. To re-create the complete object type, you must reissue both the `CREATE TYPE` and `CREATE TYPE BODY` commands.
+
+This example drops the `emp_obj_typ` and the `addr_obj_typ` object types. You must drop `emp_obj_typ` first since it contains `addr_obj_typ` in its definition as an attribute.
+
+```sql
+DROP TYPE emp_obj_typ;
+DROP TYPE addr_obj_typ;
+```
+
+## Dropping only the object type body
+
+The syntax for deleting an object type body but not the object type specification is:
+
+```sql
+DROP TYPE BODY <objtype>;
+```
+
+You can re-create the object type body by issuing the `CREATE TYPE BODY` command.
+
+This example drops only the object type body of the `dept_obj_typ`:
+
+```sql
+DROP TYPE BODY dept_obj_typ;
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/index.mdx
new file mode 100644
index 00000000000..ba89c2a52cb
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/15_object_types_and_objects/index.mdx
@@ -0,0 +1,29 @@
+---
+title: "Using object types and objects"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.090.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.212.html"
+redirects:
+  - /epas/latest/epas_compat_spl/15_object_types_and_objects/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+You can use object-oriented programming techniques in SPL. Object-oriented programming as seen in programming languages such as Java and C++ centers on the concept of *objects*. An object represents a real-world entity such as a person, place, or thing. The generic description or definition of a particular object such as a person, for example, is called an *object type*. Specific people, such as “Joe” or “Sally”, are said to be *objects of object type* person. They're also known as *instances* of the object type person or, simply, person objects.
+
+You can create objects and object types in SPL.
+
+!!! Note
+    - The terms “database objects” and “objects” are different from the terms “object type” and “object” used in object-oriented programming. Database objects are the entities that can be created in a database, such as tables, views, indexes, and users. In the context of object-oriented programming, object type and object refer to specific data structures supported by the SPL programming language to implement object-oriented concepts.
+
+    - In Oracle, the term *abstract data type* (ADT) describes object types in PL/SQL. The SPL implementation of object types is intended to be compatible with Oracle abstract data types.
+
+    - EDB Postgres Advanced Server hasn't yet implemented support for some features of object-oriented programming languages.
+ +basic_object_concepts object_type_components creating_object_types creating_object_instances referencing_an_object dropping_an_object_type + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definer's_rights_package.png b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definer's_rights_package.png new file mode 100755 index 00000000000..eff47bf9058 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definer's_rights_package.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89d79f4e078a0a507127191caae821b82c85590b59f8de7dac54afc184ca0099 +size 19423 diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definers_rights_package.png b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definers_rights_package.png new file mode 100755 index 00000000000..049b2568c14 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/definers_rights_package.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93558bcf05875dcb2ed2e49dcb1e732ade405f27e808260369e8e17dba632bf9 +size 21636 diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/invokers_rights_programs.png b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/invokers_rights_programs.png new file mode 100755 index 00000000000..0447f5d9f7e --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/images/invokers_rights_programs.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd88c09a0907e24e609303655d3364f4f218982a14dccf3d419a6c9cf74611fc +size 22493 diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_spl/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_spl/index.mdx new file mode 100644 index 00000000000..6702df8b4d6 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_spl/index.mdx @@ -0,0 +1,21 @@ +--- +navTitle: Stored procedural language +title: "Using the stored procedural language" +indexCards: simple +description: "How to use SPL to create stored procedures, functions, triggers, and packages for the EDB Postgres Advanced Server database" +redirects: + - /epas/latest/epas_compat_spl/ #generated for docs/epas/reorg-role-use-case-mode +--- + +EDB Postgres Advanced Server's stored procedural language (SPL) is a highly productive, procedural programming language for writing custom procedures, functions, triggers, and packages for EDB Postgres Advanced Server. It provides: + +- Full procedural programming functionality to complement the SQL language +- A single, common language to create stored procedures, functions, triggers, and packages for the EDB Postgres Advanced Server database +- A seamless development and testing environment +- The use of reusable code +- Ease of use + +For reference information about the SPL program types, programming statements, control structures, collection types, and collection methods, see [Stored procedural language (SPL) reference](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/). 
+ + + diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx new file mode 100644 index 00000000000..9d857ff7329 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx @@ -0,0 +1,14 @@ +--- +title: "Oracle table partitioning compatibility summary" +--- + +EDB Postgres Advanced Server supports aspects of table partitioning that are compatible with Oracle databases. + +!!! Note + The *declarative partitioning* feature, introduced with PostgreSQL version 10, is not covered here. However, PostgreSQL declarative partitioning is supported in EDB Postgres Advanced Server 10 in addition to the table partitioning compatible with Oracle databases described here. For information about declarative partitioning, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/ddl-partitioning.html). + +The PostgreSQL `INSERT... ON CONFLICT DO NOTHING/UPDATE` clause, commonly known as `UPSERT`, isn't supported on Oracle-styled partitioned tables. If you include the `ON CONFLICT DO NOTHING/UPDATE` clause when invoking the INSERT command to add data to a partitioned table, an error occurs. + +!!! Note + EDB Postgres Advanced Server doesn't support global indexes, so the index isn't inherited when you define a primary key on the partitioned table that doesn't include partition key columns. However, all partitions defined in `CREATE TABLE` have an independent primary index on the column. You can re-create the primary key on all newly added partitions by using `ALTER TABLE ... ADD CONSTRAINT`. This primary index enforces uniqueness in each partition but not across the entire partition hierarchy. In other words, you can have the same value repeated for the primary index column in two or more partitions. + diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx new file mode 100644 index 00000000000..b95324f0067 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx @@ -0,0 +1,31 @@ +--- +title: "Interval range partitioning" +redirects: + - /epas/latest/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Interval range partitioning is an extension to range partitioning that allows a database to create a partition when the inserted data exceeds the range of an existing partition. To implement interval range partitioning, include the `INTERVAL` clause, and specify the range size for a new partition. + +The high value of a range partition, also known as the transition point, is determined by the range partitioning key value. The database creates partitions for inserted data with values that are beyond that high value. + +## Interval range partitioning example + +Suppose an interval is set to one month. If data is inserted for two months after the current transition point, only the partition for the second month is created and not the intervening partition. 
For example, you can create an interval-range-partitioned table with a monthly interval and a current transition point of February 15, 2023. If you try to insert data for May 10, 2023, then the required partition for April 15 to May 15, 2023, is created, and the data is inserted into that partition. The partitions for February 15 to March 15, 2023, and March 15 to April 15, 2023, are skipped.
+
+For information about interval range partitioning syntax, see [CREATE TABLE...PARTITION BY](../../../reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/01_create_table_partition_by/).
+
+## Restrictions on interval range partitioning
+
+The following restrictions apply to the `INTERVAL` clause:
+
+- Interval range partitioning is restricted to a single partition key. That key must be a numeric or date range.
+- You must define at least one range partition.
+- The `INTERVAL` clause isn't supported for index-organized tables.
+- You can't create a domain index on an interval-range-partitioned table.
+- In composite partitioning, interval range partitioning can be useful as the primary partitioning mechanism but isn't supported at the subpartition level.
+- You can't define `DEFAULT` and `MAXVALUE` for an interval-range-partitioned table.
+- You can't specify `NULL`, `Not-a-Number`, or `Infinity` values in the partitioning key column.
+- The interval expression must yield a constant value and can't be negative.
+- You must create the partitions for an interval-range-partitioned table in increasing order.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx
new file mode 100644
index 00000000000..e44e9e933ad
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx
@@ -0,0 +1,21 @@
+---
+title: "Automatic list partitioning"
+redirects:
+  - /epas/latest/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Automatic list partitioning is an extension to `LIST` partitioning that allows a database to create a partition for any new distinct value of the list partitioning key. A new partition is created when data is inserted into the `LIST` partitioned table and the inserted value doesn't match any existing table partition. Use the `AUTOMATIC` clause to implement automatic list partitioning.
+
+For example, consider a table named `sales` with a `sales_state` column whose existing partitions cover the values `CALIFORNIA` and `FLORIDA`. As sales expand into new states, new distinct `sales_state` values appear. A sale in a new state, for example, `INDIANA` or `OHIO`, requires a new partition. If you implement automatic list partitioning, the partitions for `INDIANA` and `OHIO` are created automatically, and the data is entered into the `sales` table.
+
+For information about automatic list partitioning syntax, see [CREATE TABLE...PARTITION BY](../../../reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/01_create_table_partition_by/).
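+
+As a minimal sketch of the scenario above (table and partition names are illustrative; see the linked reference for the authoritative syntax), the `AUTOMATIC` clause follows the list partitioning key:
+
+```sql
+CREATE TABLE sales
+(
+  dept_no      NUMBER,
+  part_no      VARCHAR2,
+  sales_state  VARCHAR2(20),
+  amount       NUMBER
+)
+PARTITION BY LIST(sales_state) AUTOMATIC
+(
+  PARTITION p_california VALUES('CALIFORNIA'),
+  PARTITION p_florida VALUES('FLORIDA')
+);
+
+-- A value with no matching partition triggers automatic partition creation
+INSERT INTO sales VALUES (10, '4519b', 'OHIO', 150);
+```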
+
+## Restrictions for automatic list partitioning
+
+The following restrictions apply to the `AUTOMATIC` clause:
+
+- A table that enables automatic list partitioning can't have a `DEFAULT` partition.
+- Automatic list partitioning doesn't support multi-column list partitioning.
+- In composite partitioning, automatic list partitioning can be useful as the primary partitioning mechanism but isn't supported at the subpartition level.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx
new file mode 100644
index 00000000000..5585239fda5
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx
@@ -0,0 +1,50 @@
+---
+title: "Selecting a partition type"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.101.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.326.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.051.html"
+redirects:
+  - /epas/latest/epas_compat_table_partitioning/02_selecting_a_partition_type/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+When you create a partitioned table, you specify `LIST`, `RANGE`, or `HASH` partitioning rules. The partitioning rules provide a set of constraints that define the data that's stored in each partition. As you add rows to the partitioned table, the server uses the partitioning rules to decide which partition contains each row.
+
+EDB Postgres Advanced Server can also use the partitioning rules to enforce partition pruning, which improves performance when responding to user queries. When selecting a partitioning type and partitioning keys for a table, consider how the data that's stored in a table is queried, and include often-queried columns in the partitioning rules.
+
+## List partitioning
+
+When you create a list-partitioned table, you specify a single partitioning key column. When adding a row to the table, the server compares the key values specified in the partitioning rule to the corresponding column in the row. If the column value matches a value in the partitioning rule, the row is stored in the partition named in the rule.
+
+!!! Note
+    List partitioning doesn't support multi-column list partitioning.
+
+See [Automatic list partitioning](02_automatic_list_partitioning.mdx) for information about an extension to `LIST` partitioning that enables a database to automatically create a partition for any new distinct value of the list partitioning key.
+
+## Range partitioning
+
+When you create a range-partitioned table, you specify one or more partitioning key columns. When you add a row to the table, the server compares the value of the partitioning keys to the corresponding columns in a table entry. If the column values satisfy the conditions specified in the partitioning rule, the row is stored in the partition named in the rule.
+
+See [Interval range partitioning](01_interval_range_partitioning.mdx) for information about an extension to range partitioning that enables a database to create a partition when the inserted data exceeds the range of an existing partition.
+
+## Hash partitioning
+
+When you create a hash-partitioned table, you specify one or more partitioning key columns. Data is divided approximately equally among the specified partitions. When you add a row to a hash-partitioned table, the server computes a hash value for the data in the specified columns and stores the row in a partition according to the hash value.
+
+!!! Note
+    When upgrading EDB Postgres Advanced Server, you must rebuild each hash-partitioned table on the upgraded server.
+
+## Subpartitioning
+
+Subpartitioning breaks a partitioned table into smaller subsets. You must store all subsets in the same database server cluster. A table is typically subpartitioned by a different set of columns. It can have a different subpartitioning type from the parent partition. If you subpartition one partition, then each partition has at least one subpartition.
+
+If you subpartition a table, no data is stored in any of the partition tables. Instead, the data is stored in the corresponding subpartitions.
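+
+To tie the hash and subpartitioning descriptions together, here's a hedged sketch of a range-partitioned table with hash subpartitions. The names are illustrative, and the `SUBPARTITIONS <n>` shorthand is covered in the CREATE TABLE...PARTITION BY reference:
+
+```sql
+CREATE TABLE sales
+(
+  dept_no  NUMBER,
+  country  VARCHAR2(20),
+  date     DATE,
+  amount   NUMBER
+)
+PARTITION BY RANGE(date)
+  SUBPARTITION BY HASH(dept_no) SUBPARTITIONS 2
+(
+  PARTITION q1_2023 VALUES LESS THAN('01-APR-2023'),
+  PARTITION q2_2023 VALUES LESS THAN('01-JUL-2023')
+);
+```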
+ +interval_range_partitioning automatic_list_partitioning + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/01_example_partition_pruning.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/01_example_partition_pruning.mdx new file mode 100644 index 00000000000..ff4ccd00c29 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/01_example_partition_pruning.mdx @@ -0,0 +1,123 @@ +--- +title: "Example: Partition pruning" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.103.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.328.html" +redirects: + - /epas/latest/epas_compat_table_partitioning/03_using_partition_pruning/01_example_partition_pruning/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The `EXPLAIN` statement displays the execution plan of a statement. You can use the `EXPLAIN` statement to confirm that EDB Postgres Advanced Server is pruning partitions from the execution plan of a query. + +This example shows the efficiency of partition pruning. Create a simple table: + +```sql +CREATE TABLE sales +( + dept_no number, + part_no varchar2, + country varchar2(20), + date date, + amount number +) +PARTITION BY LIST(country) +( + PARTITION europe VALUES('FRANCE', 'ITALY'), + PARTITION asia VALUES('INDIA', 'PAKISTAN'), + PARTITION americas VALUES('US', 'CANADA') +); +``` + +Perform a constrained query that includes the `EXPLAIN` statement: + +```sql +EXPLAIN (COSTS OFF) SELECT * FROM sales WHERE country = 'INDIA'; +``` + +The resulting query plan shows that the server scans only the `sales_asia` table. That's the table in which a row with a `country` value of `INDIA` is stored. 
+ +```sql +edb=# EXPLAIN (COSTS OFF) SELECT * FROM sales WHERE country = 'INDIA'; +__OUTPUT__ + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on sales_asia + Filter: ((country)::text = 'INDIA'::text) +(3 rows) +``` + +Suppose you perform a query that searches for a row that matches a value not included in the partitioning key: + +```sql +EXPLAIN (COSTS OFF) SELECT * FROM sales WHERE dept_no = '30'; +``` + +The resulting query plan shows that the server must look in all of the partitions to locate the rows that satisfy the query: + +```sql +edb=# EXPLAIN (COSTS OFF) SELECT * FROM sales WHERE dept_no = '30'; +__OUTPUT__ + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on sales_americas + Filter: (dept_no = '30'::numeric) + -> Seq Scan on sales_europe + Filter: (dept_no = '30'::numeric) + -> Seq Scan on sales_asia + Filter: (dept_no = '30'::numeric) +(7 rows) +``` + +Constraint exclusion also applies when querying subpartitioned tables: + +```sql +CREATE TABLE sales +( + dept_no number, + part_no varchar2, + country varchar2(20), + date date, + amount number +) +PARTITION BY RANGE(date) SUBPARTITION BY LIST (country) +( + PARTITION "2011" VALUES LESS THAN('01-JAN-2012') + ( + SUBPARTITION europe_2011 VALUES ('ITALY', 'FRANCE'), + SUBPARTITION asia_2011 VALUES ('PAKISTAN', 'INDIA'), + SUBPARTITION americas_2011 VALUES ('US', 'CANADA') + ), + PARTITION "2012" VALUES LESS THAN('01-JAN-2013') + ( + SUBPARTITION europe_2012 VALUES ('ITALY', 'FRANCE'), + SUBPARTITION asia_2012 VALUES ('PAKISTAN', 'INDIA'), + SUBPARTITION americas_2012 VALUES ('US', 'CANADA') + ), + PARTITION "2013" VALUES LESS THAN('01-JAN-2015') + ( + SUBPARTITION europe_2013 VALUES ('ITALY', 'FRANCE'), + SUBPARTITION asia_2013 VALUES ('PAKISTAN', 'INDIA'), + SUBPARTITION americas_2013 VALUES ('US', 'CANADA') + ) +); +``` + +When you query the table, the query planner prunes any partitions or subpartitions from the search path that can't contain the desired result set: + +```sql +edb=# EXPLAIN (COSTS OFF) SELECT * FROM sales WHERE country = 'US' AND date = 'Dec 12, 2012'; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------------------------------ + Append + -> Seq Scan on sales_americas_2012 + Filter: (((country)::text = 'US'::text) AND (date = '12-DEC-12 + 00:00:00'::timestamp without time zone)) +(3 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx new file mode 100644 index 00000000000..9457070c77b --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx @@ -0,0 +1,145 @@ +--- +title: "Using partition pruning" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.102.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.327.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.052.html" +redirects: + - /epas/latest/epas_compat_table_partitioning/03_using_partition_pruning/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +EDB Postgres Advanced Server's query planner uses *partition pruning* to compute an efficient plan to locate any rows that match the conditions specified in the `WHERE` clause of a `SELECT` statement. To successfully prune partitions from an execution plan, the `WHERE` clause must constrain the information that's compared to the partitioning key column specified when creating the partitioned table. + +| When querying a... | Partition pruning is effective when... | +|----------------------------|----------------------------------------| +| List-partitioned table | The `WHERE` clause compares a literal value to the partitioning key using operators like equal (=) or `AND`. | +| Range-partitioned table | The `WHERE` clause compares a literal value to a partitioning key using operators such as equal (=), less than (<), or greater than (>). +| Hash-partitioned table | The `WHERE` clause compares a literal value to the partitioning key using an operator such as equal (=). | + +## Partition pruning techniques + +The partition pruning mechanism uses two optimization techniques: + +- Constraint exclusion +- Fast pruning + +Partition pruning techniques limit the search for data only to those partitions where the values you're searching for might reside. Both pruning techniques remove partitions from a query's execution plan, improving performance. + +The difference between the fast pruning and constraint exclusion is that fast pruning understands the relationship between the partitions in an Oracle-partitioned table. Constraint exclusion doesn't. For example, when a query searches for a specific value in a list-partitioned table, fast pruning can reason that only a specific partition can hold that value. Constraint exclusion must examine the constraints defined for each partition. Fast pruning occurs early in the planning process to reduce the number of partitions that the planner must consider. Constraint exclusion occurs late in the planning process. + +[This example](01_example_partition_pruning.mdx) shows the efficiency of partition pruning, using the `EXPLAIN` statement to confirm that EDB Postgres Advanced Server is pruning partitions from the execution plan of a query. + +## Using constraint exclusion + +The `constraint_exclusion` parameter controls constraint exclusion. The `constraint_exclusion` parameter can have a value of `on`, `off`, or `partition`. To enable constraint exclusion, you must set the parameter to either `partition` or `on`. By default, the parameter is set to `partition`. + +For more information about constraint exclusion, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/static/ddl-partitioning.html). + +When constraint exclusion is enabled, the server examines the constraints defined for each partition to determine if that partition can satisfy a query. 
+
+When you execute a `SELECT` statement that doesn't contain a `WHERE` clause, the query planner must recommend an execution plan that searches the entire table. When you execute a `SELECT` statement that contains a `WHERE` clause, the query planner:
+
+- Determines the partition that would store the matching rows
+- Sends query fragments to that partition
+- Prunes the partitions that can't contain those rows from the execution plan
+
+If you aren't using partitioned tables, disabling constraint exclusion might improve performance.
+
+## Using fast pruning
+
+Like constraint exclusion, fast pruning can optimize only queries that include a `WHERE` or join clause. However, the qualifiers in the `WHERE` clause must match a certain form. In both cases, the query planner avoids searching for data in partitions that can't hold the data required by the query.
+
+Fast pruning is controlled by a Boolean configuration parameter named `edb_enable_pruning`. Set `edb_enable_pruning` to `ON` to enable fast pruning of certain queries. Set `edb_enable_pruning` to `OFF` to disable fast pruning.
+
+!!! Note
+    Fast pruning can optimize queries against subpartitioned tables or range-partitioned tables only for tables that are partitioned on one column.
+
+For LIST-partitioned tables, EDB Postgres Advanced Server can fast prune queries that contain a `WHERE` clause that constrains a partitioning column to a literal value. For example, given a LIST-partitioned table such as:
+
+```sql
+CREATE TABLE sales_hist(..., country text, ...)
+PARTITION BY LIST(country)
+(
+  PARTITION americas VALUES('US', 'CA', 'MX'),
+  PARTITION europe VALUES('BE', 'NL', 'FR'),
+  PARTITION asia VALUES('JP', 'PK', 'CN'),
+  PARTITION others VALUES(DEFAULT)
+)
+```
+
+Fast pruning can reason about `WHERE` clauses such as:
+
+ `WHERE country = 'US'`
+
+ `WHERE country IS NULL`
+
+With the first `WHERE` clause, fast pruning eliminates partitions `europe`, `asia`, and `others` because those partitions can't hold rows that satisfy the qualifier `WHERE country = 'US'`.
+
+With the second `WHERE` clause, fast pruning eliminates partitions `americas`, `europe`, and `asia` because those partitions can't hold rows where `country IS NULL`.
+
+The operator specified in the `WHERE` clause must be an equals sign (=) or the equality operator appropriate for the data type of the partitioning column.
+
+For range-partitioned tables, EDB Postgres Advanced Server can fast prune queries that contain a `WHERE` clause that constrains a partitioning column to a literal value. However, the operator can be any of the following:
+
+ `>`
+
+ `>=`
+
+ `=`
+
+ `<=`
+
+ `<`
+
+Fast pruning also reasons about more complex expressions involving `AND` and `BETWEEN` operators, such as:
+
+```sql
+WHERE size > 100 AND size <= 200
+WHERE size BETWEEN 100 AND 200
+```
+
+Fast pruning can't prune based on expressions involving `OR` or `IN`.
For example, when querying a RANGE-partitioned table, such as: + +```sql +CREATE TABLE boxes(id int, size int, color text) + PARTITION BY RANGE(size) +( + PARTITION small VALUES LESS THAN(100), + PARTITION medium VALUES LESS THAN(200), + PARTITION large VALUES LESS THAN(300) +) +``` + +Fast pruning can reason about `WHERE` clauses such as: + + `WHERE size > 100 -- scan partitions 'medium' and 'large'` + + `WHERE size >= 100 -- scan partitions 'medium' and 'large'` + + `WHERE size = 100 -- scan partition 'medium'` + + `WHERE size <= 100 -- scan partitions 'small' and 'medium'` + + `WHERE size < 100 -- scan partition 'small'` + + `WHERE size > 100 AND size < 199 -- scan partition 'medium'` + + `WHERE size BETWEEN 100 AND 199 -- scan partition 'medium'` + + `WHERE color = 'red' AND size = 100 -- scan 'medium'` + + `WHERE color = 'red' AND (size > 100 AND size < 199) -- scan 'medium'` + +In each case, fast pruning requires that the qualifier refer to a partitioning column and literal value (or `IS NULL/IS NOT NULL`). + +!!! Note + Fast pruning can also optimize `DELETE` and `UPDATE` statements containing these `WHERE` clauses. + +
+ +example_partition_pruning + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_default_partition.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_default_partition.mdx
new file mode 100644
index 00000000000..17201a836a4
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_default_partition.mdx
@@ -0,0 +1,206 @@
+---
+title: "Defining a DEFAULT partition"
+redirects:
+ - /epas/latest/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+A `DEFAULT` partition captures any rows that don't fit into any other partition in a `LIST` partitioned or subpartitioned table. If you don't include a `DEFAULT` rule, any row that doesn't match one of the values in the partitioning constraints causes an error. Each `LIST` partition or subpartition can have its own `DEFAULT` rule.
+
+The syntax of a `DEFAULT` rule is:
+
+```sql
+PARTITION [<partition_name>] VALUES (DEFAULT)
+```
+
+Where `partition_name` specifies the name of the partition or subpartition that stores any rows that don't match the rules specified for other partitions.
+
+## Adding a DEFAULT partition
+
+You can create a list-partitioned table in which the server decides the partition for storing the data based on the value of the `country` column. In that case, if you attempt to add a row in which the `country` column contains a value not listed in the rules, an error is reported:
+
+```sql
+edb=# INSERT INTO sales VALUES
+edb-# (40, '3000x', 'IRELAND', '01-Mar-2012', '45000');
+ERROR: no partition of relation "sales_2012" found for row
+DETAIL: Partition key of the failing row contains (country) = (IRELAND).
+```
+
+This example creates such a table but adds a `DEFAULT` partition. The server stores any rows that don't match a value specified in the partitioning rules for the `europe`, `asia`, or `americas` partitions in the `others` partition.
+
+```sql
+CREATE TABLE sales
+(
+  dept_no   number,
+  part_no   varchar2,
+  country   varchar2(20),
+  date      date,
+  amount    number
+)
+PARTITION BY LIST(country)
+(
+  PARTITION europe VALUES('FRANCE', 'ITALY'),
+  PARTITION asia VALUES('INDIA', 'PAKISTAN'),
+  PARTITION americas VALUES('US', 'CANADA'),
+  PARTITION others VALUES (DEFAULT)
+);
+```
+
+## Testing the DEFAULT partition
+
+To test the `DEFAULT` partition, add a row with a value in the `country` column that doesn't match one of the countries specified in the partitioning constraints:
+
+```sql
+INSERT INTO sales VALUES
+  (40, '3000x', 'IRELAND', '01-Mar-2012', '45000');
+```
+
+Query the contents of the `sales` table to confirm that the previously rejected row is now stored in the `sales_others` partition:
+
+```sql
+edb=# SELECT tableoid::regclass, * FROM sales;
+__OUTPUT__
+ tableoid | dept_no | part_no | country | date | amount
+----------------+---------+---------+----------+--------------------+--------
+ sales_americas | 40 | 9519b | US | 12-APR-12 00:00:00 | 145000
+ sales_americas | 40 | 4577b | US | 11-NOV-12 00:00:00 | 25000
+ sales_americas | 30 | 7588b | CANADA | 14-DEC-12 00:00:00 | 50000
+ sales_americas | 30 | 9519b | CANADA | 01-FEB-12 00:00:00 | 75000
+ sales_americas | 30 | 4519b | CANADA | 08-APR-12 00:00:00 | 120000
+ sales_americas | 40 | 3788a | US | 12-MAY-12 00:00:00 | 4950
+ sales_americas | 40 | 4788a | US | 23-SEP-12 00:00:00 | 4950
+ sales_americas | 40 | 4788b | US | 09-OCT-12 00:00:00 | 15000
+ sales_europe | 10 | 4519b | FRANCE | 17-JAN-12 00:00:00 | 45000
+ sales_europe | 10 | 9519b | ITALY | 07-JUL-12 00:00:00 | 15000
+ sales_europe | 10 | 9519a | FRANCE | 18-AUG-12 00:00:00 | 650000
+ sales_europe | 10 | 9519b | FRANCE | 18-AUG-12 00:00:00 | 650000
+ sales_asia | 20 | 3788a | INDIA | 01-MAR-12 00:00:00 | 75000
+ sales_asia | 20 | 3788a | PAKISTAN | 04-JUN-12 00:00:00 | 37500
+ sales_asia | 20 | 3788b | INDIA | 21-SEP-12 00:00:00 | 5090
+ sales_asia | 20 | 4519a | INDIA | 18-OCT-12 00:00:00 | 650000
+ sales_asia | 20 | 4519b | INDIA | 02-DEC-12 00:00:00 | 5090
+ sales_others | 40 | 3000x | IRELAND | 01-MAR-12 00:00:00 | 45000
+(18 rows)
+```
+
+EDB Postgres Advanced Server provides the following methods to reassign the contents of a `DEFAULT` partition or subpartition:
+
+- You can use the `ALTER TABLE… ADD PARTITION` command to add a partition to a table with a `DEFAULT` rule. There can't be conflicting values between existing rows in the table and the values of the partition you're adding. You can alternatively use the `ALTER TABLE… SPLIT PARTITION` command to split an existing partition.
+- You can use the `ALTER TABLE… ADD SUBPARTITION` command to add a subpartition to a table with a `DEFAULT` rule. There can't be conflicting values between existing rows in the table and the values of the subpartition you're adding. You can alternatively use the `ALTER TABLE… SPLIT SUBPARTITION` command to split an existing subpartition.
+
+## Example: Adding a partition to a table with a DEFAULT partition
+
+This example uses the `ALTER TABLE... ADD PARTITION` command. It assumes there are no conflicting values between the existing rows in the table and the values of the partition to add.
+
+```sql
+edb=# ALTER TABLE sales ADD PARTITION africa values ('SOUTH AFRICA', 'KENYA');
+ALTER TABLE
+```
+
+If, however, the following rows are inserted into the table before the `africa` partition is added, the values conflict:
+
+```sql
+edb=# INSERT INTO sales (dept_no, country) VALUES
+(1,'FRANCE'),(2,'INDIA'),(3,'US'),(4,'SOUTH AFRICA'),(5,'NEPAL');
+INSERT 0 5
+```
+
+Row `(4,'SOUTH AFRICA')` conflicts with the `VALUES` list in the `ALTER TABLE... ADD PARTITION` statement, resulting in an error:
+
+```sql
+edb=# ALTER TABLE sales ADD PARTITION africa values ('SOUTH AFRICA', 'KENYA');
+ERROR: updated partition constraint for default partition "sales_others"
+would be violated by some row
+```
+
+## Example: Splitting a DEFAULT partition
+
+This example splits a `DEFAULT` partition, redistributing the partition's content between two new partitions in the table `sales`.
+
+This command inserts rows into the table, including rows that fall into the `DEFAULT` partition:
+
+```sql
+INSERT INTO sales VALUES
+  (10, '4519b', 'FRANCE', '17-Jan-2012', '45000'),
+  (10, '9519b', 'ITALY', '07-Jul-2012', '15000'),
+  (20, '3788a', 'INDIA', '01-Mar-2012', '75000'),
+  (20, '3788a', 'PAKISTAN', '04-Jun-2012', '37500'),
+  (30, '9519b', 'US', '12-Apr-2012', '145000'),
+  (30, '7588b', 'CANADA', '14-Dec-2012', '50000'),
+  (40, '4519b', 'SOUTH AFRICA', '08-Apr-2012', '120000'),
+  (40, '4519b', 'KENYA', '08-Apr-2012', '120000'),
+  (50, '3788a', 'CHINA', '12-May-2012', '4950');
+```
+
+The partitions include the `DEFAULT others` partition:
+
+```sql
+edb=# SELECT partition_name, high_value FROM ALL_TAB_PARTITIONS;
+__OUTPUT__
+ partition_name | high_value
+----------------+---------------------
+ EUROPE | 'FRANCE', 'ITALY'
+ ASIA | 'INDIA', 'PAKISTAN'
+ AMERICAS | 'US', 'CANADA'
+ OTHERS | DEFAULT
+(4 rows)
+```
+
+This command shows the rows distributed among the partitions:
+
+```sql
+edb=# SELECT tableoid::regclass, * FROM sales;
+__OUTPUT__
+ tableoid | dept_no| part_no | country | date | amount
+--------------+--------+---------+--------------+--------------------+-------
+sales_americas| 30 | 9519b | US | 12-APR-12 00:00:00 |145000
+sales_americas| 30 | 7588b | CANADA | 14-DEC-12 00:00:00 | 50000
+sales_europe | 10 | 4519b | FRANCE | 17-JAN-12 00:00:00 | 45000
+sales_europe | 10 | 9519b | ITALY | 07-JUL-12 00:00:00 | 15000
+sales_asia | 20 | 3788a | INDIA | 01-MAR-12 00:00:00 | 75000
+sales_asia | 20 | 3788a | PAKISTAN | 04-JUN-12 00:00:00 | 37500
+sales_others | 40 | 4519b | SOUTH AFRICA | 08-APR-12 00:00:00 |120000
+sales_others | 40 | 4519b | KENYA | 08-APR-12 00:00:00 |120000
+sales_others | 50 | 3788a | CHINA | 12-MAY-12 00:00:00 | 4950
+(9 rows)
+```
+
+This command splits the `DEFAULT others` partition into partitions named `africa` and `others`:
+
+```sql
+ALTER TABLE sales SPLIT PARTITION others VALUES
+  ('SOUTH AFRICA', 'KENYA')
+  INTO (PARTITION africa, PARTITION others);
+```
+
+The partitions now include the `africa` partition along with the `DEFAULT others` partition:
+
+```sql
+edb=# SELECT partition_name, high_value FROM ALL_TAB_PARTITIONS;
+__OUTPUT__
+ partition_name | high_value
+----------------+-------------------------
+ EUROPE | 'FRANCE', 'ITALY'
+ ASIA | 'INDIA', 'PAKISTAN'
+ AMERICAS | 'US', 'CANADA'
+ AFRICA | 'SOUTH AFRICA', 'KENYA'
+ OTHERS | DEFAULT
+(5 rows)
+```
+
+This command shows that the rows were redistributed across the new partitions:
+
+```sql
+edb=# SELECT tableoid::regclass, * FROM sales;
+__OUTPUT__
+ tableoid |dept_no | part_no | country | date | amount
+---------------+--------+---------+-------------+--------------------+-------
+sales_americas | 30 | 9519b | US | 12-APR-12 00:00:00 |145000
+sales_americas | 30 | 7588b | CANADA | 14-DEC-12 00:00:00 | 50000
+sales_europe | 10 | 4519b | FRANCE | 17-JAN-12 00:00:00 | 45000
+sales_europe | 10 | 9519b | ITALY | 07-JUL-12 00:00:00 | 15000
+sales_asia | 20 | 3788a | INDIA | 01-MAR-12 00:00:00 | 75000
+sales_asia | 20 | 3788a | PAKISTAN | 04-JUN-12 00:00:00 | 37500
+sales_africa | 40 | 4519b | SOUTH AFRICA| 08-APR-12 00:00:00 |120000
+sales_africa | 40 | 4519b | KENYA | 08-APR-12 00:00:00 |120000
+sales_others_1 | 50 | 3788a | CHINA | 12-MAY-12 00:00:00 | 4950
+(9 rows)
+```
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_maxvalue_partition.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_maxvalue_partition.mdx
new file mode 100644
index 00000000000..09100aadf71
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/defining_a_maxvalue_partition.mdx
@@ -0,0 +1,83 @@
+---
+title: "Defining a MAXVALUE partition"
+---
+
+A `MAXVALUE` partition or subpartition captures any rows that don't fit into any other partition in a range-partitioned or subpartitioned table. If you don't include a `MAXVALUE` rule, any row that exceeds the maximum limit specified by the partitioning rules causes an error. Each partition or subpartition can have its own `MAXVALUE` rule.
+
+The syntax of a `MAXVALUE` rule is:
+
+```sql
+PARTITION [<partition_name>] VALUES LESS THAN (MAXVALUE)
+```
+
+Where `partition_name` specifies the name of the partition that stores any rows that don't match the rules specified for other partitions.
+
+[This example](../06_specifying_multiple_partitioning_keys_in_a_range_partitioned_table/) created a range-partitioned table in which the data was partitioned based on the value of the `date` column. If you attempt to add a row with a `date` value that exceeds a date listed in the partitioning constraints, EDB Postgres Advanced Server reports an error.
+
+```sql
+edb=# INSERT INTO sales VALUES
+edb-# (40, '3000x', 'IRELAND', '01-Mar-2013', '45000');
+ERROR: no partition of relation "sales" found for row
+DETAIL: Partition key of the failing row contains (date) = (01-MAR-13 00:00:00).
+```
+
+This `CREATE TABLE` command creates the same table but with a `MAXVALUE` partition. Instead of throwing an error, the server stores any rows that don't match the previous partitioning constraints in the `others` partition.
+
+```sql
+CREATE TABLE sales
+(
+  dept_no   number,
+  part_no   varchar2,
+  country   varchar2(20),
+  date      date,
+  amount    number
+)
+PARTITION BY RANGE(date)
+(
+  PARTITION q1_2012 VALUES LESS THAN('2012-Apr-01'),
+  PARTITION q2_2012 VALUES LESS THAN('2012-Jul-01'),
+  PARTITION q3_2012 VALUES LESS THAN('2012-Oct-01'),
+  PARTITION q4_2012 VALUES LESS THAN('2013-Jan-01'),
+  PARTITION others VALUES LESS THAN (MAXVALUE)
+);
+```
+
+To test the `MAXVALUE` partition, add a row with a value in the `date` column that exceeds the last date value listed in a partitioning rule. The server stores the row in the `others` partition.
+ +```sql +INSERT INTO sales VALUES + (40, '3000x', 'IRELAND', '01-Mar-2013', '45000'); +``` + +Query the contents of the `sales` table to confirm that the previously rejected row is now stored in the `sales_others` partition: + +```sql +edb=# SELECT tableoid::regclass, * FROM sales; +__OUTPUT__ + tableoid | dept_no | part_no | country | date | amount +---------------+---------+---------+----------+--------------------+-------- + sales_q1_2012 | 10 | 4519b | FRANCE | 17-JAN-12 00:00:00 | 45000 + sales_q1_2012 | 20 | 3788a | INDIA | 01-MAR-12 00:00:00 | 75000 + sales_q1_2012 | 30 | 9519b | CANADA | 01-FEB-12 00:00:00 | 75000 + sales_q2_2012 | 40 | 9519b | US | 12-APR-12 00:00:00 | 145000 + sales_q2_2012 | 20 | 3788a | PAKISTAN | 04-JUN-12 00:00:00 | 37500 + sales_q2_2012 | 30 | 4519b | CANADA | 08-APR-12 00:00:00 | 120000 + sales_q2_2012 | 40 | 3788a | US | 12-MAY-12 00:00:00 | 4950 + sales_q3_2012 | 10 | 9519b | ITALY | 07-JUL-12 00:00:00 | 15000 + sales_q3_2012 | 10 | 9519a | FRANCE | 18-AUG-12 00:00:00 | 650000 + sales_q3_2012 | 10 | 9519b | FRANCE | 18-AUG-12 00:00:00 | 650000 + sales_q3_2012 | 20 | 3788b | INDIA | 21-SEP-12 00:00:00 | 5090 + sales_q3_2012 | 40 | 4788a | US | 23-SEP-12 00:00:00 | 4950 + sales_q4_2012 | 40 | 4577b | US | 11-NOV-12 00:00:00 | 25000 + sales_q4_2012 | 30 | 7588b | CANADA | 14-DEC-12 00:00:00 | 50000 + sales_q4_2012 | 40 | 4788b | US | 09-OCT-12 00:00:00 | 15000 + sales_q4_2012 | 20 | 4519a | INDIA | 18-OCT-12 00:00:00 | 650000 + sales_q4_2012 | 20 | 4519b | INDIA | 02-DEC-12 00:00:00 | 5090 + sales_others | 40 | 3000x | IRELAND | 01-MAR-13 00:00:00 | 45000 +(18 rows) +``` + +EDB Postgres Advanced Server doesn't have a way to reassign the contents of a `MAXVALUE` partition or subpartition. + +- You can't use the `ALTER TABLE… ADD PARTITION` statement to add a partition to a table with a `MAXVALUE` rule. However, you can use the `ALTER TABLE… SPLIT PARTITION` statement to split an existing partition. +- You can't use the `ALTER TABLE… ADD SUBPARTITION` statement to add a subpartition to a table with a `MAXVALUE` rule. However, you can split an existing subpartition with the `ALTER TABLE… SPLIT SUBPARTITION` statement. diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/index.mdx new file mode 100644 index 00000000000..22224ea2759 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/05_handling_stray_values_in_a_list_or_range_partitioned_table/index.mdx @@ -0,0 +1,17 @@ +--- +title: "Handling stray values in a LIST or RANGE partitioned table" +indexCards: simple +navigation: + - defining_a_default_partition + - defining_a_maxvalue_partition +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.118.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.343.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.067.html" +--- + + + +A `DEFAULT` or `MAXVALUE` partition or subpartition captures any rows that don't meet the other partitioning rules defined for a table. + diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/06_specifying_multiple_partitioning_keys_in_a_range_partitioned_table.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/06_specifying_multiple_partitioning_keys_in_a_range_partitioned_table.mdx new file mode 100644 index 00000000000..5c80faa758b --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/06_specifying_multiple_partitioning_keys_in_a_range_partitioned_table.mdx @@ -0,0 +1,55 @@ +--- +title: "Specifying multiple partitioning keys in a RANGE partitioned table" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.119.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.344.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.068.html" +redirects: + - /epas/latest/epas_compat_table_partitioning/06_specifying_multiple_partitioning_keys_in_a_range_partitioned_table/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can often improve performance by specifying multiple key columns for a `RANGE` partitioned table. If you often select rows using comparison operators on a small set of columns based on a greater-than or less-than value, consider using those columns in `RANGE` partitioning rules. + +Range-partitioned table definitions can include multiple columns in the partitioning key. 
To specify multiple partitioning keys for a range-partitioned table, include the column names in a comma-separated list after the `PARTITION BY RANGE` clause:
+
+```sql
+CREATE TABLE sales
+(
+  dept_no     number,
+  part_no     varchar2,
+  country     varchar2(20),
+  sale_year   number,
+  sale_month  number,
+  sale_day    number,
+  amount      number
+)
+PARTITION BY RANGE(sale_year, sale_month)
+(
+  PARTITION q1_2012
+    VALUES LESS THAN(2012, 4),
+  PARTITION q2_2012
+    VALUES LESS THAN(2012, 7),
+  PARTITION q3_2012
+    VALUES LESS THAN(2012, 10),
+  PARTITION q4_2012
+    VALUES LESS THAN(2013, 1)
+);
+```
+
+If a table is created with multiple partitioning keys, you must specify multiple key values when querying the table to take full advantage of partition pruning:
+
+```sql
+edb=# EXPLAIN SELECT * FROM sales WHERE sale_year = 2012 AND sale_month = 8;
+__OUTPUT__
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Append (cost=0.00..14.35 rows=1 width=250)
+ -> Seq Scan on sales_q3_2012 (cost=0.00..14.35 rows=1 width=250)
+ Filter: ((sale_year = '2012'::numeric) AND (sale_month = '8'::numeric))
+(3 rows)
+```
+
+Since all rows with a value of `8` in the `sale_month` column and a value of `2012` in the `sale_year` column are stored in the `q3_2012` partition, EDB Postgres Advanced Server searches only that partition.
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx
new file mode 100644
index 00000000000..7df3532b769
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx
@@ -0,0 +1,52 @@
+---
+title: "Retrieving information about a partitioned table"
+legacyRedirectsGenerated:
+ # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.120.html"
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.345.html"
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.069.html"
+redirects:
+ - /epas/latest/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+EDB Postgres Advanced Server provides five system catalog views that you can use to view information about the structure of partitioned tables.
+
+## Querying the partitioning views
+
+You can query the following views to retrieve information about partitioned and subpartitioned tables:
+
+- `ALL_PART_TABLES`
+- `ALL_TAB_PARTITIONS`
+- `ALL_TAB_SUBPARTITIONS`
+- `ALL_PART_KEY_COLUMNS`
+- `ALL_SUBPART_KEY_COLUMNS`
+
+The structure of each view is explained in [Table partitioning views reference](../../../reference/application_programmer_reference/01_table_partitioning_views_reference/#table_partitioning_views_reference). If you're using the EDB-PSQL client, you can also learn about the structure of a view by entering:
+
+ `\d <view_name>`
+
+Where `view_name` specifies the name of the table partitioning view.
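+
+For example, to describe the structure of the `ALL_TAB_PARTITIONS` view (a sketch, assuming an EDB-PSQL session on a database where the Oracle-compatible catalog views are installed):
+
+```sql
+\d ALL_TAB_PARTITIONS
+```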
+
+Querying a view can provide information about the structure of a partitioned or subpartitioned table. For example, this code displays the subpartition and partition names for a subpartitioned table:
+
+```sql
+edb=# SELECT subpartition_name, partition_name FROM ALL_TAB_SUBPARTITIONS;
+__OUTPUT__
+ subpartition_name | partition_name
+-------------------+----------------
+ EUROPE_2011 | EUROPE
+ EUROPE_2012 | EUROPE
+ ASIA_2011 | ASIA
+ ASIA_2012 | ASIA
+ AMERICAS_2011 | AMERICAS
+ AMERICAS_2012 | AMERICAS
+(6 rows)
+```
+
+
+ +table_partitioning_views_reference + +
diff --git a/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/index.mdx b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/index.mdx
new file mode 100644
index 00000000000..7f26f50e8a3
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/epas_compat_table_partitioning/index.mdx
@@ -0,0 +1,17 @@
+---
+navTitle: Table partitioning
+title: "Using table partitioning"
+indexCards: simple
+description: "How to take advantage of the table partitioning support in EDB Postgres Advanced Server"
+redirects:
+ - /epas/latest/epas_compat_table_partitioning/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+In a partitioned table, one logically large table is broken into smaller physical pieces. Partitioning can provide several benefits:
+
+- Query performance can improve dramatically, particularly when most of the heavily accessed rows of the table are in a single partition or a small number of partitions. Partitioning allows you to omit the partition column from the front of an index. This approach reduces index size and makes it more likely that the heavily used parts of the index fit in memory.
+- When a query or update accesses a large percentage of a single partition, performance might improve. This improvement happens because the server performs a sequential scan of the partition instead of using an index and random-access reads scattered across the whole table.
+- If you plan the requirement into the partitioning design, you can implement a bulk load or unload by adding or removing partitions. `ALTER TABLE` is far faster than a bulk operation. It also avoids the `VACUUM` overhead caused by a bulk `DELETE`.
+- You can migrate seldom-used data to less-expensive or slower storage media.
+
+Table partitioning is worthwhile when a table is becoming very large. The exact point at which a table benefits from partitioning depends on the application. A good guideline is for the size of the table to exceed the physical memory of the database server.
diff --git a/product_docs/docs/epas/17/application_programming/index.mdx b/product_docs/docs/epas/17/application_programming/index.mdx
new file mode 100644
index 00000000000..e4987f9a7cc
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/index.mdx
@@ -0,0 +1,6 @@
+---
+title: "Application programming"
+indexCards: simple
+---
+
+EDB Postgres Advanced Server includes features designed to increase application programmer productivity, such as user-defined objects, autonomous transactions, synonyms, and 200+ prepackaged utility functions.
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx
new file mode 100644
index 00000000000..27d545f2d7d
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx
@@ -0,0 +1,86 @@
+---
+title: "Default optimization modes"
+legacyRedirectsGenerated:
+ # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.038.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.121.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.130.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/01_default_optimization_modes/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can choose an optimization mode as the default setting for an EDB Postgres Advanced Server database cluster. You can also change this setting on a per-session basis by using the `ALTER SESSION` command as well as in individual `DELETE`, `SELECT`, and `UPDATE` commands in an optimizer hint. The configuration parameter that controls these default modes is `OPTIMIZER_MODE`. + +The table shows the possible values. + +| Hint | Description | +| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `ALL_ROWS` | Optimizes for retrieving all rows of the result set. | +| `CHOOSE` | Does no default optimization based on assumed number of rows to retrieve from the result set. This is the default. | +| `FIRST_ROWS` | Optimizes for retrieving only the first row of the result set. | +| `FIRST_ROWS_10` | Optimizes for retrieving the first 10 rows of the results set. | +| `FIRST_ROWS_100` | Optimizes for retrieving the first 100 rows of the result set. | +| `FIRST_ROWS_1000` | Optimizes for retrieving the first 1000 rows of the result set. | +| `FIRST_ROWS(n)` | Optimizes for retrieving the first *n* rows of the result set. You can't use this form as the object of the `ALTER SESSION SET OPTIMIZER_MODE` command. You can use it only in the form of a hint in a SQL command. | + +These optimization modes are based on the assumption that the client submitting the SQL command is interested in viewing only the first *n* rows of the result set and not the remainder of the result set. Resources allocated to the query are adjusted as such. + +## Example: Specifying the number of rows to retrieve in the result set + +Alter the current session to optimize for retrieval of the first 10 rows of the result set: + +```sql +ALTER SESSION SET OPTIMIZER_MODE = FIRST_ROWS_10; +``` + +## Example: Showing the current value of the OPTIMIZER_MODE parameter + +You can show the current value of the `OPTIMIZER_MODE` parameter by using the `SHOW` command. This command depends on the utility. 
In PSQL, use the `SHOW` command as follows: + +```sql +SHOW OPTIMIZER_MODE; +__OUTPUT__ +optimizer_mode +----------------- + first_rows_10 +(1 row) +``` + +The `SHOW` command compatible with Oracle databases has the following syntax: + +```sql +SHOW PARAMETER OPTIMIZER_MODE; + +NAME +-------------------------------------------------- +VALUE +-------------------------------------------------- +optimizer_mode +first_rows_10 +``` + +This example shows an optimization mode used in a `SELECT` command as a hint: + +```sql +SELECT /*+ FIRST_ROWS(7) */ * FROM emp; +__OUTPUT__ +empno| ename | job | mgr | hiredate | sal | comm | deptno +-----+-------+----------+------+--------------------+---------+-------+------- +7369 | SMITH | CLERK | 7902 | 17-DEC-80 00:00:00 | 800.00 | | 20 +7499 | ALLEN | SALESMAN | 7698 | 20-FEB-81 00:00:00 | 1600.00 | 300.00| 30 +7521 | WARD | SALESMAN | 7698 | 22-FEB-81 00:00:00 | 1250.00 | 500.00| 30 +7566 | JONES | MANAGER | 7839 | 02-APR-81 00:00:00 | 2975.00 | | 20 +7654 | MARTIN| SALESMAN | 7698 | 28-SEP-81 00:00:00 | 1250.00 |1400.00| 30 +7698 | BLAKE | MANAGER | 7839 | 01-MAY-81 00:00:00 | 2850.00 | | 30 +7782 | CLARK | MANAGER | 7839 | 09-JUN-81 00:00:00 | 2450.00 | | 10 +7788 | SCOTT | ANALYST | 7566 | 19-APR-87 00:00:00 | 3000.00 | | 20 +7839 | KING | PRESIDENT| | 17-NOV-81 00:00:00 | 5000.00 | | 10 +7844 | TURNER| SALESMAN | 7698 | 08-SEP-81 00:00:00 | 1500.00 | 0.00 | 30 +7876 | ADAMS | CLERK | 7788 | 23-MAY-87 00:00:00 | 1100.00 | | 20 +7900 | JAMES | CLERK | 7698 | 03-DEC-81 00:00:00 | 950.00 | | 30 +7902 | FORD | ANALYST | 7566 | 03-DEC-81 00:00:00 | 3000.00 | | 20 +7934 | MILLER| CLERK | 7782 | 23-JAN-82 00:00:00 | 1300.00 | | 10 +(14 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx new file mode 100644 index 00000000000..73931a3ad2d --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx @@ -0,0 +1,278 @@ +--- +title: "Access method hints" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.039.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.122.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.131.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/02_access_method_hints/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +The following hints influence how the optimizer accesses relations to create the result set. + +| Hint | Description | +| --------------------------------- | ----------------------------------------------------- | +| `FULL(table)` | Perform a full sequential scan on `table`. | +| `INDEX(table [ index ] [...])` | Use `index` on `table` to access the relation. | +| `NO_INDEX(table [ index ] [...])` | Don't use `index` on `table` to access the relation. | + +In addition, you can use the `ALL_ROWS`, `FIRST_ROWS`, and `FIRST_ROWS(n)` hints. 
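+
+As a quick illustration of the hint syntax (a sketch, assuming the `pgbench_accounts` table and its `pgbench_accounts_pkey` index created in the examples that follow), the hint is embedded in a comment immediately after the `SELECT` keyword:
+
+```sql
+-- Ask the planner to access pgbench_accounts through the named index
+SELECT /*+ INDEX(pgbench_accounts pgbench_accounts_pkey) */ *
+FROM pgbench_accounts WHERE aid = 100;
+```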
+
+`INDEX` and `NO_INDEX` hints on a partitioned table are expanded internally to include the corresponding inherited child indexes and are applied in later processing.
+
+## About the examples
+
+The sample application doesn't have enough data to show the effects of optimizer hints. Thus, the remainder of these examples use a banking database created by the `pgbench` application located in the EDB Postgres Advanced Server `bin` subdirectory.
+
+## Example: Create a sample database and tables
+
+The following steps create a database named `bank`, populated by the tables `pgbench_accounts`, `pgbench_branches`, `pgbench_tellers`, and `pgbench_history`. The `-s 20` option specifies a scaling factor of 20, which results in the creation of 20 branches. Each branch has 100,000 accounts. The result is a total of 2,000,000 rows in the `pgbench_accounts` table and 20 rows in the `pgbench_branches` table. Ten tellers are assigned to each branch, resulting in a total of 200 rows in the `pgbench_tellers` table.
+
+The following initializes the `pgbench` application in the `bank` database.
+
+```shell
+createdb -U enterprisedb bank
+CREATE DATABASE
+
+pgbench -i -s 20 -U enterprisedb bank
+
+NOTICE: table "pgbench_history" does not exist, skipping
+NOTICE: table "pgbench_tellers" does not exist, skipping
+NOTICE: table "pgbench_accounts" does not exist, skipping
+NOTICE: table "pgbench_branches" does not exist, skipping
+creating tables...
+100000 of 2000000 tuples (5%) done (elapsed 0.11 s, remaining 2.10 s)
+200000 of 2000000 tuples (10%) done (elapsed 0.22 s, remaining 1.98 s)
+300000 of 2000000 tuples (15%) done (elapsed 0.33 s, remaining 1.84 s)
+400000 of 2000000 tuples (20%) done (elapsed 0.42 s, remaining 1.67 s)
+500000 of 2000000 tuples (25%) done (elapsed 0.52 s, remaining 1.57 s)
+600000 of 2000000 tuples (30%) done (elapsed 0.62 s, remaining 1.45 s)
+700000 of 2000000 tuples (35%) done (elapsed 0.73 s, remaining 1.35 s)
+800000 of 2000000 tuples (40%) done (elapsed 0.87 s, remaining 1.31 s)
+900000 of 2000000 tuples (45%) done (elapsed 0.98 s, remaining 1.19 s)
+1000000 of 2000000 tuples (50%) done (elapsed 1.09 s, remaining 1.09 s)
+1100000 of 2000000 tuples (55%) done (elapsed 1.22 s, remaining 1.00 s)
+1200000 of 2000000 tuples (60%) done (elapsed 1.36 s, remaining 0.91 s)
+1300000 of 2000000 tuples (65%) done (elapsed 1.51 s, remaining 0.82 s)
+1400000 of 2000000 tuples (70%) done (elapsed 1.65 s, remaining 0.71 s)
+1500000 of 2000000 tuples (75%) done (elapsed 1.78 s, remaining 0.59 s)
+1600000 of 2000000 tuples (80%) done (elapsed 1.93 s, remaining 0.48 s)
+1700000 of 2000000 tuples (85%) done (elapsed 2.10 s, remaining 0.37 s)
+1800000 of 2000000 tuples (90%) done (elapsed 2.23 s, remaining 0.25 s)
+1900000 of 2000000 tuples (95%) done (elapsed 2.37 s, remaining 0.12 s)
+2000000 of 2000000 tuples (100%) done (elapsed 2.48 s, remaining 0.00 s)
+vacuum...
+set primary keys...
+done.
+```
+
+A total of 500,000 transactions are then processed. These transactions populate the `pgbench_history` table with 500,000 rows.
+
+```shell
+pgbench -U enterprisedb -t 500000 bank
+
+starting vacuum...end.
+transaction type: +scaling factor: 20 +query mode: simple +number of clients: 1 +number of threads: 1 +number of transactions per client: 500000 +number of transactions actually processed: 500000/500000 +latency average: 0.000 ms +tps = 1464.338375 (including connections establishing) +tps = 1464.350357 (excluding connections establishing) +``` + +The following are the table definitions: + +```sql +\d pgbench_accounts + + Table "public.pgbench_accounts" + Column | Type | Modifiers +----------+---------------+----------- + aid | integer | not null + bid | integer | + abalance | integer | + filler | character(84) | +Indexes: + "pgbench_accounts_pkey" PRIMARY KEY, btree (aid) + +\d pgbench_branches + + Table "public.pgbench_branches" + Column | Type | Modifiers +----------+---------------+----------- + bid | integer | not null + bbalance | integer | + filler | character(88) | +Indexes: + "pgbench_branches_pkey" PRIMARY KEY, btree (bid) + +\d pgbench_tellers + + Table "public.pgbench_tellers" + Column | Type | Modifiers +----------+---------------+----------- + tid | integer | not null + bid | integer | + tbalance | integer | + filler | character(84) | +Indexes: + "pgbench_tellers_pkey" PRIMARY KEY, btree (tid) + +\d pgbench_history + + Table "public.pgbench_history" + Column | Type | Modifiers +--------+-----------------------------+----------- + tid | integer | + bid | integer | + aid | integer | + delta | integer | + mtime | timestamp without time zone | + filler | character(22) | +``` + +The `EXPLAIN` command shows the plan selected by the query planner. In this example, `aid` is the primary key column, so an indexed search is used on index `pgbench_accounts_pkey`: + +```sql +EXPLAIN SELECT * FROM pgbench_accounts WHERE aid = 100; + + QUERY PLAN +----------------------------------------------------------------------------- +------------------ +Index Scan using pgbench_accounts_pkey on pgbench_accounts (cost=0.43..8.45 +rows=1 width=97) + Index Cond: (aid = 100) +(2 rows) +``` + +## Example: FULL hint + +The `FULL` hint forces a full sequential scan instead of using the index: + +```sql +EXPLAIN SELECT /*+ FULL(pgbench_accounts) */ * FROM pgbench_accounts WHERE +aid = 100; + + QUERY PLAN +--------------------------------------------------------------------- +Seq Scan on pgbench_accounts (cost=0.00..58781.69 rows=1 width=97) + Filter: (aid = 100) +(2 rows) +``` + +## Example: NO_INDEX hint + +The `NO_INDEX` hint forces a parallel sequential scan instead of using the index: + +```sql +EXPLAIN SELECT /*+ NO_INDEX(pgbench_accounts pgbench_accounts_pkey) */ * +FROM pgbench_accounts WHERE aid = 100; + + QUERY PLAN +----------------------------------------------------------------------------- +------- + Gather (cost=1000.00..45094.80 rows=1 width=97) + Workers Planned: 2 + -> Parallel Seq Scan on pgbench_accounts (cost=0.00..44094.70 rows=1 + width=97) + Filter: (aid = 100) +(4 rows) +``` + +## Example: Tracing optimizer hints + +You can obtain more detailed information than the `EXPLAIN` command provides about whether the planner used a hint. 
To do so, set the `trace_hints` configuration parameter as follows:
+
+```sql
+SET trace_hints TO on;
+```
+
+The `SELECT` command with the `NO_INDEX` hint shows the additional information produced when you set the `trace_hints` configuration parameter:
+
+```sql
+EXPLAIN SELECT /*+ NO_INDEX(pgbench_accounts pgbench_accounts_pkey) */ *
+FROM pgbench_accounts WHERE aid = 100;
+
+INFO: [HINTS] Index Scan of [pgbench_accounts].[pgbench_accounts_pkey]
+rejected due to NO_INDEX hint.
+ QUERY PLAN
+-----------------------------------------------------------------------------
+-------
+ Gather (cost=1000.00..45094.80 rows=1 width=97)
+ Workers Planned: 2
+ -> Parallel Seq Scan on pgbench_accounts (cost=0.00..44094.70 rows=1
+width=97)
+ Filter: (aid = 100)
+(4 rows)
+```
+
+## Example: Hint ignored
+
+If a hint is ignored, the `INFO: [HINTS]` line doesn't appear. This might indicate a syntax error or some other misspelling in the hint. In this example, the index name is misspelled.
+
+```sql
+EXPLAIN SELECT /*+ NO_INDEX(pgbench_accounts pgbench_accounts_xxx) */ * FROM
+pgbench_accounts WHERE aid = 100;
+
+ QUERY PLAN
+-----------------------------------------------------------------------------
+------------------
+Index Scan using pgbench_accounts_pkey on pgbench_accounts (cost=0.43..8.45
+rows=1 width=97)
+ Index Cond: (aid = 100)
+(2 rows)
+```
+
+## Example: INDEX hint for a partitioned table
+
+
+```sql
+CREATE TABLE t_1384(col1 int, col2 int, col3 int)
+PARTITION BY RANGE(col1)
+(PARTITION p1 VALUES LESS THAN(500),
+PARTITION p2 VALUES LESS THAN(1000));
+
+ALTER TABLE t_1384 ADD PRIMARY KEY(col1);
+
+CREATE INDEX idx1 ON t_1384(col2);
+
+CREATE INDEX idx2 ON t_1384(col1, col2);
+
+SET enable_hints = true;
+
+SET trace_hints TO on;
+
+-- Use primary index
+EXPLAIN (COSTS OFF) SELECT /*+ INDEX(s t_1384_pkey) */ * FROM t_1384 s
+WHERE col2 = 10;
+
+INFO: [HINTS] SeqScan of [s] rejected due to INDEX hint.
+INFO: [HINTS] Parallel SeqScan of [s] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p1_col1_col2_idx] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p1_col2_idx] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p1_pkey] accepted.
+INFO: [HINTS] SeqScan of [s] rejected due to INDEX hint.
+INFO: [HINTS] Parallel SeqScan of [s] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p2_col1_col2_idx] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p2_col2_idx] rejected due to INDEX hint.
+INFO: [HINTS] Index Scan of [s].[t_1384_p2_pkey] accepted.
+ QUERY PLAN
+-----------------------------------------------------
+Append
+ -> Bitmap Heap Scan on t_1384_p1 s_1
+ Recheck Cond: (col2 = 10)
+ -> Bitmap Index Scan on t_1384_p1_col2_idx
+ Index Cond: (col2 = 10)
+ -> Bitmap Heap Scan on t_1384_p2 s_2
+ Recheck Cond: (col2 = 10)
+ -> Bitmap Index Scan on t_1384_p2_col2_idx
+ Index Cond: (col2 = 10)
+(9 rows)
+```
\ No newline at end of file
If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.040.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.123.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.132.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/03_specifying_a_join_order/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +Include the `ORDERED` directive to instruct the query optimizer to join tables in the order in which they're listed in the `FROM` clause. If you don't include the `ORDERED` keyword, the query optimizer chooses the order in which to join the tables. + +For example, the following command allows the optimizer to choose the order in which to join the tables listed in the `FROM` clause: + +```sql +SELECT e.ename, d.dname, h.startdate + FROM emp e, dept d, jobhist h + WHERE d.deptno = e.deptno + AND h.empno = e.empno; +``` + +The following command instructs the optimizer to join the tables in the order specified: + +```sql +SELECT /*+ ORDERED */ e.ename, d.dname, h.startdate + FROM emp e, dept d, jobhist h + WHERE d.deptno = e.deptno + AND h.empno = e.empno; +``` + +In the `ORDERED` version of the command, EDB Postgres Advanced Server first joins `emp e` with `dept d` before joining the results with `jobhist h`. Without the `ORDERED` directive, the query optimizer selects the join order. + +!!! Note + The `ORDERED` directive doesn't work for Oracle-style outer joins (joins that contain a + sign). diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx new file mode 100644 index 00000000000..b58494af652 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx @@ -0,0 +1,140 @@ +--- +title: "Joining relations hints" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.041.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.124.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.133.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/04_joining_relations_hints/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +When you join two tables, you can use any of three plans to perform the join. + +- **Nested loop join** — A table is scanned once for every row in the other joined table. +- **Merge sort join** — Each table is sorted on the join attributes before the join starts. The two tables are then scanned in parallel, and the matching rows are combined to form the join rows. +- **Hash join** — A table is scanned and its join attributes are loaded into a hash table using its join attributes as hash keys. 
The other joined table is then scanned and its join attributes are used as hash keys to locate the matching rows from the first table. + +## List of optimizer hints for join plans + +The following table lists the optimizer hints that you can use to influence the planner to use one type of join plan over another. + +| Hint | Description | +| --------------------------- | ------------------------------------------ | +| `USE_HASH(table [...])` | Use a hash join for `table`. | +| `NO_USE_HASH(table [...])` | Don't use a hash join for `table`. | +| `USE_MERGE(table [...])` | Use a merge sort join for `table`. | +| `NO_USE_MERGE(table [...])` | Don't use a merge sort join for `table`. | +| `USE_NL(table [...])` | Use a nested loop join for `table`. | +| `NO_USE_NL(table [...])` | Don't use a nested loop join for `table`. | + +## Example: Hash join + +In this example, the `USE_HASH` hint is used for a join on the `pgbench_branches` and `pgbench_accounts` tables. The query plan shows that a hash join is used by creating a hash table from the join attribute of the `pgbench_branches` table: + +```sql +EXPLAIN SELECT /*+ USE_HASH(b) */ b.bid, a.aid, abalance FROM +pgbench_branches b, pgbench_accounts a WHERE b.bid = a.bid; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------ + Hash Join (cost=21.45..81463.06 rows=2014215 width=12) + Hash Cond: (a.bid = b.bid) + -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=12) + -> Hash (cost=21.20..21.20 rows=20 width=4) + -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4) +(5 rows) +``` + +Next, the `NO_USE_HASH(a b)` hint forces the planner to use an approach other than hash tables. The result is a merge join. + +```sql +EXPLAIN SELECT /*+ NO_USE_HASH(a b) */ b.bid, a.aid, abalance FROM +pgbench_branches b, pgbench_accounts a WHERE b.bid = a.bid; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------------ + Merge Join (cost=333526.08..368774.94 rows=2014215 width=12) + Merge Cond: (b.bid = a.bid) + -> Sort (cost=21.63..21.68 rows=20 width=4) + Sort Key: b.bid + -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4) + -> Materialize (cost=333504.45..343575.53 rows=2014215 width=12) + -> Sort (cost=333504.45..338539.99 rows=2014215 width=12) + Sort Key: a.bid + -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=12) +(9 rows) +``` + +Finally, the `USE_MERGE` hint forces the planner to use a merge join: + +```sql +EXPLAIN SELECT /*+ USE_MERGE(a) */ b.bid, a.aid, abalance FROM +pgbench_branches b, pgbench_accounts a WHERE b.bid = a.bid; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------------ + Merge Join (cost=333526.08..368774.94 rows=2014215 width=12) + Merge Cond: (b.bid = a.bid) + -> Sort (cost=21.63..21.68 rows=20 width=4) + Sort Key: b.bid + -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4) + -> Materialize (cost=333504.45..343575.53 rows=2014215 width=12) + -> Sort (cost=333504.45..338539.99 rows=2014215 width=12) + Sort Key: a.bid + -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=12) +(9 rows) +``` + +## Example: Three-table join + +In this three-table join example, the planner first performs a hash join on the `pgbench_branches` and `pgbench_history` tables. Then it performs a hash join of the result with the `pgbench_accounts` table. 
+
+```sql
+EXPLAIN SELECT h.mtime, h.delta, b.bid, a.aid FROM pgbench_history h, pgbench_branches b,
+pgbench_accounts a WHERE h.bid = b.bid AND h.aid = a.aid;
+__OUTPUT__
+ QUERY PLAN
+-----------------------------------------------------------------------------
+-----------
+ Hash Join (cost=86814.29..123103.29 rows=500000 width=20)
+ Hash Cond: (h.aid = a.aid)
+ -> Hash Join (cost=21.45..15081.45 rows=500000 width=20)
+ Hash Cond: (h.bid = b.bid)
+ -> Seq Scan on pgbench_history h (cost=0.00..8185.00 rows=500000 width=20)
+ -> Hash (cost=21.20..21.20 rows=20 width=4)
+ -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4)
+ -> Hash (cost=53746.15..53746.15 rows=2014215 width=4)
+ -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=4)
+(9 rows)
+```
+
+This plan is altered by using hints to force a combination of a merge sort join and a hash join:
+
+```sql
+EXPLAIN SELECT /*+ USE_MERGE(h b) USE_HASH(a) */ h.mtime, h.delta, b.bid, a.aid FROM
+pgbench_history h, pgbench_branches b, pgbench_accounts a WHERE h.bid = b.bid AND h.aid = a.aid;
+__OUTPUT__
+ QUERY PLAN
+-----------------------------------------------------------------------------
+---------------------
+ Hash Join (cost=152583.39..182562.49 rows=500000 width=20)
+ Hash Cond: (h.aid = a.aid)
+ -> Merge Join (cost=65790.55..74540.65 rows=500000 width=20)
+ Merge Cond: (b.bid = h.bid)
+ -> Sort (cost=21.63..21.68 rows=20 width=4)
+ Sort Key: b.bid
+ -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4)
+ -> Materialize (cost=65768.92..68268.92 rows=500000 width=20)
+ -> Sort (cost=65768.92..67018.92 rows=500000 width=20)
+ Sort Key: h.bid
+ -> Seq Scan on pgbench_history h (cost=0.00..8185.00 rows=500000 width=20)
+ -> Hash (cost=53746.15..53746.15 rows=2014215 width=4)
+ -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=4)
+(13 rows)
+```
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx
new file mode 100644
index 00000000000..8ccd89c0269
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx
@@ -0,0 +1,150 @@
+---
+title: "Global hints"
+legacyRedirectsGenerated:
+ # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.042.html"
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.125.html"
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.134.html"
+redirects:
+ - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/05_global_hints/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+In addition to applying hints directly to tables that are referenced in the SQL command, you can apply hints to tables that appear in a view when the view is referenced in the SQL command. The hint doesn't appear in the view but in the SQL command that references the view.
+
+When specifying a hint that applies to a table in a view, give the view and table names in dot notation in the hint argument list.
+
+## Synopsis
+
+```sql
+<hint>(<view>.<table>)
+```
+
+## Parameters
+
+`hint`
+
+ Any of the hints in the tables [Access method hints](02_access_method_hints/#access_method_hints) and [Joining relations hints](04_joining_relations_hints/#joining_relations_hints).
+
+`view`
+
+ The name of the view containing `table`.
+
+`table`
+
+ The table on which to apply the hint.
+
+## Example: Applying hints to a stored view
+
+A view named `tx` is created from the three-table join of `pgbench_history`, `pgbench_branches`, and `pgbench_accounts`, shown in the last example of [Joining relations hints](04_joining_relations_hints/#joining_relations_hints).
+
+```sql
+CREATE VIEW tx AS SELECT h.mtime, h.delta, b.bid, a.aid FROM pgbench_history
+h, pgbench_branches b, pgbench_accounts a WHERE h.bid = b.bid AND h.aid =
+a.aid;
+```
+
+The query plan produced by selecting from this view is:
+
+```sql
+EXPLAIN SELECT * FROM tx;
+__OUTPUT__
+ QUERY PLAN
+-----------------------------------------------------------------------------
+-----------
+ Hash Join (cost=86814.29..123103.29 rows=500000 width=20)
+ Hash Cond: (h.aid = a.aid)
+ -> Hash Join (cost=21.45..15081.45 rows=500000 width=20)
+ Hash Cond: (h.bid = b.bid)
+ -> Seq Scan on pgbench_history h (cost=0.00..8185.00 rows=500000 width=20)
+ -> Hash (cost=21.20..21.20 rows=20 width=4)
+ -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4)
+ -> Hash (cost=53746.15..53746.15 rows=2014215 width=4)
+ -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=4)
+(9 rows)
+```
+
+The same hints that were applied to this join at the end of [Joining relations hints](04_joining_relations_hints/#joining_relations_hints) can be applied to the view:
+
+```sql
+EXPLAIN SELECT /*+ USE_MERGE(tx.h tx.b) USE_HASH(tx.a) */ * FROM tx;
+__OUTPUT__
+ QUERY PLAN
+-----------------------------------------------------------------------------
+---------------------
+ Hash Join (cost=152583.39..182562.49 rows=500000 width=20)
+ Hash Cond: (h.aid = a.aid)
+ -> Merge Join (cost=65790.55..74540.65 rows=500000 width=20)
+ Merge Cond: (b.bid = h.bid)
+ -> Sort (cost=21.63..21.68 rows=20 width=4)
+ Sort Key: b.bid
+ -> Seq Scan on pgbench_branches b (cost=0.00..21.20 rows=20 width=4)
+ -> Materialize (cost=65768.92..68268.92 rows=500000 width=20)
+ -> Sort (cost=65768.92..67018.92 rows=500000 width=20)
+ Sort Key: h.bid
+ -> Seq Scan on pgbench_history h (cost=0.00..8185.00 rows=500000 width=20)
+ -> Hash (cost=53746.15..53746.15 rows=2014215 width=4)
+ -> Seq Scan on pgbench_accounts a (cost=0.00..53746.15 rows=2014215 width=4)
+(13 rows)
+```
+
+## Applying hints to tables in subqueries
+
+In addition to applying hints to tables in stored views, you can apply hints to tables in subqueries.
In this query on the sample application `emp` table, employees and their managers are listed by joining the `emp` table with a subquery of the `emp` table identified by the alias `b`: + +```sql +SELECT a.empno, a.ename, b.empno "mgr empno", b.ename "mgr ename" FROM emp a, +(SELECT * FROM emp) b WHERE a.mgr = b.empno; +__OUTPUT__ + empno | ename | mgr empno | mgr ename +-------+--------+-----------+----------- + 7369 | SMITH | 7902 | FORD + 7499 | ALLEN | 7698 | BLAKE + 7521 | WARD | 7698 | BLAKE + 7566 | JONES | 7839 | KING + 7654 | MARTIN | 7698 | BLAKE + 7698 | BLAKE | 7839 | KING + 7782 | CLARK | 7839 | KING + 7788 | SCOTT | 7566 | JONES + 7844 | TURNER | 7698 | BLAKE + 7876 | ADAMS | 7788 | SCOTT + 7900 | JAMES | 7698 | BLAKE + 7902 | FORD | 7566 | JONES + 7934 | MILLER | 7782 | CLARK +(13 rows) +``` + +This code shows the plan chosen by the query planner: + +```sql +EXPLAIN SELECT a.empno, a.ename, b.empno "mgr empno", b.ename "mgr ename" +FROM emp a, (SELECT * FROM emp) b WHERE a.mgr = b.empno; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------- + Hash Join (cost=1.32..2.64 rows=13 width=22) + Hash Cond: (a.mgr = emp.empno) + -> Seq Scan on emp a (cost=0.00..1.14 rows=14 width=16) + -> Hash (cost=1.14..1.14 rows=14 width=11) + -> Seq Scan on emp (cost=0.00..1.14 rows=14 width=11) +(5 rows) +``` + +You can apply a hint to the `emp` table in the subquery to perform an index scan on index `emp_pk` instead of a table scan. Note the difference in the query plans. + +```sql +EXPLAIN SELECT /*+ INDEX(b.emp emp_pk) */ a.empno, a.ename, b.empno "mgr +empno", b.ename "mgr ename" FROM emp a, (SELECT * FROM emp) b WHERE a.mgr = +b.empno; +__OUTPUT__ + QUERY PLAN +--------------------------------------------------------------------------- + Merge Join (cost=4.17..13.11 rows=13 width=22) + Merge Cond: (a.mgr = emp.empno) + -> Sort (cost=1.41..1.44 rows=14 width=16) + Sort Key: a.mgr + -> Seq Scan on emp a (cost=0.00..1.14 rows=14 width=16) + -> Index Scan using emp_pk on emp (cost=0.14..12.35 rows=14 width=11) +(6 rows) +``` diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx new file mode 100644 index 00000000000..c7da61ab619 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx @@ -0,0 +1,42 @@ +--- +title: "APPEND optimizer hint" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.043.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.126.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.135.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/06_using_the_append_optimizer_hint/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +By default, EDB Postgres Advanced Server adds new data into the first available free space in a table vacated by vacuumed records. 
Include the `APPEND` directive after the `INSERT` keyword or in the `SELECT` clause to bypass midtable free space and affix new rows to the end of the table. This optimizer hint can be particularly useful when bulk loading data.
+
+The syntax is:
+
+```sql
+/*+APPEND*/
+```
+
+For example, the following command, compatible with Oracle databases, instructs the server to append the data in the `INSERT` statement to the end of the `sales` table:
+
+```sql
+INSERT /*+APPEND*/ INTO sales VALUES
+(10, 10, '01-Mar-2011', 10, 'OR');
+```
+
+EDB Postgres Advanced Server supports the `APPEND` hint when adding multiple rows in a single `INSERT` statement:
+
+```sql
+INSERT /*+APPEND*/ INTO sales VALUES
+(20, 20, '01-Aug-2011', 20, 'NY'),
+(30, 30, '01-Feb-2011', 30, 'FL'),
+(40, 40, '01-Nov-2011', 40, 'TX');
+```
+
+You can also include the `APPEND` hint in the `SELECT` clause of an `INSERT INTO` statement:
+
+```sql
+INSERT INTO sales_history SELECT /*+APPEND*/ * FROM sales;
+```
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx
new file mode 100644
index 00000000000..81c081016b5
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx
@@ -0,0 +1,210 @@
+---
+title: "Parallelism hints"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.044.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/07_parallelism_hints/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+*Parallel scanning* uses multiple background workers to scan a table simultaneously, that is, in parallel, for a given query. This process provides a performance improvement over other methods such as the sequential scan.
+
+- The `PARALLEL` optimizer hint forces parallel scanning.
+- The `NO_PARALLEL` optimizer hint prevents use of a parallel scan.
+
+## Synopsis
+
+```sql
+PARALLEL (table [ parallel_degree | DEFAULT ])
+
+NO_PARALLEL (table)
+```
+
+## Parameters
+
+`table`
+
+ The table to which to apply the parallel hint.
+
+`parallel_degree | DEFAULT`
+
+ `parallel_degree` is a positive integer that specifies the desired number of workers to use for a parallel scan. If specified, the lesser of `parallel_degree` and the configuration parameter `max_parallel_workers_per_gather` is used as the planned number of workers. For information on the `max_parallel_workers_per_gather` parameter, see *Asynchronous Behavior* under *Resource Consumption* in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/runtime-config-resource.html).
+
+ - If you specify `DEFAULT`, then the maximum possible parallel degree is used.
+ - If you omit both `parallel_degree` and `DEFAULT`, then the query optimizer determines the parallel degree. In this case, if `table` was set with the `parallel_workers` storage parameter, then this value is used as the parallel degree. Otherwise, the optimizer uses the maximum possible parallel degree as if `DEFAULT` were specified. For information on the `parallel_workers` storage parameter, see `Storage Parameters` under `CREATE TABLE` in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createtable.html).
+
+ Regardless of the circumstance, the parallel degree never exceeds the setting of the configuration parameter `max_parallel_workers_per_gather`.
+
+## About the examples
+
+For these examples, the following configuration parameter settings are in effect:
+
+```sql
+SHOW max_worker_processes;
+__OUTPUT__
+ max_worker_processes
+----------------------
+ 8
+(1 row)
+```
+```sql
+SHOW max_parallel_workers_per_gather;
+__OUTPUT__
+ max_parallel_workers_per_gather
+---------------------------------
+ 2
+(1 row)
+```
+
+## Example: Default scan
+
+This example shows the default scan on table `pgbench_accounts`. A sequential scan is shown in the query plan.
+
+```sql
+SET trace_hints TO on;
+
+EXPLAIN SELECT * FROM pgbench_accounts;
+__OUTPUT__
+                                 QUERY PLAN
+---------------------------------------------------------------------------
+ Seq Scan on pgbench_accounts  (cost=0.00..53746.15 rows=2014215 width=97)
+(1 row)
+```
+
+## Example: PARALLEL hint
+
+This example uses the `PARALLEL` hint. In the query plan, the Gather node, which launches the background workers, shows that two workers are planned:
+
+!!! Note
+    Because `trace_hints` is set to `on`, the `INFO: [HINTS]` lines appear, stating that `PARALLEL` was accepted for `pgbench_accounts`, along with other hint information. These lines aren't shown for the remaining examples because they generally convey the same information; for those examples, `trace_hints` was reset to `off`.
+
+```sql
+EXPLAIN SELECT /*+ PARALLEL(pgbench_accounts) */ * FROM pgbench_accounts;
+__OUTPUT__
+INFO: [HINTS] SeqScan of [pgbench_accounts] rejected due to PARALLEL hint.
+INFO: [HINTS] PARALLEL on [pgbench_accounts] accepted.
+INFO: [HINTS] Index Scan of [pgbench_accounts].[pgbench_accounts_pkey]
+rejected due to PARALLEL hint.
+ QUERY PLAN +----------------------------------------------------------------------------- +------------ + Gather (cost=1000.00..244418.06 rows=2014215 width=97) + Workers Planned: 2 + -> Parallel Seq Scan on pgbench_accounts (cost=0.00..41996.56 rows=839256 width=97) +(3 rows) +``` + +Now, the `max_parallel_workers_per_gather` setting is increased: + +```sql +SET max_parallel_workers_per_gather TO 6; + +SHOW max_parallel_workers_per_gather; +__OUTPUT__ + max_parallel_workers_per_gather +--------------------------------- + 6 +(1 row) +``` + +The same query on `pgbench_accounts` is issued again with no parallel degree specification in the `PARALLEL` hint. The number of planned workers has increased to 4, as determined by the optimizer. + +```sql +EXPLAIN SELECT /*+ PARALLEL(pgbench_accounts) */ * FROM pgbench_accounts; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------ + Gather (cost=1000.00..241061.04 rows=2014215 width=97) + Workers Planned: 4 + -> Parallel Seq Scan on pgbench_accounts (cost=0.00..38639.54 rows=503554 width=97) +(3 rows) +``` + +Now, a value of `6` is specified for the parallel degree parameter of the `PARALLEL` hint. The planned number of workers is returned as this specified value: + +```sql +EXPLAIN SELECT /*+ PARALLEL(pgbench_accounts 6) */ * FROM pgbench_accounts; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------ + Gather (cost=1000.00..239382.52 rows=2014215 width=97) + Workers Planned: 6 + -> Parallel Seq Scan on pgbench_accounts (cost=0.00..36961.03 rows=335702 width=97) +(3 rows) +``` + +The same query is now issued with the `DEFAULT` setting for the parallel degree. The results indicate that the maximum allowable number of workers is planned. + +```sql +EXPLAIN SELECT /*+ PARALLEL(pgbench_accounts DEFAULT) */ * FROM +pgbench_accounts; +__OUTPUT__ + QUERY PLAN +----------------------------------------------------------------------------- +------------ + Gather (cost=1000.00..239382.52 rows=2014215 width=97) + Workers Planned: 6 + -> Parallel Seq Scan on pgbench_accounts (cost=0.00..36961.03 rows=335702 width=97) +(3 rows) +``` + +Table `pgbench_accounts` is now altered so that the `parallel_workers` storage parameter is set to `3`. + +!!! Note + This format of the `ALTER TABLE` command to set the `parallel_workers` parameter isn't compatible with Oracle databases. + +The `parallel_workers` setting is shown by the PSQL `\d+` command. 
+
+```sql
+ALTER TABLE pgbench_accounts SET (parallel_workers=3);
+__OUTPUT__
+\d+ pgbench_accounts
+                      Table "public.pgbench_accounts"
+  Column  |     Type      | Modifiers | Storage  | Stats target | Description
+----------+---------------+-----------+----------+--------------+-------------
+ aid      | integer       | not null  | plain    |              |
+ bid      | integer       |           | plain    |              |
+ abalance | integer       |           | plain    |              |
+ filler   | character(84) |           | extended |              |
+Indexes:
+    "pgbench_accounts_pkey" PRIMARY KEY, btree (aid)
+Options: fillfactor=100, parallel_workers=3
+```
+
+## Example: PARALLEL hint is given with no parallel degree
+
+When the `PARALLEL` hint is given with no parallel degree, the resulting number of planned workers is the value from the `parallel_workers` parameter:
+
+```sql
+EXPLAIN SELECT /*+ PARALLEL(pgbench_accounts) */ * FROM pgbench_accounts;
+__OUTPUT__
+                                    QUERY PLAN
+------------------------------------------------------------------------------------------
+ Gather  (cost=1000.00..242522.97 rows=2014215 width=97)
+   Workers Planned: 3
+   ->  Parallel Seq Scan on pgbench_accounts  (cost=0.00..40101.47 rows=649747 width=97)
+(3 rows)
+```
+
+Specifying a parallel degree value or `DEFAULT` in the `PARALLEL` hint overrides the `parallel_workers` setting.
+
+## Example: NO_PARALLEL hint
+
+This example shows the `NO_PARALLEL` hint. With `trace_hints` set to `on`, the `INFO: [HINTS]` message states that the parallel scan was rejected due to the `NO_PARALLEL` hint.
+
+```sql
+EXPLAIN SELECT /*+ NO_PARALLEL(pgbench_accounts) */ * FROM pgbench_accounts;
+__OUTPUT__
+INFO: [HINTS] Parallel SeqScan of [pgbench_accounts] rejected due to
+NO_PARALLEL hint.
+                                 QUERY PLAN
+---------------------------------------------------------------------------
+ Seq Scan on pgbench_accounts  (cost=0.00..53746.15 rows=2014215 width=97)
+(1 row)
+```
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/08_conflicting_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/08_conflicting_hints.mdx
new file mode 100644
index 00000000000..4db494f2725
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/08_conflicting_hints.mdx
@@ -0,0 +1,25 @@
+---
+title: "Conflicting hints"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.045.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.127.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.136.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/08_conflicting_hints/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+If a command includes two or more conflicting hints, the server ignores the contradicting hints. The following table lists hints that contradict each other.
+
+| Hint                         | Conflicting hint                                                           |
+| ---------------------------- | -------------------------------------------------------------------------- |
+| `ALL_ROWS`                   | `FIRST_ROWS` - all formats                                                 |
+| `FULL(table)`                | `INDEX(table [ index ])`<br/>`PARALLEL(table [ degree ])`                  |
+| `INDEX(table)`               | `FULL(table)`<br/>`NO_INDEX(table)`<br/>`PARALLEL(table [ degree ])`       |
+| `INDEX(table index)`         | `FULL(table)`<br/>`NO_INDEX(table index)`<br/>`PARALLEL(table [ degree ])` |
+| `PARALLEL(table [ degree ])` | `FULL(table)`<br/>`INDEX(table)`<br/>`NO_PARALLEL(table)`                  |
+| `USE_HASH(table)`            | `NO_USE_HASH(table)`                                                       |
+| `USE_MERGE(table)`           | `NO_USE_MERGE(table)`                                                      |
+| `USE_NL(table)`              | `NO_USE_NL(table)`                                                         |
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx
new file mode 100644
index 00000000000..0fd018abed8
--- /dev/null
+++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx
@@ -0,0 +1,52 @@
+---
+title: "About optimizer hints"
+---
+
+An *optimizer hint* is one or more directives embedded in a comment-like syntax that immediately follows a `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command. Keywords in the comment instruct the server to use or avoid a specific plan when producing the result set.
+
+## Synopsis
+
+```sql
+{ DELETE | INSERT | SELECT | UPDATE } /*+ { hint [ comment ] } [...] */
+  statement_body
+
+{ DELETE | INSERT | SELECT | UPDATE } --+ { hint [ comment ] } [...]
+  statement_body
+```
+
+In both forms, a plus sign (+) must immediately follow the `/*` or `--` opening comment symbols, with no intervening space. Otherwise the server doesn't interpret the tokens that follow as hints.
+
+If you're using the first form, the hint and optional comment might span multiple lines. In the second form, all hints and comments must occupy a single line. The rest of the statement must start on a new line.
+
+## Description
+
+Note:
+
+- The database server always tries to use the specified hints if at all possible.
+- If a planner method parameter is set so as to disable a certain plan type, then this plan isn't used even if it's specified in a hint, unless there are no other possible options for the planner. Examples of planner method parameters are `enable_indexscan`, `enable_seqscan`, `enable_hashjoin`, `enable_mergejoin`, and `enable_nestloop`. These are all Boolean parameters.
+- The hint is embedded in a comment. As a consequence, if the hint is misspelled or if any parameter to a hint, such as a view, table, or column name, is misspelled or nonexistent in the SQL command, there's no indication that an error occurred. No syntax error is given. The entire hint is silently ignored.
+- If an alias is used for a table name in the SQL command, then you must use the alias name in the hint, not the original table name. For example, in the command `SELECT /*+ FULL(acct) */ * FROM accounts acct ...`, you must specify `acct`, the alias for `accounts`, in the `FULL` hint, not the table name `accounts`.
+
+Use the `EXPLAIN` command to ensure that the hint is correctly formed and the planner is using the hint.
+
+In general, don't use optimizer hints in a production application, where table data changes throughout the life of the application. By ensuring that dynamic columns are analyzed frequently via the `ANALYZE` command, the column statistics are updated to reflect value changes. The planner uses such information to produce the lowest-cost plan for any given command execution. Use of optimizer hints defeats the purpose of this process and results in the same plan regardless of how the table data changes.
+
+## Parameters
+
+`hint`
+
+ An optimizer hint directive.
+
+`comment`
+
+ A string with additional information. Comments have restrictions as to what characters you can include. Generally, `comment` can consist only of alphabetic characters, numeric characters, the underscore, dollar sign, number sign, and space characters.
These must also conform to the syntax of an identifier. Any subsequent hint is ignored if the comment isn't in this form. + +`statement_body` + + The remainder of the `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command. + +
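+
+For example, both of the following statements, sketched against the sample `emp` table, apply the `FULL` hint. The first uses the multiline comment form. The second uses the single-line form, so the rest of the statement starts on a new line:
+
+```sql
+SELECT /*+ FULL(emp) */ * FROM emp;
+
+SELECT --+ FULL(emp)
+* FROM emp;
+```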
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/index.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/index.mdx new file mode 100644 index 00000000000..a16f5829369 --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/05_optimizer_hints/index.mdx @@ -0,0 +1,32 @@ +--- +title: "Using optimizer hints" +description: "Describes how to generate and use optimizer hints" +indexCards: simple +navigation: +- about_optimizer_hints +- 01_default_optimization_modes +- 02_access_method_hints +- 03_specifying_a_join_order +- 04_joining_relations_hints +- 05_global_hints +- 06_using_the_append_optimizer_hint +- 07_parallelism_hints +- 08_conflicting_hints +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.037.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.120.html" +redirects: + - /epas/latest/epas_compat_ora_dev_guide/05_optimizer_hints/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +When you invoke a `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command, the server generates a set of execution plans. After analyzing those execution plans, the server selects a plan that generally returns the result set in the least amount of time. The server's choice of plan depends on several factors: + +- The estimated execution cost of data handling operations +- Parameter values assigned to parameters in the `Query Tuning` section of the `postgresql.conf` file +- Column statistics that were gathered by the ANALYZE command + +As a rule, the query planner selects the least expensive plan. You can use an *optimizer hint* to influence the server as it selects a query plan. + diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/index.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/index.mdx new file mode 100644 index 00000000000..6226b7fe76f --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/index.mdx @@ -0,0 +1,9 @@ +--- +title: "Optimizing code" +description: "Describes using EDB Postgres Advanced Server features to optimize database performance" +indexCards: simple +redirects: + - /epas/latest/application_programming/optimizing_code/ #generated for docs/epas/reorg-role-use-case-mode +--- + +EDB Postgres Advanced Server includes features designed to help application programmers address database performance problems. SQL Profiler helps you locate and optimize poorly running SQL code. You can use optimizer hints to influence the server as it selects a query plan when you invoke a DELETE, INSERT, SELECT, or UPDATE command. 
diff --git a/product_docs/docs/epas/17/application_programming/optimizing_code/optimizing_code.mdx b/product_docs/docs/epas/17/application_programming/optimizing_code/optimizing_code.mdx new file mode 100644 index 00000000000..de6fcf6d57a --- /dev/null +++ b/product_docs/docs/epas/17/application_programming/optimizing_code/optimizing_code.mdx @@ -0,0 +1,8 @@ +--- +title: "Optimizing inefficient SQL code" +description: "Describes the benefits of using the SQL Profiler utility to optimize code" +--- + +Inefficient SQL code is a leading cause of database performance problems. The challenge for database administrators and developers is locating and then optimizing this code in large, complex systems. + +See [SQL Profiler](../../managing_performance/03_sql_profiler/) to see how the utility can help you locate and optimize poorly running SQL code. \ No newline at end of file diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx new file mode 100644 index 00000000000..58c24e2d638 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx @@ -0,0 +1,174 @@ +--- +title: "Setting configuration parameters" +navTitle: "Setting configuration parameters" +description: "Describes how to set the configuration parameters for EDB Postgres Advanced Server" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.09.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.009.html" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/01_setting_new_parameters/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + + + +Set each configuration parameter using a name/value pair. Parameter names aren't case sensitive. The parameter name is typically separated from its value by an optional equals sign (`=`). + +This example shows some configuration parameter settings in the `postgresql.conf` file: + +```ini +# This is a comment +log_connections = yes +log_destination = 'syslog' +search_path = '"$user", public' +shared_buffers = 128MB +``` + +## Types of parameter values + +Parameter values are specified as one of five types: + +- **Boolean** — Acceptable values are `on`, `off`, `true`, `false`, `yes`, `no`, `1`, `0`, or any unambiguous prefix of these. +- **Integer** — Number without a fractional part. +- **Floating point** — Number with an optional fractional part separated by a decimal point. +- **String** — Text value enclosed in single quotes if the value isn't a simple identifier or number, that is, the value contains special characters such as spaces or other punctuation marks. +- **Enum** — Specific set of string values. The allowed values can be found in the system view `pg_settings.enumvals`. Enum values are not case sensitive. + +Some settings specify a memory or time value. Each of these has an implicit unit, which is kilobytes, blocks (typically 8 kilobytes), milliseconds, seconds, or minutes. You can find default units by referencing the system view `pg_settings.unit`. You can specify a different unit explicitly. 
+ +Valid memory units are: +- `kB` (kilobytes) +- `MB` (megabytes) +- `GB` (gigabytes). + +Valid time units are: +- `ms` (milliseconds) +- `s` (seconds) +- `min` (minutes) +- `h` (hours) +- `d` (days). + +The multiplier for memory units is 1024. + +## Specifying configuration parameter settings + +A number of parameter settings are set when the EDB Postgres Advanced Server database product is built. These are read-only parameters, and you can't change their values. A couple of parameters are also permanently set for each database when the database is created. These parameters are read-only and you can't later change them for the database. However, there are a number of ways to specify the configuration parameter settings: + +- The initial settings for almost all configurable parameters across the entire database cluster are listed in the `postgresql.conf` configuration file. These settings are put into effect upon database server start or restart. You can override some of these initial parameter settings. All configuration parameters have built-in default settings that are in effect unless you explicitly override them. + +- Configuration parameters in the `postgresql.conf` file are overridden when the same parameters are included in the `postgresql.auto.conf` file. Use the `ALTER SYSTEM` command to manage the configuration parameters in the `postgresql.auto.conf` file. + +- You can modify parameter settings in the configuration file while the database server is running. If the configuration file is then reloaded (meaning a SIGHUP signal is issued), for certain parameter types, the changed parameters settings immediately take effect. For some of these parameter types, the new settings are available in a currently running session immediately after the reload. For others, you must start a new session to use the new settings. And for some others, modified settings don't take effect until the database server is stopped and restarted. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/config-setting.html) for information on how to reload the configuration file. + +- You can use the SQL commands `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` to modify certain parameter settings. The modified parameter settings take effect for new sessions after you execute the command. `ALTER DATABASE` affects new sessions connecting to the specified database. `ALTER ROLE` affects new sessions started by the specified role. `ALTER ROLE IN DATABASE` affects new sessions started by the specified role connecting to the specified database. Parameter settings established by these SQL commands remain in effect indefinitely, across database server restarts, overriding settings established by the other methods. You can change parameter settings established using the `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` commands by either: + + - Reissuing these commands with a different parameter value. + + - Issuing these commands using the `SET parameter TO DEFAULT` clause or the `RESET parameter` clause. These clauses change the parameter back to using the setting set by the other methods. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-commands.html) for the syntax of these SQL commands. + +- You can make changes for certain parameter settings for the duration of individual sessions using the `PGOPTIONS` environment variable or by using the `SET` command in the EDB-PSQL or PSQL command-line programs. 
Parameter settings made this way override settings established using any of the methods discussed earlier, but only during that session. + +## Modifying the postgresql.conf file + +The configuration parameters in the `postgresql.conf` file specify server behavior with regard to auditing, authentication, encryption, and other behaviors. On Linux and Windows hosts, the `postgresql.conf` file resides in the `data` directory under your EDB Postgres Advanced Server installation. + +Parameters that are preceded by a pound sign (#) are set to their default value. To change a parameter value, remove the pound sign and enter a new value. After setting or changing a parameter, you must either `reload` or `restart` the server for the new parameter value to take effect. + +In the `postgresql.conf` file, some parameters contain comments that indicate `change requires restart`. To view a list of the parameters that require a server restart, use the following query at the psql command line: + +```sql +SELECT name FROM pg_settings WHERE context = 'postmaster'; +``` + + + +## Modifying the pg_hba.conf file + +Appropriate authentication methods provide protection and security. Entries in the `pg_hba.conf` file specify the authentication methods that the server uses with connecting clients. Before connecting to the server, you might need to modify the authentication properties specified in the `pg_hba.conf` file. + +When you invoke the initdb utility to create a cluster, the utility creates a `pg_hba.conf` file for that cluster that specifies the type of authentication required from connecting clients. You can modify this file. After modifying the authentication settings in the `pg_hba.conf` file, restart the server and apply the changes. For more information about authentication and modifying the `pg_hba.conf` file, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html). + +When the server receives a connection request, it verifies the credentials provided against the authentication settings in the `pg_hba.conf` file before allowing a connection to a database. To log the `pg_hba.conf` file entry to authenticate a connection to the server, set the `log_connections` parameter to `ON` in the `postgresql.conf` file. + +A record specifies a connection type, database name, user name, client IP address, and the authentication method to authorize a connection upon matching these parameters in the `pg_hba.conf` file. Once the connection to a server is authorized, you can see the matched line number and the authentication record from the `pg_hba.conf` file. + +This example shows a log detail for a valid `pg_hba.conf` entry after successful authentication: + +```shell +2020-05-08 10:42:17 IST LOG: connection received: host=[local] +2020-05-08 10:42:17 IST LOG: connection authorized: user=u1 database=edb +application_name=psql +2020-05-08 10:42:17 IST DETAIL: Connection matched pg_hba.conf line 84: +"local all all md5" +``` + +## Obfuscating the LDAP password + +When using [LDAP](https://www.postgresql.org/docs/15/auth-ldap.html) for authentication, the LDAP password used to connect to the LDAP server (the ldapbindpasswd password) is stored in the `pg_hba.conf` file. You can store the password there in an obfuscated form, which can then be de-obfuscated by a loadable module that you supply. The loadable module supplies a hook function that performs the de-obfuscation. 
+
+For example, this C-loadable module uses `rot13_passphrase` as the hook function to de-obfuscate the password from the `pg_hba.conf` file:
+
+ ```c
+ #include "postgres.h"
+
+ #include <stdio.h>
+ #include <string.h>
+
+ #include "libpq/libpq.h"
+ #include "libpq/libpq-be.h"
+ #include "libpq/auth.h"
+ #include "utils/guc.h"
+
+ PG_MODULE_MAGIC;
+
+ void _PG_init(void);
+ void _PG_fini(void);
+
+ /* hook function */
+ static char* rot13_passphrase(char *password);
+
+ /*
+  * Module load callback
+  */
+ void
+ _PG_init(void)
+ {
+     ldap_password_hook = rot13_passphrase;
+ }
+
+ void
+ _PG_fini(void)
+ {
+     /* do nothing yet */
+ }
+
+ static char*
+ rot13_passphrase(char *pw)
+ {
+     size_t size = strlen(pw) + 1;
+
+     char* new_pw = (char*) palloc(size);
+     strlcpy(new_pw, pw, size);
+     for (char *p = new_pw; *p; p++)
+     {
+         char c = *p;
+
+         if ((c >= 'a' && c <= 'm') || (c >= 'A' && c <= 'M'))
+             *p = c + 13;
+         else if ((c >= 'n' && c <= 'z') || (c >= 'N' && c <= 'Z'))
+             *p = c - 13;
+     }
+
+     return new_pw;
+ }
+ ```
+
+Add your module to the `shared_preload_libraries` parameter in the `postgresql.conf` file. For example:
+
+ ```ini
+ shared_preload_libraries = '$libdir/ldap_password_func'
+ ```
+
+Restart your server to load the changes in this parameter.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/01_shared_buffers.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/01_shared_buffers.mdx
new file mode 100644
index 00000000000..6733196bc28
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/01_shared_buffers.mdx
@@ -0,0 +1,27 @@
+---
+title: "shared_buffers"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/01_shared_buffers/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 32MB
+
+**Range:** 128kB to system dependent
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Sets the amount of memory the database server uses for shared memory buffers. The default is typically 32MB but might be less if your kernel settings don't support it, as determined during `initdb`. This setting must be at least 128kB. (Nondefault values of `BLCKSZ` change the minimum.) However, you usually need settings significantly higher than the minimum for good performance.
+
+If you have a dedicated database server with 1GB or more of RAM, a reasonable starting value for `shared_buffers` is 25% of the memory in your system. For some workloads, even large settings for `shared_buffers` are effective. However, because EDB Postgres Advanced Server also relies on the operating system cache, allocating more than 40% of RAM to `shared_buffers` isn't likely to work better than a smaller amount.
+
+On systems with less than 1GB of RAM, a smaller percentage of RAM is appropriate to leave space for the operating system. Fifteen percent of memory is more typical in these situations. Also, on Windows, large values for `shared_buffers` aren't as effective.
You might have better results keeping the setting relatively low and using the operating system cache more instead. The useful range for `shared_buffers` on Windows systems is generally from 64MB to 512MB. + +Increasing this parameter might cause EDB Postgres Advanced Server to request more System V shared memory than your operating system's default configuration allows. See [Shared Memory and Semaphores](https://www.postgresql.org/docs/15/kernel-resources.html#SYSVIPC) in the PostgreSQL core documentation for information on how to adjust those parameters. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/02_work_mem.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/02_work_mem.mdx new file mode 100644 index 00000000000..2a70aead93e --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/02_work_mem.mdx @@ -0,0 +1,27 @@ +--- +title: "work_mem" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/02_work_mem/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 1MB + +**Range:** 64kB to 2097151kB + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Specifies the amount of memory for internal sort operations and hash tables to use before writing to temporary disk files. + +For a complex query, several sort or hash operations might run in parallel. Each operation is allowed to use as much memory as this value specifies before it starts to write data into temporary files. Also, several running sessions might perform such operations concurrently. Therefore, the total memory used might be many times the value of `work_mem`. Keep this information in mind when choosing the value. + +Sort operations are used for `ORDER BY`, `DISTINCT`, and merge joins. Hash tables are used in hash joins, hash-based aggregation, and hash-based processing of `IN` subqueries. + +Reasonable values are typically between 4MB and 64MB, depending on the size of your machine, how many concurrent connections you expect (determined by `max_connections`), and the complexity of your queries. 
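+
+For example, to experiment with a larger value for the current session only, you can use the `SET` command. The 32MB figure here is only an illustration, not a recommendation:
+
+```sql
+SET work_mem = '32MB';
+SHOW work_mem;
+```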
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/03_maintenance_work_mem.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/03_maintenance_work_mem.mdx
new file mode 100644
index 00000000000..27c4f4c2001
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/03_maintenance_work_mem.mdx
@@ -0,0 +1,25 @@
+---
+title: "maintenance_work_mem"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/03_maintenance_work_mem/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 16MB
+
+**Range:** 1024kB to 2097151kB
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Specifies the maximum amount of memory used by maintenance operations such as `VACUUM`, `CREATE INDEX`, and `ALTER TABLE ADD FOREIGN KEY`. It defaults to 16MB. Since a database session can execute only one of these operations at a time, and an installation normally doesn't have many of them running concurrently, it's safe to set this value significantly larger than `work_mem`. Larger settings might improve performance for vacuuming and for restoring database dumps.
+
+When autovacuum runs, up to `autovacuum_max_workers` times this memory can be allocated, so be careful not to set the default value too high.
+
+A good rule of thumb is to set this to about 5% of system memory but not more than about 512MB. Larger values don't necessarily improve performance.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/04_wal_buffers.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/04_wal_buffers.mdx
new file mode 100644
index 00000000000..2a8693952de
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/04_wal_buffers.mdx
@@ -0,0 +1,25 @@
+---
+title: "wal_buffers"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/04_wal_buffers/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 64kB
+
+**Range:** 32kB to system dependent
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+The amount of memory used in shared memory for WAL data. Because the data is written out to disk at every transaction commit, the setting must be large enough only to hold the amount of WAL data generated by one typical transaction.
+
+Increasing this parameter might cause EDB Postgres Advanced Server to request more System V shared memory than your operating system's default configuration allows. See [Shared Memory and Semaphores](https://www.postgresql.org/docs/15/kernel-resources.html#SYSVIPC) in the PostgreSQL core documentation for information on how to adjust those parameters.
+
+Although even this very small setting doesn't always cause a problem, in some situations it can result in extra `fsync` calls and degrade overall system throughput. Increasing this value to about 1MB can address this problem. On very busy systems, you might need an even higher value, up to a maximum of about 16MB. Like `shared_buffers`, this parameter increases EDB Postgres Advanced Server’s initial shared memory allocation. If increasing it causes an EDB Postgres Advanced Server start failure, increase the operating system limit.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/05_checkpoint_segments.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/05_checkpoint_segments.mdx
new file mode 100644
index 00000000000..30e200d712d
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/05_checkpoint_segments.mdx
@@ -0,0 +1,9 @@
+---
+title: "checkpoint_segments"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/05_checkpoint_segments/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Deprecated. This parameter isn't supported by EDB Postgres Advanced Server.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/06_checkpoint_completion_target.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/06_checkpoint_completion_target.mdx
new file mode 100644
index 00000000000..68776eb7f8e
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/06_checkpoint_completion_target.mdx
@@ -0,0 +1,23 @@
+---
+title: "checkpoint_completion_target"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/06_checkpoint_completion_target/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Floating point
+
+**Default value:** 0.5
+
+**Range:** 0 to 1
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies the target of checkpoint completion as a fraction of total time between checkpoints. This parameter spreads out the checkpoint writes while the system starts working toward the next checkpoint.
+
+The default of 0.5 aims to finish the checkpoint writes when 50% of the time before the next checkpoint has elapsed. A value of 0.9 aims to finish the checkpoint writes when 90% of that time has elapsed. The higher value throttles the checkpoint writes over a larger amount of time and avoids spikes of I/O that can create performance bottlenecks.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/07_checkpoint_timeout.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/07_checkpoint_timeout.mdx
new file mode 100644
index 00000000000..e89eacbb8cd
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/07_checkpoint_timeout.mdx
@@ -0,0 +1,25 @@
+---
+title: "checkpoint_timeout"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/07_checkpoint_timeout/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 5min
+
+**Range:** 30s to 3600s
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Maximum time between automatic WAL checkpoints, in seconds. Increasing this parameter can increase the amount of time needed for crash recovery.
+
+Increasing `checkpoint_timeout` to a larger value, such as 15 minutes, can reduce the I/O load on your system, especially when using large values for `shared_buffers`.
+
+The downside of making these adjustments to the checkpoint parameters is that your system uses a modest amount of additional disk space and takes longer to recover in the event of a crash. However, for most users, this is worth it for a significant performance improvement.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/08_max_wal_size.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/08_max_wal_size.mdx
new file mode 100644
index 00000000000..5f658625064
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/08_max_wal_size.mdx
@@ -0,0 +1,23 @@
+---
+title: "max_wal_size"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/08_max_wal_size/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 1 GB
+
+**Range:** 2 to 2147483647
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+`max_wal_size` specifies the maximum size for the WAL to reach between automatic WAL checkpoints. This is a soft limit. WAL size can exceed `max_wal_size` under special circumstances, such as when under a heavy load, with a failing `archive_command`, or with a high `wal_keep_segments` setting.
+ +Increasing this parameter can increase the amount of time needed for crash recovery. You can set this parameter in the `postgresql.conf` file or on the server command line. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/09_min_wal_size.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/09_min_wal_size.mdx new file mode 100644 index 00000000000..74bd9913365 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/09_min_wal_size.mdx @@ -0,0 +1,21 @@ +--- +title: "min_wal_size" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/09_min_wal_size/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 80 MB + +**Range:** 2 to 2147483647 + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +If WAL disk usage stays below the value specified by `min_wal_size`, old WAL files are recycled for future use at a checkpoint rather than removed. This feature ensures that enough WAL space is reserved to handle spikes in WAL usage, like when running large batch jobs. You can set this parameter in the `postgresql.conf` file or on the server command line. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/10_bgwriter_delay.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/10_bgwriter_delay.mdx new file mode 100644 index 00000000000..498f32a09f5 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/10_bgwriter_delay.mdx @@ -0,0 +1,25 @@ +--- +title: "bgwriter_delay" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/10_bgwriter_delay/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 200ms + +**Range:** 10ms to 10000ms + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the delay between activity rounds for the background writer. In each round, the writer issues writes for some number of dirty buffers. (You can control the number of dirty buffers using the `bgwriter_lru_maxpages` and `bgwriter_lru_multiplier` parameters.) It then sleeps for `bgwriter_delay` milliseconds, and repeats. + +On many systems, the effective resolution of sleep delays is 10ms. Setting `bgwriter_delay` to a value that isn't a multiple of 10 might have the same results as setting it to the next higher multiple of 10. 
+ +Typically, when tuning `bgwriter_delay`, you reduce it from its default value. This parameter is rarely increased. Saving on power consumption on a system with very low use is one case when you might increase the value. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/11_seq_page_cost.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/11_seq_page_cost.mdx new file mode 100644 index 00000000000..c51fc30eee4 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/11_seq_page_cost.mdx @@ -0,0 +1,23 @@ +--- +title: "seq_page_cost" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/11_seq_page_cost/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Floating point + +**Default value:** 1 + +**Range:** 0 to 1.79769e+308 + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Sets the planner's estimate of the cost of a disk page fetch that's part of a series of sequential fetches. You can override this value for a particular tablespace by setting the tablespace parameter of the same name. See [`ALTER TABLESPACE`](https://www.postgresql.org/docs/current/sql-altertablespace.html) in the PostgreSQL core documentation. + +The default value assumes very little caching, so it's often a good idea to reduce it. Even if your database is significantly larger than physical memory, you might want to try setting this parameter to something lower than the default value of 1 to see if you get better query plans that way. If your database fits entirely in memory, you can lower this value much more, for example, to 0.1. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/12_random_page_cost.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/12_random_page_cost.mdx new file mode 100644 index 00000000000..446f5cfd14f --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/12_random_page_cost.mdx @@ -0,0 +1,27 @@ +--- +title: "random_page_cost" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/12_random_page_cost/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Floating point + +**Default value:** 4 + +**Range:** 0 to 1.79769e+308 + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Sets the planner's estimate of the cost of a nonsequentially fetched disk page. You can override the default for a particular tablespace by setting the tablespace parameter of the same name. 
See [`ALTER TABLESPACE`](https://www.postgresql.org/docs/current/sql-altertablespace.html) in the PostgreSQL core documentation. + +Reducing this value relative to `seq_page_cost` causes the system to prefer index scans. Raising it makes index scans look relatively more expensive. You can raise or lower both values together to change the importance of disk I/O costs relative to CPU costs, which are described by the `cpu_tuple_cost` and `cpu_index_tuple_cost` parameters. + +The default value assumes very little caching, so it's often a good idea to reduce it. Even if your database is significantly larger than physical memory, you might want to try setting this parameter to 2 (that is, lower than the default) to see whether you get better query plans that way. If your database fits entirely in memory, you can lower this value much more, for example, to 0.1. + +Although the system allows it, never set `random_page_cost` less than `seq_page_cost`. However, setting them equal or very close to equal makes sense if the database fits mostly or entirely in memory, since in that case there's no penalty for touching pages out of sequence. Also, in a heavily cached database, lower both values relative to the CPU parameters, since the cost of fetching a page already in RAM is much smaller than it normally is. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/13_effective_cache_size.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/13_effective_cache_size.mdx new file mode 100644 index 00000000000..a0ce1ece3ae --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/13_effective_cache_size.mdx @@ -0,0 +1,27 @@ +--- +title: "effective_cache_size" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/13_effective_cache_size/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 128MB + +**Range:** 8kB to 17179869176kB + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Sets the planner's assumption about the effective size of the disk cache that's available to a single query. This assumption is factored into estimates of the cost of using an index. A higher value makes it more likely index scans are used. A lower value makes it more likely sequential scans are used. + +When setting this parameter, consider both EDB Postgres Advanced Server’s shared buffers and the portion of the kernel's disk cache that are used for EDB Postgres Advanced Server data files. Also, consider the expected number of concurrent queries on different tables, since they have to share the available space. + +This parameter doesn't affect the size of shared memory allocated by EDB Postgres Advanced Server, and it doesn't reserve kernel disk cache. Use it only for estimating. + +If this parameter is set too low, the planner might decide not to use an index even when it's helpful to do so. Setting `effective_cache_size` to 50% of physical memory is a normal, conservative setting. 
A more aggressive setting is approximately 75% of physical memory. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/14_synchronous_commit.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/14_synchronous_commit.mdx new file mode 100644 index 00000000000..52ff714212c --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/14_synchronous_commit.mdx @@ -0,0 +1,27 @@ +--- +title: "synchronous_commit" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/14_synchronous_commit/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Boolean + +**Default value:** `true` + +**Range:** `{true | false}` + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Specifies whether a transaction commit waits for WAL records to be written to disk before the command returns a `success` indication to the client. The safe setting is on, which is the default. When off, there can be a delay between when success is reported to the client and when the transaction is really guaranteed to be safe against a server crash. (The maximum delay is three times `wal_writer_delay`.) + +Unlike `fsync`, setting this parameter to off does not create any risk of database inconsistency. An operating system or database crash might result in some recent "allegedly committed" transactions being lost. However, the database state is the same as if those transactions aborted cleanly. + +So, turning `synchronous_commit` off can be a useful alternative when performance is more important than exact certainty about the durability of a transaction. See [`Asynchronous Commit`](https://www.postgresql.org/docs/current/wal-async-commit.html) in the PostgreSQL core documentation for information. + +You can change this parameter at any time. The behavior for any one transaction is determined by the setting in effect when it commits. It's therefore possible and useful to have some transactions commit synchronously and others asynchronously. For example, to make a single multistatement transaction commit asynchronously when the default is the opposite, issue `SET LOCAL synchronous_commit TO OFF` in the transaction. 
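+
+For example, the following sketch (illustrative only; `audit_log` is a hypothetical table) commits one noncritical transaction asynchronously while leaving the session default unchanged:
+
+```sql
+BEGIN;
+SET LOCAL synchronous_commit TO OFF;  -- applies only to this transaction
+INSERT INTO audit_log VALUES ('page viewed');  -- low-value write; losing it in a crash is acceptable
+COMMIT;  -- success is returned before the WAL record is flushed to disk
+```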
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/15_edb_max_spins_per_delay.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/15_edb_max_spins_per_delay.mdx new file mode 100644 index 00000000000..c8c9087e5ef --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/15_edb_max_spins_per_delay.mdx @@ -0,0 +1,23 @@ +--- +title: "edb_max_spins_per_delay" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/15_edb_max_spins_per_delay/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 1000 + +**Range:** 10 to 1000 + +**Minimum scope of effect:** Per cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the maximum number of times that a session spins while waiting for a spin lock. If a lock isn't acquired, the session sleeps. + +This parameter can be useful for systems that use non-uniform memory access (NUMA) architecture. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/16_pg_prewarm.autoprewarm.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/16_pg_prewarm.autoprewarm.mdx new file mode 100644 index 00000000000..e247ff7d8ba --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/16_pg_prewarm.autoprewarm.mdx @@ -0,0 +1,39 @@ +--- +title: "pg_prewarm.autoprewarm" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/16_pg_prewarm.autoprewarm/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Boolean + +**Default value:** `true` + +**Range:** `{true | false}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Controls whether the database server runs `autoprewarm`, a background worker process that dumps shared buffers to disk before a shutdown. It then *prewarms* the shared buffers the next time the server is started, meaning it loads blocks from the disk back into the buffer pool. + +The advantage to this parameter is that it shortens the warmup times after the server restarts by loading the data that was dumped to disk before shutdown. + +Set `pg_prewarm.autoprewarm` to on to enable the `autoprewarm` worker. Set it to off to disable `autoprewarm`. 
+
+Before you can use `autoprewarm`, you must add `$libdir/pg_prewarm` to the libraries listed in the `shared_preload_libraries` configuration parameter of the `postgresql.conf` file, as this example shows:
+
+```ini
+shared_preload_libraries = '$libdir/dbms_pipe,$libdir/edb_gen,$libdir/dbms_aq,$libdir/pg_prewarm'
+```
+
+After modifying the `shared_preload_libraries` parameter, restart the database server. After the restart, the `autoprewarm` background worker launches immediately after the server reaches a consistent state.
+
+The `autoprewarm` process starts loading blocks that were previously recorded in `$PGDATA/autoprewarm.blocks` until no free buffer space is left in the buffer pool. In this manner, any new blocks that were loaded either by the recovery process or by the querying clients aren't replaced.
+
+Once the `autoprewarm` process finishes loading buffers from disk, it periodically dumps shared buffers to disk at the interval specified by the `pg_prewarm.autoprewarm_interval` parameter. At the next server restart, the `autoprewarm` process prewarms shared buffers with the blocks that were last dumped to disk.
+
+See [pg_prewarm.autoprewarm_interval](17_pg_prewarm.autoprewarm_interval/#pg_prewarm_autoprewarm_interval) for information on setting the interval at which the `autoprewarm` background worker dumps shared buffers.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/17_pg_prewarm.autoprewarm_interval.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/17_pg_prewarm.autoprewarm_interval.mdx
new file mode 100644
index 00000000000..9245ed2aba2
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/17_pg_prewarm.autoprewarm_interval.mdx
@@ -0,0 +1,23 @@
+---
+title: "pg_prewarm.autoprewarm_interval"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/17_pg_prewarm.autoprewarm_interval/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** Integer
+
+**Default value:** 300s
+
+**Range:** 0s to 2147483s
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+The minimum number of seconds after which the `autoprewarm` background worker dumps shared buffers to disk. If set to 0, shared buffers aren't dumped at regular intervals. They're dumped only when you shut down the server.
+
+See [pg_prewarm.autoprewarm](16_pg_prewarm.autoprewarm/#pg_prewarm_autoprewarm) for information on the `autoprewarm` background worker.
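+
+For example, this illustrative `postgresql.conf` fragment (values chosen only for demonstration) enables the worker and dumps shared buffers every 10 minutes:
+
+```ini
+pg_prewarm.autoprewarm = on
+pg_prewarm.autoprewarm_interval = 600s
+```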
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/index.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/index.mdx
new file mode 100644
index 00000000000..311fcc1738d
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/index.mdx
@@ -0,0 +1,20 @@
+---
+title: "Top performance-related parameters"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.12.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.012.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/01_top_performance_related_parameters/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters have the most immediate impact on performance.
+
+<div class="toctree" hidden>
+
+shared_buffers work_mem maintenance_work_mem wal_buffers checkpoint_segments checkpoint_completion_target checkpoint_timeout max_wal_size min_wal_size bgwriter_delay seq_page_cost random_page_cost effective_cache_size synchronous_commit edb_max_spins_per_delay pg_prewarm.autoprewarm pg_prewarm.autoprewarm_interval
+
+</div>
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/02_resource_usage_memory.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/02_resource_usage_memory.mdx
new file mode 100644
index 00000000000..a9ae8c4e873
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/02_resource_usage_memory.mdx
@@ -0,0 +1,63 @@
+---
+title: "Resource usage/memory"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.13.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.013.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/02_resource_usage_memory/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters control resource use pertaining to memory.
+
+## edb_dynatune
+
+**Parameter type:** Integer
+
+**Default value:** 0
+
+**Range:** 0 to 100
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Determines how much of the host system’s resources the database server uses, based on the host machine’s total available resources and the intended use of the host machine.
+
+When you first install EDB Postgres Advanced Server, you set `edb_dynatune` according to the use of the host machine on which it was installed, that is, development machine, mixed-use machine, or dedicated server. For most purposes, the database administrator doesn't need to adjust the various configuration parameters in the `postgresql.conf` file to improve performance.
+
+You can set the `edb_dynatune` parameter to any integer value from 0 to 100. A value of 0 turns off the dynamic tuning feature, which leaves the database server resource use under the control of the other configuration parameters in the `postgresql.conf` file.
+
+A low, non-zero value, for example, 1 to 33, dedicates the least amount of the host machine’s resources to the database server. These values are suitable for a development machine where many other applications are being used.
+
+A value in the range of 34 to 66 dedicates a moderate amount of resources to the database server. This setting might be used for a dedicated application server that has a fixed number of other applications running on the same machine as EDB Postgres Advanced Server.
+
+The highest values of 67 to 100 dedicate most of the server’s resources to the database server. Use settings in this range for a host machine that's dedicated to running EDB Postgres Advanced Server.
+
+After you select a value for `edb_dynatune`, you can further fine-tune database server performance by adjusting the other configuration parameters in the `postgresql.conf` file. Any adjusted setting overrides the corresponding value chosen by `edb_dynatune`. To change the value of a parameter, uncomment the configuration parameter, specify the desired value, and restart the database server.
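+
+For example, a host dedicated to running EDB Postgres Advanced Server might use a setting in the highest range (the value here is illustrative, not a recommendation):
+
+```ini
+edb_dynatune = 80   # dedicate most of the host's resources to the database server
+```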
+ +## edb_dynatune_profile + +**Parameter type:** Enum + +**Default value:** `oltp` + +**Range:** `{oltp | reporting | mixed}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Controls tuning aspects based on the expected workload profile on the database server. + +The following are the possible values: + +- `oltp`. Recommended when the database server is processing heavy online transaction processing workloads. +- `reporting`. Recommended for database servers used for heavy data reporting. +- `mixed`. Recommended for servers that provide a mix of transaction processing and data reporting. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/03_resource_usage_edb_resource_manager.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/03_resource_usage_edb_resource_manager.mdx new file mode 100644 index 00000000000..d1e94d96eca --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/03_resource_usage_edb_resource_manager.mdx @@ -0,0 +1,45 @@ +--- +title: "Resource usage/EDB Resource Manager" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/03_resource_usage_edb_resource_manager/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +These configuration parameters control resource use through [EDB Resource Manager](../../10_edb_resource_manager/). + +## edb_max_resource_groups + +**Parameter type:** Integer + +**Default value:** 16 + +**Range:** 0 to 65536 + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Controls the maximum number of resource groups that EDB Resource Manager can use simultaneously. You can create more resource groups than the value specified by `edb_max_resource_groups`. However, the number of resource groups in active use by processes in these groups can't exceed this value. + +Set this parameter large enough to handle the number of groups you expect to maintain. + +## edb_resource_group + +**Parameter type:** String + +**Default value:** none + +**Range:** n/a + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +The name of the resource group for EDB Resource Manager to control in the current session according to the group’s resource type settings. + +If you don't set this parameter, then the current session doesn't use EDB Resource Manager. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/04_query_tuning.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/04_query_tuning.mdx new file mode 100644 index 00000000000..01fbb30ae05 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/04_query_tuning.mdx @@ -0,0 +1,29 @@ +--- +title: "Query tuning" +legacyRedirectsGenerated: + # This list is generated by a script. 
If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.14.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.014.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/04_query_tuning/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters are used for [optimizer hints](../../../application_programming/optimizing_code/05_optimizer_hints/).
+
+## enable_hints
+
+**Parameter type:** Boolean
+
+**Default value:** `true`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Enables optimizer hints embedded in SQL commands. Optimizer hints are ignored when this parameter is off.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/05_query_tuning_planner_method_configuration.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/05_query_tuning_planner_method_configuration.mdx
new file mode 100644
index 00000000000..7a3025e2fb1
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/05_query_tuning_planner_method_configuration.mdx
@@ -0,0 +1,33 @@
+---
+title: "Query tuning/planner method configuration"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/05_query_tuning_planner_method_configuration/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters are used for planner method configuration.
+
+## edb_enable_pruning
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Allows the query planner to *early prune* partitioned tables. Early pruning means that the query planner can "prune" (that is, ignore) partitions that aren't searched in a query before generating query plans. This setting helps improve performance because it prevents generating query plans for partitions that aren't searched.
+
+Conversely, *late pruning* means that the query planner prunes partitions after generating query plans for each partition. The `constraint_exclusion` configuration parameter controls late pruning.
+
+The ability to early prune depends on the nature of the query in the `WHERE` clause. You can use early pruning only in simple queries with constraints like `WHERE column = literal`, for example, `WHERE deptno = 10`.
+
+Don't use early pruning for more complex queries such as `WHERE column = expression`, for example, `WHERE deptno = 10 + 5`.
+
+This parameter is deprecated in version 15 and later. Use `enable_partition_pruning` instead.
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/06_reporting_and_logging_what_to_log.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/06_reporting_and_logging_what_to_log.mdx
new file mode 100644
index 00000000000..1923e86e2a1
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/06_reporting_and_logging_what_to_log.mdx
@@ -0,0 +1,75 @@
+---
+title: "Reporting and logging/what to log"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.15.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.015.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/06_reporting_and_logging_what_to_log/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters control reporting and logging.
+
+## trace_hints
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Use with the optimizer hints feature to provide more detailed information about whether the planner used a hint. Set the `client_min_messages` and `trace_hints` configuration parameters as follows:
+
+```sql
+SET client_min_messages TO info;
+SET trace_hints TO true;
+```
+
+This example shows the added information produced when you set those configuration parameters and run a `SELECT` command with the `NO_INDEX` hint:
+
+```sql
+EXPLAIN SELECT /*+ NO_INDEX(accounts accounts_pkey) */ * FROM accounts
+WHERE aid = 100;
+
+INFO: [HINTS] Index Scan of [accounts].[accounts_pkey] rejected because
+of NO_INDEX hint.
+
+INFO: [HINTS] Bitmap Heap Scan of [accounts].[accounts_pkey] rejected
+because of NO_INDEX hint.
+__OUTPUT__
+ QUERY PLAN
+
+-------------------------------------------------------------
+
+Seq Scan on accounts (cost=0.00..14461.10 rows=1 width=97)
+  Filter: (aid = 100)
+(2 rows)
+```
+
+## edb_log_every_bulk_value
+
+
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Superuser
+
+Bulk processing logs the resulting statements into both the EDB Postgres Advanced Server log file and the EDB Audit log file. However, logging every statement in bulk processing is costly. You can control the bulk processing statements that are logged with the `edb_log_every_bulk_value` configuration parameter.
+
+When this parameter is set to `true`, every statement in bulk processing is logged. When it's set to `false`, a log message is recorded once per bulk execution, along with the number of rows processed, and the duration is emitted once per bulk execution.
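+
+For example (a minimal sketch), a superuser might enable per-statement logging for the current session before investigating a bulk load:
+
+```sql
+SET edb_log_every_bulk_value TO true;  -- requires superuser
+```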
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/01_edb_audit.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/01_edb_audit.mdx new file mode 100644 index 00000000000..b0ddee5a52f --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/01_edb_audit.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/01_edb_audit/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Enum + +**Default value:** `none` + +**Range:** `{none | csv | xml}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Enables or disables database auditing. The values `xml` or `csv` enable database auditing. These values determine the file format in which to capture auditing information. `none` disables database auditing. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/02_edb_audit_directory.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/02_edb_audit_directory.mdx new file mode 100644 index 00000000000..5b31cb78ab7 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/02_edb_audit_directory.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_directory" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/02_edb_audit_directory/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** String + +**Default value:** `edb_audit` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the directory where the audit log files are created. Specify either an absolute path or a path relative to the EDB Postgres Advanced Server `data` directory. 
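+
+For example, this illustrative `postgresql.conf` fragment enables CSV auditing and keeps the default directory (shown only to demonstrate how the two parameters combine):
+
+```ini
+edb_audit = 'csv'                  # capture audit records in CSV format
+edb_audit_directory = 'edb_audit'  # resolved relative to the data directory
+```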
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/03_edb_audit_filename.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/03_edb_audit_filename.mdx new file mode 100644 index 00000000000..9cf103d1a14 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/03_edb_audit_filename.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_filename" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/03_edb_audit_filename/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** String + +**Default value:** `audit-%Y%m%d_%H%M%S` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the file name of the audit file where the auditing information is stored. The default file name is `audit-%Y%m%d_%H%M%S`. The escape sequences, such as `%Y` and `%m`, are replaced by the appropriate current values of the system date and time. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/04_edb_audit_rotation_day.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/04_edb_audit_rotation_day.mdx new file mode 100644 index 00000000000..aacc42c780c --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/04_edb_audit_rotation_day.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_rotation_day" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/04_edb_audit_rotation_day/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** String + +**Default value:** `every` + +**Range:** `{none | every | sun | mon | tue | wed | thu | fri | sat} ...` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the day of the week on which to rotate the audit files. Valid values are `sun`, `mon`, `tue`, `wed`, `thu`, `fri`, `sat`, `every`, and `none`. To disable rotation, set the value to `none`. To rotate the file every day, set the `edb_audit_rotation_day` value to `every`. To rotate the file on a specific day of the week, set the value to the desired day of the week. 
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/05_edb_audit_rotation_size.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/05_edb_audit_rotation_size.mdx new file mode 100644 index 00000000000..3a8804ade0b --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/05_edb_audit_rotation_size.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_rotation_size" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/05_edb_audit_rotation_size/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 0MB + +**Range:** 0MB to 5000MB + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies a file size threshold in megabytes when file rotation occurs. If the parameter is commented out or set to 0, rotating the file based on size doesn't occur. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/06_edb_audit_rotation_seconds.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/06_edb_audit_rotation_seconds.mdx new file mode 100644 index 00000000000..72941062da8 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/06_edb_audit_rotation_seconds.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_rotation_seconds" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/06_edb_audit_rotation_seconds/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Integer + +**Default value:** 0s + +**Range:** 0s to 2147483647s + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the rotation time in seconds when a new log file is created. To disable this feature, leave this parameter set to 0. 
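+
+For example, this illustrative fragment (values chosen only for demonstration) rotates the audit log daily or as soon as a file reaches 100MB, with seconds-based rotation disabled:
+
+```ini
+edb_audit_rotation_day = 'every'
+edb_audit_rotation_size = 100    # rotate when a file reaches 100MB
+edb_audit_rotation_seconds = 0   # disable rotation by elapsed seconds
+```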
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/07_edb_audit_connect.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/07_edb_audit_connect.mdx new file mode 100644 index 00000000000..cf795e65eb4 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/07_edb_audit_connect.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_connect" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/07_edb_audit_connect/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Enum + +**Default value:** `failed` + +**Range:** `{none | failed | all}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Enables auditing of database connection attempts by users. To disable auditing of all connection attempts, set `edb_audit_connect` to `none`. To audit all failed connection attempts, set the value to `failed`. To audit all connection attempts, set the value to `all`. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/08_edb_audit_disconnect.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/08_edb_audit_disconnect.mdx new file mode 100644 index 00000000000..a3df7658b45 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/08_edb_audit_disconnect.mdx @@ -0,0 +1,21 @@ +--- +title: "edb_audit_disconnect" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/08_edb_audit_disconnect/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Enum + +**Default value:** `none` + +**Range:** `{none | all}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Enables auditing of database disconnections by connected users. To enable auditing of disconnections, set the value to `all`. To disable, set the value to `none`. 
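+
+For example (illustrative only), to record every connection attempt and every disconnection:
+
+```ini
+edb_audit_connect = 'all'
+edb_audit_disconnect = 'all'
+```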
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/09_edb_audit_statement.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/09_edb_audit_statement.mdx
new file mode 100644
index 00000000000..b225660b6fe
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/09_edb_audit_statement.mdx
@@ -0,0 +1,32 @@
+---
+title: "edb_audit_statement"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/09_edb_audit_statement/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+**Parameter type:** String
+
+**Default value:** `ddl, error`
+
+**Range:** `{none | ddl | dml | insert | update | delete | truncate | select | error | create | drop | alter | grant | revoke | rollback | set | all | { select | update | delete | insert }@groupname} ...`
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies auditing of different categories of SQL statements as well as statements related to specific SQL commands.
+
+- To log errors, set the parameter value to `error`.
+- To audit all DDL statements, such as `CREATE TABLE` and `ALTER TABLE`, set the parameter value to `ddl`.
+- To audit specific types of DDL statements, the parameter values can include those specific SQL commands (`create`, `drop`, or `alter`). In addition, you can specify the object type following the command, such as `create table`, `create view`, and `drop role`.
+- To audit all modification statements, such as `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE`, set `edb_audit_statement` to `dml`.
+- To audit specific types of DML statements, the parameter values can include the specific SQL commands `insert`, `update`, `delete`, or `truncate`. Include parameter values `select`, `grant`, `revoke`, or `rollback` to audit statements regarding those SQL commands.
+- To audit `SET` statements, include the parameter value `set`.
+- To audit every statement, set the value to `all`.
+- To disable this feature, set the value to `none`.
+
+Per-object auditing audits the operations permitted by object privileges, such as `SELECT`, `UPDATE`, `DELETE`, and `INSERT` statements, on the tables in a given object group, including (`@`) or excluding (`-`) the group. To audit a specific group of objects, specify the name of the object group to audit. The `edb_audit_statement` parameter can associate the specific SQL commands (`select`, `update`, `delete`, or `insert`) with a group name by using the `@` (include) or `-` (exclude) symbol.
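+
+For example, this illustrative setting (where `empgroup` is a hypothetical object group name) audits all DDL statements, all errors, and `insert` statements on the tables in one group, following the `insert@groupname` form shown in the range above:
+
+```ini
+edb_audit_statement = 'ddl, error, insert@empgroup'
+```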
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/10_edb_audit_tag.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/10_edb_audit_tag.mdx new file mode 100644 index 00000000000..c00a6a2a6fd --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/10_edb_audit_tag.mdx @@ -0,0 +1,19 @@ +--- +title: "edb_audit_tag" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/10_edb_audit_tag/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** String + +**Default value:** none + +**Minimum scope of effect:** Session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** User + +Specifies a string value to include in the audit log when the `edb_audit` parameter is set to `csv` or `xml`. diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/11_edb_audit_destination.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/11_edb_audit_destination.mdx new file mode 100644 index 00000000000..40788146cb1 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/11_edb_audit_destination.mdx @@ -0,0 +1,26 @@ +--- +title: "edb_audit_destination" +redirects: + - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/11_edb_audit_destination/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +**Parameter type:** Enum + +**Default value:** `file` + +**Range:** `{file | syslog}` + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies whether to record the audit log information in the directory as given by the `edb_audit_directory` parameter or to the directory and file managed by the `syslog` process. Set to `file` to use the directory specified by `edb_audit_directory`. + +Set to `syslog` to use the syslog process and its location as configured in the `/etc/syslog.conf` file. The `syslog` setting is valid only for EDB Postgres Advanced Server running on a Linux host. It isn't supported on Windows systems. + +!!! Note + In recent Linux versions, `syslog` was replaced with `rsyslog`, and the configuration file is in `/etc/rsyslog.conf`. 
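+
+For example (illustrative only), to send audit records to syslog on a Linux host instead of the file-based destination:
+
+```ini
+edb_audit_destination = 'syslog'
+```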
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/12_edb_log_every_bulk_value.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/12_edb_log_every_bulk_value.mdx
new file mode 100644
index 00000000000..7ed0815e20e
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/12_edb_log_every_bulk_value.mdx
@@ -0,0 +1,9 @@
+---
+title: "edb_log_every_bulk_value"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/12_edb_log_every_bulk_value/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+For information on `edb_log_every_bulk_value`, see [edb_log_every_bulk_value](../06_reporting_and_logging_what_to_log/#edb_log_every_bulk_value_1).
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/index.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/index.mdx
new file mode 100644
index 00000000000..54678857595
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/index.mdx
@@ -0,0 +1,16 @@
+---
+title: "Auditing settings"
+indexCards: simple
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/07_auditing_settings/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters are for use with the EDB Postgres Advanced Server database [auditing feature](../../../../epas_security_guide/05_edb_audit_logging/).
+
+<div class="toctree" hidden>
+
+edb_audit edb_audit_directory edb_audit_filename edb_audit_rotation_day edb_audit_rotation_size edb_audit_rotation_seconds edb_audit_connect edb_audit_disconnect edb_audit_statement edb_audit_tag edb_audit_destination edb_log_every_bulk_value
+
+</div>
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/08_ccd_locale_and_formatting.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/08_ccd_locale_and_formatting.mdx
new file mode 100644
index 00000000000..bcc1793a5f1
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/08_ccd_locale_and_formatting.mdx
@@ -0,0 +1,36 @@
+---
+title: "Client connection defaults/locale and formatting"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.16.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.016.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/08_ccd_locale_and_formatting/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters affect locale and formatting.
+
+## icu_short_form
+
+**Parameter type:** String
+
+**Default value:** none
+
+**Range:** n/a
+
+**Minimum scope of effect:** Database
+
+**When value changes take effect:** n/a
+
+**Required authorization to activate:** n/a
+
+Contains the default ICU short-form name assigned to a database or to the EDB Postgres Advanced Server instance. See [Unicode collation algorithm](../../../tools_utilities_and_components/application_developer_tools/06_unicode_collation_algorithm) for general information about the ICU short form and the Unicode collation algorithm.
+
+This configuration parameter is set in either of these cases:
+
+- The database was created with the `CREATE DATABASE` command and its `ICU_SHORT_FORM` parameter. In this case, the parameter is set to the specified short-form name. It appears in the `icu_short_form` configuration parameter when connected to this database.
+- The EDB Postgres Advanced Server instance was created with the `initdb` command and its `--icu_short_form` option. In this case, the parameter is set to the specified short-form name. It appears in the `icu_short_form` configuration parameter when connected to a database in that EDB Postgres Advanced Server instance, unless the database overrides it with its own `ICU_SHORT_FORM` parameter and a different short form.
+
+Once you set this parameter, you can't change it.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/09_ccd_statement_behaviour.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/09_ccd_statement_behaviour.mdx
new file mode 100644
index 00000000000..767ae126d61
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/09_ccd_statement_behaviour.mdx
@@ -0,0 +1,57 @@
+---
+title: "Client connection defaults/statement behavior"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/09_ccd_statement_behaviour/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters affect statement behavior.
+
+## default_heap_fillfactor
+
+**Parameter type:** Integer
+
+**Default value:** 100
+
+**Range:** 10 to 100
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Sets the fill factor for a table when the `FILLFACTOR` storage parameter is omitted from a `CREATE TABLE` command.
+
+The fill factor for a table is a percentage from 10 to 100, where 100 is complete packing. When you specify a smaller fill factor, `INSERT` operations pack table pages only to the indicated percentage. The remaining space on each page is reserved for updating rows on that page. This approach gives `UPDATE` a chance to place the updated copy of a row on the same page as the original, which is more efficient than placing it on a different page.
+
+For a table whose entries are never updated, complete packing is the best choice. In heavily updated tables, use smaller fill factors.
+
+## edb_data_redaction
+
+**Parameter type:** Boolean
+
+**Default value:** `true`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Data redaction supports policies that limit the exposure of certain sensitive data to certain users by altering the displayed information.
+
+When set to `TRUE`, data redaction is applied to all users except superusers and the table owner:
+
+- Superusers and table owner bypass data redaction.
+- All other users get the redaction policy applied and see the reformatted data.
+
+When set to `FALSE`, the following occurs:
+
+- Superusers and table owner still bypass data redaction.
+- All other users get an error.
+
+For information on data redaction, see [EDB Postgres Advanced Server Security Features](../../../epas_security_guide/).
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/10_ccd_other_defaults.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/10_ccd_other_defaults.mdx
new file mode 100644
index 00000000000..09265aad0ab
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/10_ccd_other_defaults.mdx
@@ -0,0 +1,65 @@
+---
+title: "Client connection defaults/other defaults"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/10_ccd_other_defaults/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These parameters set miscellaneous client connection defaults.
+
+## oracle_home
+
+**Parameter type:** String
+
+**Default value:** none
+
+**Range:** n/a
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Before creating a link to an Oracle server, you must direct EDB Postgres Advanced Server to the correct Oracle home directory. Set the `LD_LIBRARY_PATH` environment variable on Linux or `PATH` on Windows to the `lib` directory of the Oracle client installation directory.
+
+Alternatively, you can set the value of the `oracle_home` configuration parameter in the `postgresql.conf` file.
The value specified in the `oracle_home` configuration parameter overrides the `LD_LIBRARY_PATH` environment variable in Linux and the `PATH` environment variable in Windows.
+
+!!! Note
+    The `oracle_home` configuration parameter must provide the correct path to the Oracle client, that is, the `OCI` library.
+
+To set the `oracle_home` configuration parameter in the `postgresql.conf` file, add the following line:
+
+```text
+oracle_home = '<lib_directory>'
+```
+
+Here, `<lib_directory>` is the path to the Oracle client installation directory that contains `libclntsh.so` in Linux and `oci.dll` in Windows.
+
+After setting the `oracle_home` configuration parameter, you must restart the server for the changes to take effect. Restart the server:
+
+- On Linux, using the `systemctl` command or `pg_ctl` services
+
+- On Windows, from the Windows Services console
+
+
+## odbc_lib_path
+
+**Parameter type:** String
+
+**Default value:** none
+
+**Range:** n/a
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+If you're using an ODBC driver manager and if it's installed in a nonstandard location, specify the location by setting the `odbc_lib_path` configuration parameter in the `postgresql.conf` file:
+
+`odbc_lib_path = 'complete_path_to_libodbc.so'`
+
+The configuration file must include the complete pathname to the driver manager shared library, which is typically `libodbc.so`.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/11_compatibility_options.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/11_compatibility_options.mdx
new file mode 100644
index 00000000000..4f0c69f1bdc
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/11_compatibility_options.mdx
@@ -0,0 +1,375 @@
+---
+title: "Compatibility options"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.17.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.017.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/11_compatibility_options/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters control various database compatibility features.
+
+## edb_redwood_date
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Translates `DATE` to `TIMESTAMP` when `DATE` appears as the data type of a column in a `CREATE TABLE` or `ALTER TABLE` command, and the table definition is stored in the database with the translated type. A time component is stored in the column along with the date.
+
+If `edb_redwood_date` is set to `FALSE`, the column’s data type in a `CREATE TABLE` or `ALTER TABLE` command remains as a native PostgreSQL `DATE` data type and is stored as such in the database. The PostgreSQL `DATE` data type stores only the date without a time component in the column.
+ +Regardless of the setting of `edb_redwood_date`, when `DATE` appears as a data type in any other context, it's always internally translated to a `TIMESTAMP`. It can thus handle a time component if present. Examples of these contexts include: + +- The data type of a variable in an SPL declaration section +- The data type of a formal parameter in an SPL procedure or SPL function +- The return type of an SPL function + +## edb_redwood_greatest_least + +**Parameter type:** Boolean + +**Default value:** `true` + +**Range:** `{true | false}` + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +The `GREATEST` function returns the parameter with the greatest value from its list of parameters. The `LEAST` function returns the parameter with the least value from its list of parameters. + +When `edb_redwood_greatest_least` is set to `TRUE`, the `GREATEST` and `LEAST` functions return null when at least one of the parameters is null. + +```sql +SET edb_redwood_greatest_least TO on; + +SELECT GREATEST(1, 2, NULL, 3); +__OUTPUT__ +greatest +---------- + +(1 row) +``` + +When `edb_redwood_greatest_least` is set to `FALSE`, null parameters are ignored except when all parameters are null. In that case, the functions return null. + +```sql +SET edb_redwood_greatest_least TO off; + +SELECT GREATEST(1, 2, NULL, 3); +__OUTPUT__ +greatest +---------- + + 3 +(1 row) +``` +```sql +SELECT GREATEST(NULL, NULL, NULL); +__OUTPUT__ +greatest +---------- + +(1 row) +``` + +## edb_redwood_raw_names + +**Parameter type:** Boolean + +**Default value:** `false` + +**Range:** `{true | false}` + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +When `edb_redwood_raw_names` is set to `FALSE`, database object names such as table names, column names, trigger names, program names, and user names appear in uppercase letters when viewed from Redwood catalogs (that is, system catalogs prefixed by `ALL_`, `DBA_`, or `USER_`). In addition, quotation marks enclose names that were created with enclosing quotation marks. + +When `edb_redwood_raw_names` is set to `TRUE`, the database object names are displayed as they're stored in the PostgreSQL system catalogs when viewed from the Redwood catalogs. Names created without quotation marks around them appear in lower case as expected in PostgreSQL. Names created enclosed by quotation marks appear as they were created but without the quotation marks. + +For example, the following user name is created, and then a session is started with that user: + +```sql +CREATE USER reduser IDENTIFIED BY password; +edb=# \c - reduser +Password for user reduser: +You are now connected to database "edb" as user "reduser". +``` + +When connected to the database as `reduser`, the following tables are created: + +```sql +CREATE TABLE all_lower (col INTEGER); +CREATE TABLE ALL_UPPER (COL INTEGER); +CREATE TABLE "Mixed_Case" ("Col" INTEGER); +``` + +When viewed from the Redwood catalog `USER_TABLES`, with `edb_redwood_raw_names` set to the default value `FALSE`, the names appear in upper case. The exception is the `Mixed_Case` name, which appears as created and also enclosed by quotation marks. 
+
+```sql
+edb=> SELECT * FROM USER_TABLES;
+__OUTPUT__
+ schema_name  | table_name   | tablespace_name  | status  | temporary
+--------------+--------------+------------------+---------+-----------
+ REDUSER      | ALL_LOWER    |                  | VALID   | N
+ REDUSER      | ALL_UPPER    |                  | VALID   | N
+ REDUSER      | "Mixed_Case" |                  | VALID   | N
+(3 rows)
+```
+
+When viewed with `edb_redwood_raw_names` set to `TRUE`, the names appear in lowercase except for the `Mixed_Case` name, which appears as created but without quotation marks.
+
+```sql
+edb=> SET edb_redwood_raw_names TO true;
+SET
+edb=> SELECT * FROM USER_TABLES;
+__OUTPUT__
+ schema_name | table_name | tablespace_name  | status | temporary
+-------------+------------+------------------+--------+-----------
+ reduser     | all_lower  |                  | VALID  | N
+ reduser     | all_upper  |                  | VALID  | N
+ reduser     | Mixed_Case |                  | VALID  | N
+(3 rows)
+```
+
+These names now match the case when viewed from the PostgreSQL `pg_tables` catalog:
+
+```sql
+edb=> SELECT schemaname, tablename, tableowner FROM pg_tables WHERE
+tableowner = 'reduser';
+__OUTPUT__
+ schemaname | tablename  | tableowner
+------------+------------+------------
+ reduser    | all_lower  | reduser
+ reduser    | all_upper  | reduser
+ reduser    | Mixed_Case | reduser
+(3 rows)
+```
+
+## edb_redwood_strings
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+If the `edb_redwood_strings` parameter is set to `TRUE`, when a string is concatenated with a null variable or null column, the result is the original string. If `edb_redwood_strings` is set to `FALSE`, the native PostgreSQL behavior is maintained: concatenating a string with a null variable or null column gives a null result.
+
+This example shows the difference. The sample application contains a table of employees. This table has a column named `comm` that's null for most employees. The following query is run with `edb_redwood_strings` set to `FALSE`. Concatenating a null column with non-empty strings produces a final result of null, so only employees that have a commission appear in the query result. The output line for all other employees is null.
+
+```sql
+SET edb_redwood_strings TO off;
+
+SELECT RPAD(ename,10) || ' ' || TO_CHAR(sal,'99,999.99') || ' ' ||
+TO_CHAR(comm,'99,999.99') "EMPLOYEE COMPENSATION" FROM emp;
+__OUTPUT__
+      EMPLOYEE COMPENSATION
+----------------------------------
+
+ ALLEN       1,600.00     300.00
+ WARD        1,250.00     500.00
+
+ MARTIN      1,250.00   1,400.00
+
+
+
+
+ TURNER      1,500.00        .00
+
+
+(14 rows)
+```
+
+The following is the same query executed when `edb_redwood_strings` is set to `TRUE`. Here, the value of a null column is treated as an empty string. Concatenating an empty string with a non-empty string produces the non-empty string.
+
+```sql
+SET edb_redwood_strings TO on;
+
+SELECT RPAD(ename,10) || ' ' || TO_CHAR(sal,'99,999.99') || ' ' ||
+TO_CHAR(comm,'99,999.99') "EMPLOYEE COMPENSATION" FROM emp;
+__OUTPUT__
+      EMPLOYEE COMPENSATION
+----------------------------------
+ SMITH         800.00
+ ALLEN       1,600.00     300.00
+ WARD        1,250.00     500.00
+ JONES       2,975.00
+ MARTIN      1,250.00   1,400.00
+ BLAKE       2,850.00
+ CLARK       2,450.00
+ SCOTT       3,000.00
+ KING        5,000.00
+ TURNER      1,500.00        .00
+ ADAMS       1,100.00
+ JAMES         950.00
+ FORD        3,000.00
+ MILLER      1,300.00
+(14 rows)
+```
+
+## edb_stmt_level_tx
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+The term *statement-level transaction isolation* describes the behavior in which a runtime error occurs in a SQL command, and all the updates on the database caused by that single command are rolled back. For example, if a single `UPDATE` command successfully updates five rows, but an attempt to update a sixth row results in an exception, the updates to all six rows made by this `UPDATE` command are rolled back. The effects of prior SQL commands that haven't yet been committed or rolled back are pending until a `COMMIT` or `ROLLBACK` command is executed.
+
+In EDB Postgres Advanced Server, if an exception occurs while executing a SQL command, all the updates on the database since the start of the transaction are rolled back. In addition, the transaction is left in an aborted state, and either a `COMMIT` or `ROLLBACK` command must be issued before another transaction can start.
+
+If `edb_stmt_level_tx` is set to `TRUE`, then an exception doesn't roll back prior uncommitted database updates. If `edb_stmt_level_tx` is set to `FALSE`, then an exception rolls back uncommitted database updates.
+
+!!! Note
+    Use `edb_stmt_level_tx` set to `TRUE` only when necessary, as it can have a negative performance impact.
+
+This example, run in PSQL, shows that when `edb_stmt_level_tx` is `FALSE`, the abort of the second `INSERT` command also rolls back the first `INSERT` command. In PSQL, you must issue the command `\set AUTOCOMMIT off`. Otherwise, every statement commits automatically, which doesn't show the effect of `edb_stmt_level_tx`.
+
+```sql
+\set AUTOCOMMIT off
+SET edb_stmt_level_tx TO off;
+
+INSERT INTO emp (empno,ename,deptno) VALUES (9001, 'JONES', 40);
+INSERT INTO emp (empno,ename,deptno) VALUES (9002, 'JONES', 00);
+ERROR:  insert or update on table "emp" violates foreign key constraint
+"emp_ref_dept_fk"
+DETAIL:  Key (deptno)=(0) is not present in table "dept".
+
+COMMIT;
+SELECT empno, ename, deptno FROM emp WHERE empno > 9000;
+__OUTPUT__
+ empno | ename | deptno
+-------+-------+--------
+(0 rows)
+```
+
+In this example, with `edb_stmt_level_tx` set to `TRUE`, the first `INSERT` command wasn't rolled back after the error on the second `INSERT` command. At this point, the first `INSERT` command can either be committed or rolled back.
+
+```sql
+\set AUTOCOMMIT off
+SET edb_stmt_level_tx TO on;
+
+INSERT INTO emp (empno,ename,deptno) VALUES (9001, 'JONES', 40);
+INSERT INTO emp (empno,ename,deptno) VALUES (9002, 'JONES', 00);
+ERROR:  insert or update on table "emp" violates foreign key constraint
+"emp_ref_dept_fk"
+DETAIL:  Key (deptno)=(0) is not present in table "dept"
+
+SELECT empno, ename, deptno FROM emp WHERE empno > 9000;
+__OUTPUT__
+ empno | ename | deptno
+-------+-------+--------
+  9001 | JONES |     40
+(1 row)
+
+COMMIT;
+```
+
+If a `ROLLBACK` command is issued instead of the `COMMIT` command, the insert of employee number `9001` is rolled back as well.
+
+## db_dialect
+
+**Parameter type:** Enum
+
+**Default value:** `postgres`
+
+**Range:** `{postgres | redwood}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+In addition to the native PostgreSQL system catalog `pg_catalog`, EDB Postgres Advanced Server contains the `sys` catalog, which provides the expanded catalog views. The `db_dialect` parameter controls the order in which these catalogs are searched for name resolution.
+
+When set to `postgres`, the namespace precedence is `pg_catalog` and then `sys`, giving the PostgreSQL catalog the highest precedence. When set to `redwood`, the namespace precedence is `sys` and then `pg_catalog`, giving the expanded catalog views the highest precedence.
+
+## default_with_rowids
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+When set to `on`, `CREATE TABLE` includes a `ROWID` column in newly created tables, which you can then reference in SQL commands. In earlier versions of EDB Postgres Advanced Server, `ROWIDs` were mapped to `OIDs`. With EDB Postgres Advanced Server version 12 and later, the `ROWID` is an autoincrementing value based on a sequence that starts with 1. It's assigned to each row of a table created with the `ROWIDs` option. By default, a unique index is created on a `ROWID` column.
+
+The `ALTER` and `DROP` operations are restricted on a `ROWID` column.
+
+To restore a database that includes `ROWIDs` from EDB Postgres Advanced Server 11 or an earlier version, you must perform the following:
+
+- `pg_dump`: If a table includes `OIDs`, then specify `--convert-oids-to-rowids` to dump the database. Otherwise, ignore the `OIDs` to continue table creation on EDB Postgres Advanced Server version 12 and later.
+- `pg_upgrade`: Errors out. If a table includes `OIDs` or `ROWIDs`, then you must perform the following:
+  1. Take a dump of the tables by specifying the `--convert-oids-to-rowids` option.
+  2. Drop the tables, and then perform the upgrade.
+  3. After the upgrade is successful, restore the dump of the tables into a target database in the new cluster.
+
+## optimizer_mode
+
+**Parameter type:** Enum
+
+**Default value:** `choose`
+
+**Range:** `{choose | ALL_ROWS | FIRST_ROWS | FIRST_ROWS_10 | FIRST_ROWS_100 | FIRST_ROWS_1000}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Sets the default optimization mode for analyzing optimizer hints.
+
+The following table shows the possible values.
+
+| Hint              | Description                                                                                                        |
+| ----------------- | ------------------------------------------------------------------------------------------------------------------ |
+| `ALL_ROWS`        | Optimizes for retrieval of all rows of the result set.                                                             |
+| `CHOOSE`          | Does no default optimization based on assumed number of rows to retrieve from the result set. This is the default. |
+| `FIRST_ROWS`      | Optimizes for retrieval of only the first row of the result set.                                                   |
+| `FIRST_ROWS_10`   | Optimizes for retrieval of the first 10 rows of the result set.                                                    |
+| `FIRST_ROWS_100`  | Optimizes for retrieval of the first 100 rows of the result set.                                                   |
+| `FIRST_ROWS_1000` | Optimizes for retrieval of the first 1000 rows of the result set.                                                  |
+
+These optimization modes are based on the assumption that the client submitting the SQL command is interested in viewing only the first `n` rows of the result set and then abandons the rest of the result set. Resources allocated to the query are adjusted as such.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/12_customized_options.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/12_customized_options.mdx
new file mode 100644
index 00000000000..41d34a503da
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/12_customized_options.mdx
@@ -0,0 +1,392 @@
+---
+title: "Customized options"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.18.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.018.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/12_customized_options/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+## custom_variable_classes
+
+The `custom_variable_classes` parameter was deprecated in EDB Postgres Advanced Server 9.2. Parameters that previously depended on this parameter no longer require its support. In previous releases of EDB Postgres Advanced Server, `custom_variable_classes` was required by parameters not normally known to the server, such as parameters added by add-on modules like procedural languages.
+
+## dbms_alert.max_alerts
+
+**Parameter type:** Integer
+
+**Default value:** 100
+
+**Range:** 0 to 500
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies the maximum number of concurrent alerts allowed on a system using the `DBMS_ALERT` package.
+
+## dbms_pipe.total_message_buffer
+
+**Parameter type:** Integer
+
+**Default value:** 30 KB
+
+**Range:** 30 KB to 256 KB
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies the total size of the buffer used for the `DBMS_PIPE` package.
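+
+As a sketch, you can raise both limits with `ALTER SYSTEM`. The values are illustrative, not recommendations, and the size-unit syntax assumes the buffer parameter accepts kilobyte units, as its range suggests:
+
+```sql
+-- Both settings are cluster-wide and take effect only after a server restart
+ALTER SYSTEM SET dbms_alert.max_alerts = 200;
+ALTER SYSTEM SET dbms_pipe.total_message_buffer = '64kB';
+```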
+
+## index_advisor.enabled
+
+**Parameter type:** Boolean
+
+**Default value:** `true`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Temporarily suspends Index Advisor in an EDB-PSQL or PSQL session. To use this configuration parameter, the Index Advisor plugin `index_advisor` must be loaded in the EDB-PSQL or PSQL session.
+
+You can load the Index Advisor plugin as follows:
+
+```sql
+$ psql -d edb -U enterprisedb
+Password for user enterprisedb:
+psql (14.0.0)
+Type "help" for help.
+
+edb=# LOAD 'index_advisor';
+LOAD
+```
+
+Use the `SET` command to change the parameter setting to control whether Index Advisor generates an alternative query plan:
+
+```sql
+edb=# SET index_advisor.enabled TO off;
+SET
+edb=# EXPLAIN SELECT * FROM t WHERE a < 10000;
+__OUTPUT__
+                       QUERY PLAN
+-------------------------------------------------------
+ Seq Scan on t  (cost=0.00..1693.00 rows=9864 width=8)
+   Filter: (a < 10000)
+(2 rows)
+```
+```sql
+edb=# SET index_advisor.enabled TO on;
+SET
+edb=# EXPLAIN SELECT * FROM t WHERE a < 10000;
+__OUTPUT__
+                                 QUERY PLAN
+-----------------------------------------------------------------------------
+ Seq Scan on t  (cost=0.00..1693.00 rows=9864 width=8)
+   Filter: (a < 10000)
+ Result  (cost=0.00..327.88 rows=9864 width=8)
+   One-Time Filter: '===[ HYPOTHETICAL PLAN ]==='::text
+   ->  Index Scan using ":1" on t  (cost=0.00..327.88
+       rows=9864 width=8)
+         Index Cond: (a < 10000)
+(6 rows)
+```
+
+## edb_sql_protect.enabled
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Controls whether SQL/Protect is actively monitoring protected roles by analyzing SQL statements issued by those roles and reacting according to the setting of `edb_sql_protect.level`. When you're ready to begin monitoring with SQL/Protect, set this parameter to `on`.
+
+## edb_sql_protect.level
+
+**Parameter type:** Enum
+
+**Default value:** `passive`
+
+**Range:** `{learn | passive | active}`
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Sets the action taken by SQL/Protect when a SQL statement is issued by a protected role.
+
+You can set this parameter to one of the following values to use learn mode, passive mode, or active mode:
+
+- `learn`. Tracks the activities of protected roles and records the relations used by the roles. Use this value when initially configuring SQL/Protect so the expected behaviors of the protected applications are learned.
+- `passive`. Issues warnings if protected roles are breaking the defined rules but doesn't stop any SQL statements from executing. This is the next step after SQL/Protect learns the expected behavior of the protected roles. This mode is essentially intrusion detection. You can run it in production if you monitor it.
+- `active`. Stops all invalid statements for a protected role. This behavior acts as a SQL firewall that prevents dangerous queries from running. This is particularly effective against early penetration testing when the attacker is trying to determine the vulnerability point and the type of database behind the application.
Not only does SQL/Protect close those vulnerability points, it tracks the blocked queries. This behavior can alert administrators before the attacker finds an alternative method of penetrating the system. + +If you're using SQL/Protect for the first time, set `edb_sql_protect.level` to `learn`. + +## edb_sql_protect.max_protected_relations + +**Parameter type:** Integer + +**Default value:** 1024 + +**Range:** 1 to 2147483647 + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Sets the maximum number of relations that can be protected per role. The total number of protected relations for the server is the number of protected relations times the number of protected roles. Every protected relation consumes space in shared memory. The space for the maximum possible protected relations is reserved during database server startup. + +If the server is started when `edb_sql_protect.max_protected_relations` is set to a value outside of the valid range (for example, a value of 2,147,483,648), then a warning message is logged in the database server log file: + +```sql +2014-07-18 16:04:12 EDT WARNING: invalid value for parameter +"edb_sql_protect.max_protected_relations": "2147483648" +2014-07-18 16:04:12 EDT HINT: Value exceeds integer range. +``` + +The database server starts successfully but with `edb_sql_protect.max_protected_relations` set to the default value of 1024. + +Although the upper range for the parameter is listed as the maximum value for an integer data type, the practical setting depends on how much shared memory is available and the parameter value used during database server startup. + +As long as the space required can be reserved in shared memory, the value is acceptable. If the value is such that the space in shared memory can't be reserved, the database server startup fails with an error message like the following: + +```sql +2014-07-18 15:22:17 EDT FATAL: could not map anonymous shared memory: +Cannot allocate memory +2014-07-18 15:22:17 EDT HINT: This error usually means that PostgreSQL's +request for a shared memory segment exceeded available memory, swap +space or huge pages. To reduce the request size (currently 2070118400 +bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing +shared_buffers or max_connections. +``` + +In this case, reduce the parameter value until you can start the database server successfully. + +## edb_sql_protect.max_protected_roles + +**Parameter type:** Integer + +**Default value:** 64 + +**Range:** 1 to 2147483647 + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Sets the maximum number of roles that can be protected. + +Every protected role consumes space in shared memory. The server reserves space for the number of protected roles times the number of protected relations (`edb_sql_protect.max_protected_relations`). The space for the maximum possible protected roles is reserved during database server startup. 
+
+If the database server is started when `edb_sql_protect.max_protected_roles` is set to a value outside of the valid range (for example, a value of 2,147,483,648), then a warning message is logged in the database server log file:
+
+```sql
+2014-07-18 16:04:12 EDT WARNING:  invalid value for parameter
+"edb_sql_protect.max_protected_roles": "2147483648"
+2014-07-18 16:04:12 EDT HINT:  Value exceeds integer range.
+```
+
+The database server starts successfully but with `edb_sql_protect.max_protected_roles` set to the default value of 64.
+
+Although the upper range for the parameter is listed as the maximum value for an integer data type, the practical setting depends on how much shared memory is available and the parameter value used during database server startup.
+
+As long as the space required can be reserved in shared memory, the value is acceptable. If the value is such that the space in shared memory can't be reserved, the database server startup fails with an error message such as the following:
+
+```sql
+2014-07-18 15:22:17 EDT FATAL:  could not map anonymous shared memory:
+Cannot allocate memory
+2014-07-18 15:22:17 EDT HINT:  This error usually means that PostgreSQL's
+request for a shared memory segment exceeded available memory, swap
+space or huge pages. To reduce the request size (currently 2070118400
+bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing
+shared_buffers or max_connections.
+```
+
+In this case, reduce the parameter value until you can start the database server successfully.
+
+## edb_sql_protect.max_queries_to_save
+
+**Parameter type:** Integer
+
+**Default value:** 5000
+
+**Range:** 100 to 2147483647
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Sets the maximum number of offending queries to save in the view `edb_sql_protect_queries`.
+
+Every saved query consumes space in shared memory. The space for the maximum possible queries that can be saved is reserved during database server startup.
+
+If the database server is started when `edb_sql_protect.max_queries_to_save` is set to a value outside of the valid range (for example, a value of 10), then a warning message is logged in the database server log file:
+
+```sql
+2014-07-18 13:05:31 EDT WARNING:  10 is outside the valid range for
+parameter "edb_sql_protect.max_queries_to_save" (100 .. 2147483647)
+```
+
+The database server starts successfully but with `edb_sql_protect.max_queries_to_save` set to the default value of 5000.
+
+Although the upper range for the parameter is listed as the maximum value for an integer data type, the practical setting depends on how much shared memory is available and the parameter value used during database server startup.
+
+As long as the space required can be reserved in shared memory, the value is acceptable. If the value is such that the space in shared memory can't be reserved, the database server startup fails with an error message like the following:
+
+```sql
+2014-07-18 15:22:17 EDT FATAL:  could not map anonymous shared memory:
+Cannot allocate memory
+2014-07-18 15:22:17 EDT HINT:  This error usually means that PostgreSQL's
+request for a shared memory segment exceeded available memory, swap
+space or huge pages. To reduce the request size (currently 2070118400
+bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing
+shared_buffers or max_connections.
+``` + +In this case, reduce the parameter value until you can start the database server successfully. + +## edb_wait_states.directory + +**Parameter type:** String + +**Default value:** `edb_wait_states` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Restart + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Sets the directory path where the EDB wait states log files are stored. Use a full, absolute path, not a relative path. However, the default setting is `edb_wait_states`, which makes `$PGDATA/edb_wait_states` the default directory location. See [EDB Wait States](/pg_extensions/wait_states/) for more information. + +## edb_wait_states.retention_period + +**Parameter type:** Integer + +**Default value:** 604800s + +**Range:** 86400s to 2147483647s + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Sets the time to wait before deleting the log files for EDB wait states. The default is 604,800 seconds, which is 7 days. See [EDB Wait States](/pg_extensions/wait_states/) for more information. + +## edb_wait_states.sampling_interval + +**Parameter type:** Integer + +**Default value:** 1s + +**Range:** 1s to 2147483647s + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Sets the timing interval between two sampling cycles for EDB wait states. The default setting is 1 second. See [EDB Wait States](/pg_extensions/wait_states/) for more information. + +## edbldr.empty_csv_field + +**Parameter type:** Enum + +**Default value:** `NULL` + +**Range:** `{NULL | empty_string | pgsql}` + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +Use the `edbldr.empty_csv_field` parameter to specify how EDB\*Loader treats an empty string. The table shows the valid values for the `edbldr.empty_csv_field` parameter. + +| Parameter setting | EDB\*Loader behavior | +| ------------------- | ----------------------------------------------------------------------------------------------------------------- | +| `NULL` | An empty field is treated as `NULL`. | +| `empty_string` | An empty field is treated as a string of length zero. | +| `pgsql` | An empty field is treated as a `NULL` if it doesn't contain quotes and as an empty string if it contains quotes. | + +For more information about the `edbldr.empty_csv_field` parameter in EDB\*Loader, see [Tools, utilities, and components](../../02_edb_loader). + +## utl_encode.uudecode_redwood + +**Parameter type:** Boolean + +**Default value:** `false` + +**Range:** `{true | false}` + +**Minimum scope of effect:** Per session + +**When value changes take effect:** Immediate + +**Required authorization to activate:** Session user + +When set to `TRUE`, EDB Postgres Advanced Server’s `UTL_ENCODE.UUDECODE` function can decode uuencoded data that was created by the Oracle implementation of the `UTL_ENCODE.UUENCODE` function. + +When set to `FALSE`, EDB Postgres Advanced Server’s `UTL_ENCODE.UUDECODE` function can decode uuencoded data created by EDB Postgres Advanced Server’s `UTL_ENCODE.UUENCODE` function. 
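+
+A minimal round-trip sketch, assuming the usual cast from a string literal to `bytea` (the Postgres equivalent of `RAW`):
+
+```sql
+-- With the default (FALSE), UUDECODE pairs with EDB Postgres Advanced
+-- Server's own UUENCODE, so a round trip returns the original bytes
+SET utl_encode.uudecode_redwood TO off;
+SELECT UTL_ENCODE.UUDECODE(UTL_ENCODE.UUENCODE('abc'::bytea));
+
+-- Set to TRUE only when the uuencoded input was produced by Oracle's UUENCODE
+SET utl_encode.uudecode_redwood TO on;
+```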
+
+## utl_file.umask
+
+**Parameter type:** String
+
+**Default value:** 0077
+
+**Range:** Octal digits for umask settings
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+The `utl_file.umask` parameter sets the *file mode creation mask* in a manner similar to the Linux `umask` command. It applies only to the EDB Postgres Advanced Server `UTL_FILE` package.
+
+!!! Note
+    The `utl_file.umask` parameter isn't supported on Windows systems.
+
+The value specified for `utl_file.umask` is a three- or four-character octal string that's valid for the Linux `umask` command. The setting determines the permissions on files created by the `UTL_FILE` functions and procedures.
+
+The following shows the results of the default `utl_file.umask` setting of 0077. All permissions are denied for users belonging to the `enterprisedb` group as well as all other users. Only the user `enterprisedb` has read and write permissions on the file.
+
+```text
+-rw------- 1 enterprisedb enterprisedb 21 Jul 24 16:08 utlfile
+```
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/13_ungrouped.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/13_ungrouped.mdx
new file mode 100644
index 00000000000..07771383ae8
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/13_ungrouped.mdx
@@ -0,0 +1,101 @@
+---
+title: "Ungrouped configuration parameters"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.19.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.019.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/13_ungrouped/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters apply only to EDB Postgres Advanced Server and are for a specific, limited purpose.
+
+## nls_length_semantics
+
+**Parameter type:** Enum
+
+**Default value:** `byte`
+
+**Range:** `{byte | char}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Superuser
+
+This parameter has no effect in EDB Postgres Advanced Server. For example, this form of the `ALTER SESSION` command is accepted in EDB Postgres Advanced Server without throwing a syntax error. However, it doesn't alter the session environment.
+
+```sql
+ALTER SESSION SET nls_length_semantics = char;
+```
+
+!!! Note
+    Since setting this parameter has no effect on the server environment, it doesn't appear in the system view `pg_settings`.
+
+## query_rewrite_enabled
+
+**Parameter type:** Enum
+
+**Default value:** `false`
+
+**Range:** `{true | false | force}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+This parameter has no effect in EDB Postgres Advanced Server.
For example, this form of the `ALTER SESSION` command is accepted in EDB Postgres Advanced Server without throwing a syntax error. However, it doesn't alter the session environment.
+
+```sql
+ALTER SESSION SET query_rewrite_enabled = force;
+```
+
+!!! Note
+    Since setting this parameter has no effect on the server environment, it doesn't appear in the system view `pg_settings`.
+
+## query_rewrite_integrity
+
+**Parameter type:** Enum
+
+**Default value:** `enforced`
+
+**Range:** `{enforced | trusted | stale_tolerated}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Superuser
+
+This parameter has no effect in EDB Postgres Advanced Server. For example, this form of the `ALTER SESSION` command is accepted in EDB Postgres Advanced Server without throwing a syntax error. However, it doesn't alter the session environment.
+
+```sql
+ALTER SESSION SET query_rewrite_integrity = stale_tolerated;
+```
+
+!!! Note
+    Since setting this parameter has no effect on the server environment, it doesn't appear in the system view `pg_settings`.
+
+## timed_statistics
+
+**Parameter type:** Boolean
+
+**Default value:** `true`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Per session
+
+**When value changes take effect:** Immediate
+
+**Required authorization to activate:** Session user
+
+Controls collecting timing data for the Dynamic Runtime Instrumentation Tools Architecture (DRITA) feature. When set to `on`, timing data is collected.
+
+!!! Note
+    When EDB Postgres Advanced Server is installed, the `postgresql.conf` file contains an explicit entry that sets `timed_statistics` to `off`. If this entry is commented out and the configuration file is reloaded, timed statistics collection uses the default value, which is `on`.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/14_audit_archiver.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/14_audit_archiver.mdx
new file mode 100644
index 00000000000..bc83700ebe8
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/14_audit_archiver.mdx
@@ -0,0 +1,194 @@
+---
+title: "Audit archiving parameters"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/14_audit_archiver/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+These configuration parameters are used by the EDB Postgres Advanced Server database [audit archiving feature](../../../epas_security_guide/05_edb_audit_logging/08_audit_log_archiving/).
+
+## edb_audit_archiver
+
+**Parameter type:** Boolean
+
+**Default value:** `false`
+
+**Range:** `{true | false}`
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Restart
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Enables or disables database audit archiving.
+
+## edb_audit_archiver_timeout
+
+**Parameter type:** Integer
+
+**Default value:** 300s
+
+**Range:** 30s to 1d
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+ +## edb_audit_archiver_filename_prefix + +**Parameter type:** String + +**Default value:** `audit-` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the file name of an audit log file that needs to be archived. The file name must align with the `edb_audit_filename` parameter. The audit files with `edb_audit_archiver_filename_prefix` in the `edb_audit_directory` are eligible for compression or expiration. + +## edb_audit_archiver_compress_time_limit + +**Parameter type:** Integer + +**Default value:** -1 + +**Allowed value:** 0, -1, or any positive number value in seconds + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the time in seconds after which audit logs are eligible for compression. The possible values to set this parameter are: + +- `0`. Compression starts as soon as the log file isn't a current file. +- `-1`. Compression of the log file on a timely basis doesn't occur. + +## edb_audit_archiver_compress_size_limit + +**Parameter type:** Integer + +**Default value:** -1 + +**Allowed value:** 0, -1, or any positive number value in megabytes + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies a file size threshold in megabytes, after which audit logs are eligible for compression. If the parameter is set to -1, no compression of the log file occurs based on size. + +## edb_audit_archiver_compress_command + +**Parameter type:** String + +**Default value:** `gzip %p` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the command to execute compressing of the audit log files. The default value for `edb_audit_archiver_compress_command` is `gzip %p`. The `gzip` provides a standard method of compressing files. The `%p` in the string is replaced by the path name of the file to archive. + +## edb_audit_archiver_compress_suffix + +**Parameter type:** String + +**Default value:** `.gz` + +**Range:** n/a + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the file name of an already compressed log file. The file name must align with `edb_audit_archiver_compress_command`. The default file name is `.gz`. + +## edb_audit_archiver_expire_time_limit + +**Parameter type:** Integer + +**Default value:** -1 + +**Allowed value:** 0, -1, or any positive number value in seconds + +**Minimum scope of effect:** Cluster + +**When value changes take effect:** Reload + +**Required authorization to activate:** EDB Postgres Advanced Server service account + +Specifies the time in seconds after which audit logs are eligible to expire. The possible values to set this parameter are: + +- `0`. Expiration starts as soon as the log file isn't a current file. +- `-1`. Expiration of the log file on a timely basis doesn't occur. 
+
+## edb_audit_archiver_expire_size_limit
+
+**Parameter type:** Integer
+
+**Default value:** -1
+
+**Allowed value:** 0, -1, or any positive number value in megabytes
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies a file size threshold in megabytes, after which audit logs are eligible to expire. If the parameter is set to -1, no expiration of a log file based on size occurs.
+
+## edb_audit_archiver_expire_command
+
+**Parameter type:** String
+
+**Default value:** ''
+
+**Range:** n/a
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Specifies the command to execute on an expired audit log file before removal.
+
+## edb_audit_archiver_sort_file
+
+**Parameter type:** String
+
+**Default value:** `mtime`
+
+**Range:** n/a
+
+**Minimum scope of effect:** Cluster
+
+**When value changes take effect:** Reload
+
+**Required authorization to activate:** EDB Postgres Advanced Server service account
+
+Determines how the oldest log file is identified: by sorting file names alphabetically or by comparing file modification times.
+
+- `mtime` sorts files based on file modification time.
+- `alphabetic` sorts files alphabetically based on the file name.
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/attribute_descriptions.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/attribute_descriptions.mdx
new file mode 100644
index 00000000000..65b3f98a07a
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/attribute_descriptions.mdx
@@ -0,0 +1,15 @@
+---
+title: "Description of parameter attributes"
+---
+
+The description of each group of parameters includes this list of attributes:
+
+- **Parameter type.** Type of values the parameter can accept. See [Setting configuration parameters](../01_setting_new_parameters/#setting_configuration_parameters) for a discussion of parameter type values.
+- **Default value.** Default setting if a value isn't explicitly set in the configuration file.
+- **Range.** Allowed range of values.
+- **Minimum scope of effect.** Smallest scope for which a distinct setting can be made. Generally, the minimal scope of a distinct setting is either:
+  - The entire cluster, meaning the setting is the same for all databases in the cluster and its sessions
+  - `per session`, which means the setting might vary by role, database, or individual session.
+
+  This attribute has the same meaning as the `Scope of effect` column in the table of [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/).
+- **When value changes take effect.** Least invasive action required to activate a change to a parameter’s value. Any parameter setting change made in the configuration file can be put into effect with a database server restart, and certain parameters require a database server `restart`. Other parameter setting changes can be put into effect with a `reload` of the configuration file without stopping the database server. Finally, some parameter setting changes can be put into effect with a client-side action whose result is `immediate`.
This attribute has the same meaning as the `When takes effect` column in the table of [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/).
+- **Required authorization to activate.** The type of user authorization required to activate a change to a parameter’s setting. If a database server restart or a configuration file reload is required, then the user must be an EDB Postgres Advanced Server service account (`enterprisedb` or `postgres`, depending on the EDB Postgres Advanced Server compatibility installation mode). This attribute has the same meaning as the `Authorized user` column in the table of [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/).
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/index.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/index.mdx
new file mode 100644
index 00000000000..7a299650e61
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/index.mdx
@@ -0,0 +1,21 @@
+---
+title: "Configuration parameters by functionality"
+indexCards: simple
+description: "Provides a detailed description of the configuration parameters, grouped by function"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.11.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.011.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/03_configuration_parameters_by_functionality/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Configuration parameters can be grouped by function. See [Description of parameter attributes](attribute_descriptions) for the list of attributes included in each configuration parameter description.
+
+<div class="toctree" hidden>
+
+top_performance_related_parameters
+resource_usage_memory
+resource_usage_edb_resource_manager
+query_tuning
+query_tuning_planner_method_configuration
+reporting_and_logging_what_to_log
+auditing_settings
+ccd_locale_and_formatting
+ccd_statement_behaviour
+ccd_other_defaults
+compatibility_options
+customized_options
+ungrouped
+audit_archiver
+
+</div>
diff --git a/product_docs/docs/epas/17/database_administration/01_configuration_parameters/index.mdx b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/index.mdx
new file mode 100644
index 00000000000..0dd62389ff1
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/01_configuration_parameters/index.mdx
@@ -0,0 +1,25 @@
+---
+title: "Setting configuration parameters"
+indexCards: simple
+description: "How to set configuration parameters that control the database server’s behavior and environment"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.08.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.008.html"
+redirects:
+  - /epas/latest/epas_guide/03_database_administration/01_configuration_parameters/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+The EDB Postgres Advanced Server configuration parameters control various aspects of the database server’s behavior and environment, such as data file and log file locations; connection, authentication, and security settings; resource allocation and consumption; archiving and replication settings; error logging and statistics gathering; optimization and performance tuning; and locale and formatting settings.
+
+Configuration parameters that apply only to EDB Postgres Advanced Server are noted in [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/), which lists all EDB Postgres Advanced Server configuration parameters along with a number of key attributes of the parameters.
+
+You can find more information about configuration parameters in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/runtime-config.html).
+
+<div class="toctree" hidden>
+
+setting_new_parameters
+summary_of_configuration_parameters
+configuration_parameters_by_functionality
+
+</div>
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/control_file_examples.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/control_file_examples.mdx new file mode 100644 index 00000000000..1fc84dd2d81 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/control_file_examples.mdx @@ -0,0 +1,486 @@ +--- +title: "EDB*Loader control file examples" +navTitle: "Control file examples" +description: "Provides examples of control files and their corresponding data files" +redirects: + - /epas/latest/database_administration/02_edb_loader/control_file_examples/ #generated for docs/epas/reorg-role-use-case-mode +--- + +The following are some examples of control files and their corresponding data files. + +## Delimiter-separated field data file + +This control file uses a delimiter-separated data file that appends rows to the `emp` table. The `APPEND` clause is used to allow inserting additional rows into the `emp` table. + + +```sql +LOAD DATA + INFILE 'emp.dat' + BADFILE 'emp.bad' + APPEND + INTO TABLE emp + FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' + TRAILING NULLCOLS + ( + empno, + ename, + job, + mgr, + hiredate, + sal, + deptno, + comm + ) +``` + +The following is the corresponding delimiter-separated data file: + +```sql +9101,ROGERS,CLERK,7902,17-DEC-10,1980.00,20 +9102,PETERSON,SALESMAN,7698,20-DEC-10,2600.00,30,2300.00 +9103,WARREN,SALESMAN,7698,22-DEC-10,5250.00,30,2500.00 +9104,"JONES, JR.",MANAGER,7839,02-APR-09,7975.00,20 +``` + +The use of the `TRAILING NULLCOLS` clause allows you to omit the last field supplying the `comm` column from the first and last records. The `comm` column is set to null for the rows inserted from these records. + +Double quotation marks surround the value `JONES, JR.` in the last record since the comma delimiter character is part of the field value. + +This query displays the rows added to the table after the EDB\*Loader session: + +```sql +SELECT * FROM emp WHERE empno > 9100; +__OUTPUT__ + empno| ename | job | mgr| hiredate | sal | comm |deptno +------+-----------+---------+----+-------------------+-------+--------+------ + 9101| ROGERS | CLERK |7902| 17-DEC-10 00:00:00|1980.00| | 20 + 9102| PETERSON | SALESMAN|7698| 20-DEC-10 00:00:00|2600.00| 2300.00| 30 + 9103| WARREN | SALESMAN|7698| 22-DEC-10 00:00:00|5250.00| 2500.00| 30 + 9104| JONES, JR.| MANAGER |7839| 02-APR-09 00:00:00|7975.00| | 20 +(4 rows) +``` + +## Fixed-width field data file + +This control file loads the same rows into the `emp` table. It uses a data file containing fixed-width fields. The `FIELDS TERMINATED BY` and `OPTIONALLY ENCLOSED BY` clauses are absent. Instead, each field includes the `POSITION` clause. 
+
+```sql
+LOAD DATA
+  INFILE 'emp_fixed.dat'
+  BADFILE 'emp_fixed.bad'
+  APPEND
+  INTO TABLE emp
+  TRAILING NULLCOLS
+  (
+    empno    POSITION (1:4),
+    ename    POSITION (5:14),
+    job      POSITION (15:23),
+    mgr      POSITION (24:27),
+    hiredate POSITION (28:38),
+    sal      POSITION (39:46),
+    deptno   POSITION (47:48),
+    comm     POSITION (49:56)
+  )
+```
+
+The following is the corresponding data file containing fixed-width fields:
+
+```sql
+9101ROGERS    CLERK    790217-DEC-10   1980.0020
+9102PETERSON  SALESMAN 769820-DEC-10   2600.0030 2300.00
+9103WARREN    SALESMAN 769822-DEC-10   5250.0030 2500.00
+9104JONES, JR.MANAGER  783902-APR-09   7975.0020
+```
+
+## Single physical record data file – RECORDS DELIMITED BY clause
+
+This control file loads the same rows into the `emp` table but uses a data file with one physical record. Each record that's loaded as a row in the table is terminated by a semicolon (`;`). The `RECORDS DELIMITED BY` clause specifies this value.
+
+```sql
+LOAD DATA
+  INFILE 'emp_recdelim.dat'
+  BADFILE 'emp_recdelim.bad'
+  APPEND
+  INTO TABLE emp
+  FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
+  RECORDS DELIMITED BY ';'
+  TRAILING NULLCOLS
+  (
+    empno,
+    ename,
+    job,
+    mgr,
+    hiredate,
+    sal,
+    deptno,
+    comm
+  )
+```
+
+The following is the corresponding data file. The content is a single physical record in the data file. The record delimiter character is included following the last record, that is, at the end of the file.
+
+```sql
+9101,ROGERS,CLERK,7902,17-DEC-10,1980.00,20,;9102,PETERSON,SALESMAN,7698,20-DEC-10,
+2600.00,30,2300.00;9103,WARREN,SALESMAN,7698,22-DEC-10,5250.00,30,2500.00;9104,"JONES,
+JR.",MANAGER,7839,02-APR-09,7975.00,20,;
+```
+
+## FILLER clause
+
+This control file uses the `FILLER` clause in the data fields for the `sal` and `comm` columns. EDB\*Loader ignores the values in these fields and sets the corresponding columns to null.
+
+```sql
+LOAD DATA
+  INFILE 'emp_fixed.dat'
+  BADFILE 'emp_fixed.bad'
+  APPEND
+  INTO TABLE emp
+  TRAILING NULLCOLS
+  (
+    empno    POSITION (1:4),
+    ename    POSITION (5:14),
+    job      POSITION (15:23),
+    mgr      POSITION (24:27),
+    hiredate POSITION (28:38),
+    sal      FILLER POSITION (39:46),
+    deptno   POSITION (47:48),
+    comm     FILLER POSITION (49:56)
+  )
+```
+
+Using the same fixed-width data file as in the prior fixed-width field example, the resulting rows in the table appear as follows:
+
+```sql
+SELECT * FROM emp WHERE empno > 9100;
+__OUTPUT__
+ empno| ename     | job     | mgr| hiredate          | sal   | comm   |deptno
+------+-----------+---------+----+-------------------+-------+--------+------
+ 9101| ROGERS    | CLERK   |7902| 17-DEC-10 00:00:00|       |        |    20
+ 9102| PETERSON  | SALESMAN|7698| 20-DEC-10 00:00:00|       |        |    30
+ 9103| WARREN    | SALESMAN|7698| 22-DEC-10 00:00:00|       |        |    30
+ 9104| JONES, JR.| MANAGER |7839| 02-APR-09 00:00:00|       |        |    20
+(4 rows)
+```
+
+## BOUNDFILLER clause
+
+This control file uses the `BOUNDFILLER` clause in the data fields for the `job` and `mgr` columns. EDB\*Loader ignores the values in these fields and sets the corresponding columns to null in the same manner as the `FILLER` clause. However, unlike columns with the `FILLER` clause, you can use columns with the `BOUNDFILLER` clause in an expression, as shown for column `jobdesc`.
+ +```sql +LOAD DATA + INFILE 'emp.dat' + BADFILE 'emp.bad' + APPEND + INTO TABLE empjob + FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' + TRAILING NULLCOLS + ( + empno, + ename, + job BOUNDFILLER, + mgr BOUNDFILLER, + hiredate FILLER, + sal FILLER, + deptno FILLER, + comm FILLER, + jobdesc ":job || ' for manager ' || :mgr" + ) +``` + +The following is the delimiter-separated data file used in this example: + +```sql +9101,ROGERS,CLERK,7902,17-DEC-10,1980.00,20 +9102,PETERSON,SALESMAN,7698,20-DEC-10,2600.00,30,2300.00 +9103,WARREN,SALESMAN,7698,22-DEC-10,5250.00,30,2500.00 +9104,"JONES, JR.",MANAGER,7839,02-APR-09,7975.00,20 +``` + +The following table is loaded using the preceding control file and data file: + +```sql +CREATE TABLE empjob ( + empno NUMBER(4) NOT NULL CONSTRAINT empjob_pk PRIMARY KEY, + ename VARCHAR2(10), + job VARCHAR2(9), + mgr NUMBER(4), + jobdesc VARCHAR2(25) +); +``` + +The resulting rows in the table appear as follows: + +```sql +SELECT * FROM empjob; +__OUTPUT__ + empno | ename | job | mgr | jobdesc +-------+------------+-----+-----+--------------------------- + 9101 | ROGERS | | | CLERK for manager 7902 + 9102 | PETERSON | | | SALESMAN for manager 7698 + 9103 | WARREN | | | SALESMAN for manager 7698 + 9104 | JONES, JR. | | | MANAGER for manager 7839 +(4 rows) +``` + +## Field types with length specification + +This control file contains the field-type clauses with the length specification: + +```sql +LOAD DATA + INFILE 'emp_fixed.dat' + BADFILE 'emp_fixed.bad' + APPEND + INTO TABLE emp + TRAILING NULLCOLS + ( + empno CHAR(4), + ename CHAR(10), + job POSITION (15:23) CHAR(9), + mgr INTEGER EXTERNAL(4), + hiredate DATE(11) "DD-MON-YY", + sal DECIMAL EXTERNAL(8), + deptno POSITION (47:48), + comm POSITION (49:56) DECIMAL EXTERNAL(8) + ) +``` + +!!! Note + You can use the `POSITION` clause and the `fieldtype(length)` clause individually or in combination as long as each field definition contains at least one of the two clauses. + +The following is the corresponding data file containing fixed-width fields: + +```sql +9101ROGERS CLERK 790217-DEC-10 1980.0020 +9102PETERSON SALESMAN 769820-DEC-10 2600.0030 2300.00 +9103WARREN SALESMAN 769822-DEC-10 5250.0030 2500.00 +9104JONES, JR. MANAGER 783902-APR-09 7975.0020 +``` + +The resulting rows in the table appear as follows: + +```sql +SELECT * FROM emp WHERE empno > 9100; +__OUTPUT__ + empno| ename | job | mgr| hiredate | sal | comm |deptno +------+-----------+---------+----+-------------------+-------+--------+------ + 9101| ROGERS | CLERK |7902| 17-DEC-10 00:00:00|1980.00| | 20 + 9102| PETERSON | SALESMAN|7698| 20-DEC-10 00:00:00|2600.00| 2300.00| 30 + 9103| WARREN | SALESMAN|7698| 22-DEC-10 00:00:00|5250.00| 2500.00| 30 + 9104| JONES, JR.| MANAGER |7839| 02-APR-09 00:00:00|7975.00| | 20 +(4 rows) +``` + +## NULLIF clause + +This example uses the `NULLIF` clause on the `sal` column to set it to null for employees of job `MANAGER`. It also uses the clause on the `comm` column to set it to null if the employee isn't a `SALESMAN` and isn't in department `30`. In other words, a `comm` value is accepted if the employee is a `SALESMAN` or is a member of department `30`. 
+ +The following is the control file: + +```sql +LOAD DATA + INFILE 'emp_fixed_2.dat' + BADFILE 'emp_fixed_2.bad' + APPEND + INTO TABLE emp + TRAILING NULLCOLS + ( + empno POSITION (1:4), + ename POSITION (5:14), + job POSITION (15:23), + mgr POSITION (24:27), + hiredate POSITION (28:38), + sal POSITION (39:46) NULLIF job = 'MANAGER', + deptno POSITION (47:48), + comm POSITION (49:56) NULLIF job <> 'SALESMAN' AND deptno <> '30' + ) +``` + +The following is the corresponding data file: + +```sql +9101ROGERS CLERK 790217-DEC-10 1980.0020 +9102PETERSON SALESMAN 769820-DEC-10 2600.0030 2300.00 +9103WARREN SALESMAN 769822-DEC-10 5250.0030 2500.00 +9104JONES, JR. MANAGER 783902-APR-09 7975.0020 +9105ARNOLDS CLERK 778213-SEP-10 3750.0030 800.00 +9106JACKSON ANALYST 756603-JAN-11 4500.0040 2000.00 +9107MAXWELL SALESMAN 769820-DEC-10 2600.0010 1600.00 +``` + +The resulting rows in the table appear as follows: + +```sql +SELECT empno, ename, job, NVL(TO_CHAR(sal),'--null--') "sal", + NVL(TO_CHAR(comm),'--null--') "comm", deptno FROM emp WHERE empno > 9100; +__OUTPUT__ + empno | ename | job | sal | comm | deptno +-------+------------+----------+----------+----------+------- + 9101 | ROGERS | CLERK | 1980.00 | --null-- | 20 + 9102 | PETERSON | SALESMAN | 2600.00 | 2300.00 | 30 + 9103 | WARREN | SALESMAN | 5250.00 | 2500.00 | 30 + 9104 | JONES, JR. | MANAGER | --null-- | --null-- | 20 + 9105 | ARNOLDS | CLERK | 3750.00 | 800.00 | 30 + 9106 | JACKSON | ANALYST | 4500.00 | --null-- | 40 + 9107 | MAXWELL | SALESMAN | 2600.00 | 1600.00 | 10 +(7 rows) +``` + +!!! Note + The `sal` column for employee `JONES, JR.` is null since the job is `MANAGER`. + +The `comm` values from the data file for employees `PETERSON`, `WARREN`, `ARNOLDS`, and `MAXWELL` are all loaded into the `comm` column of the `emp` table since these employees are either `SALESMAN` or members of department `30`. + +The `comm` value of `2000.00` in the data file for employee `JACKSON` is ignored, and the `comm` column of the `emp` table is set to null. This employee isn't a `SALESMAN` or a member of department `30`. + +## SELECT statement in a field expression + +This example uses a `SELECT` statement in the expression of the field definition to return the value to load into the column: + +```sql +LOAD DATA + INFILE 'emp_fixed.dat' + BADFILE 'emp_fixed.bad' + APPEND + INTO TABLE emp + TRAILING NULLCOLS + ( + empno POSITION (1:4), + ename POSITION (5:14), + job POSITION (15:23) "(SELECT dname FROM dept WHERE deptno = :deptno)", + mgr POSITION (24:27), + hiredate POSITION (28:38), + sal POSITION (39:46), + deptno POSITION (47:48), + comm POSITION (49:56) + ) +``` + +The following is the content of the `dept` table used in the `SELECT` statement: + +```sql +SELECT * FROM dept; +__OUTPUT__ + deptno | dname | loc +---------+------------+--------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON +(4 rows) +``` + +The following is the corresponding data file: + +```sql +9101ROGERS CLERK 790217-DEC-10 1980.0020 +9102PETERSON SALESMAN 769820-DEC-10 2600.0030 2300.00 +9103WARREN SALESMAN 769822-DEC-10 5250.0030 2500.00 +9104JONES, JR. 
MANAGER 783902-APR-09 7975.0020 +``` + +The resulting rows in the table appear as follows: + +```sql +SELECT * FROM emp WHERE empno > 9100; +__OUTPUT__ + empno| ename | job | mgr| hiredate | sal | comm |deptno +------+-----------+---------+----+-------------------+-------+--------+------ + 9101| ROGERS | RESEARCH|7902| 17-DEC-10 00:00:00|1980.00| | 20 + 9102| PETERSON | SALES |7698| 20-DEC-10 00:00:00|2600.00| 2300.00| 30 + 9103| WARREN | SALES |7698| 22-DEC-10 00:00:00|5250.00| 2500.00| 30 + 9104| JONES, JR.| RESEARCH|7839| 02-APR-09 00:00:00|7975.00| | 20 +(4 rows) +``` + +!!! Note + The `job` column contains the value from the `dname` column of the `dept` table returned by the `SELECT` statement instead of the job name from the data file. + +## Multiple INTO TABLE clauses + +This example uses multiple `INTO TABLE` clauses. For this example, two empty tables are created with the same data definition as the `emp` table. The following `CREATE TABLE` commands create these two empty tables without inserting rows from the original `emp` table: + +```sql +CREATE TABLE emp_research AS SELECT * FROM emp WHERE deptno = 99; +CREATE TABLE emp_sales AS SELECT * FROM emp WHERE deptno = 99; +``` + +This control file contains two `INTO TABLE` clauses. Without an `APPEND` clause, it uses the default operation of `INSERT`. For this operation, the tables `emp_research` and `emp_sales` must be empty. + +```sql +LOAD DATA + INFILE 'emp_multitbl.dat' + BADFILE 'emp_multitbl.bad' + DISCARDFILE 'emp_multitbl.dsc' + INTO TABLE emp_research + WHEN (47:48) = '20' + TRAILING NULLCOLS + ( + empno POSITION (1:4), + ename POSITION (5:14), + job POSITION (15:23), + mgr POSITION (24:27), + hiredate POSITION (28:38), + sal POSITION (39:46), + deptno CONSTANT '20', + comm POSITION (49:56) + ) + INTO TABLE emp_sales + WHEN (47:48) = '30' + TRAILING NULLCOLS + ( + empno POSITION (1:4), + ename POSITION (5:14), + job POSITION (15:23), + mgr POSITION (24:27), + hiredate POSITION (28:38), + sal POSITION (39:46), + deptno CONSTANT '30', + comm POSITION (49:56) "ROUND(:comm + (:sal * .25), 0)" + ) +``` + +The `WHEN` clauses specify that when the field designated by columns 47 through 48 contains `20`, the record is inserted into the `emp_research` table. When that same field contains `30`, the record is inserted into the `emp_sales` table. If neither condition is true, the record is written to the discard file `emp_multitbl.dsc`. + +The `CONSTANT` clause is given for column `deptno`, so the specified constant value is inserted into `deptno` for each record. When the `CONSTANT` clause is used, it must be the only clause in the field definition other than the column name to which the constant value is assigned. + +Column `comm` of the `emp_sales` table is assigned a SQL expression. Expressions can reference column names by prefixing the column name with a colon character (`:`). + +The following is the corresponding data file: + +```sql +9101ROGERS CLERK 790217-DEC-10 1980.0020 +9102PETERSON SALESMAN 769820-DEC-10 2600.0030 2300.00 +9103WARREN SALESMAN 769822-DEC-10 5250.0030 2500.00 +9104JONES, JR. MANAGER 783902-APR-09 7975.0020 +9105ARNOLDS CLERK 778213-SEP-10 3750.0010 +9106JACKSON ANALYST 756603-JAN-11 4500.0040 +``` + +The records for employees `ARNOLDS` and `JACKSON` contain `10` and `40` in columns 47 through 48, which don't satisfy any of the `WHEN` clauses. 
EDB\*Loader writes these two records to the discard file, `emp_multitbl.dsc`, with the following content:
+
+```sql
+9105ARNOLDS   CLERK    778213-SEP-10   3750.0010
+9106JACKSON   ANALYST  756603-JAN-11   4500.0040
+```
+
+The following are the rows loaded into the `emp_research` and `emp_sales` tables:
+
+```sql
+SELECT * FROM emp_research;
+__OUTPUT__
+ empno |   ename    |   job   | mgr  |      hiredate      |   sal   | comm | deptno
+-------+------------+---------+------+--------------------+---------+------+-------
+  9101 | ROGERS     | CLERK   | 7902 | 17-DEC-10 00:00:00 | 1980.00 |      |  20.00
+  9104 | JONES, JR. | MANAGER | 7839 | 02-APR-09 00:00:00 | 7975.00 |      |  20.00
+(2 rows)
+```
+```sql
+SELECT * FROM emp_sales;
+__OUTPUT__
+ empno |  ename   |   job    | mgr  |      hiredate      |   sal   |  comm   | deptno
+-------+----------+----------+------+--------------------+---------+---------+--------
+  9102 | PETERSON | SALESMAN | 7698 | 20-DEC-10 00:00:00 | 2600.00 | 2950.00 |  30.00
+  9103 | WARREN   | SALESMAN | 7698 | 22-DEC-10 00:00:00 | 5250.00 | 3813.00 |  30.00
+(2 rows)
+```
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/index.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/index.mdx
new file mode 100644
index 00000000000..e9859ab365d
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/building_the_control_file/index.mdx
@@ -0,0 +1,114 @@
+---
+title: "Building the EDB*Loader control file"
+navTitle: "Building the control file"
+description: "Provides information necessary to build a control file"
+redirects:
+  - /epas/latest/database_administration/02_edb_loader/building_the_control_file/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+When you invoke EDB\*Loader, the list of arguments provided must include the name of a control file. The control file includes the instructions that EDB\*Loader uses to load the tables from the input data file.
+
+## Contents of the control file
+
+The control file includes information such as:
+
+- The name of the input data file containing the data to load
+- The name of the tables to load from the data file
+- Names of the columns in the tables and their corresponding field placement in the data file
+- Specification of whether the data file uses a delimiter string to separate the fields or if the fields occupy fixed column positions
+- Optional selection criteria to choose the records from the data file to load into a given table
+- The name of the file that collects illegally formatted records
+- The name of the discard file that collects records that don't meet the selection criteria of any table
+
+## Control file syntax
+
+The syntax for the EDB\*Loader control file is:
+
+```sql
+[ OPTIONS (param=value [, param=value ] ...) ]
+LOAD DATA
+  [ CHARACTERSET charset ]
+  [ INFILE '{ data_file | stdin }' ]
+  [ BADFILE 'bad_file' ]
+  [ DISCARDFILE 'discard_file' ]
+  [ { DISCARDMAX | DISCARDS } max_discard_recs ]
+[ INSERT | APPEND | REPLACE | TRUNCATE ]
+[ PRESERVE BLANKS ]
+{ INTO TABLE target_table
+  [ WHEN field_condition [ AND field_condition ] ...]
+  [ FIELDS TERMINATED BY 'termstring'
+    [ OPTIONALLY ENCLOSED BY 'enclstring' ] ]
+  [ RECORDS DELIMITED BY 'delimstring' ]
+  [ TRAILING NULLCOLS ]
+  (field_def [, field_def ] ...)
+} ...
+```
+
+Where `field_def` defines a field in the specified `data_file`. The field describes the location, data format, or value of the data to insert into `column_name` of `target_table`. The syntax of `field_def` is:
+
+```sql
+column_name {
+  CONSTANT val |
+  FILLER [ POSITION (start:end) ] [ fieldtype ] |
+  BOUNDFILLER [ POSITION (start:end) ] [ fieldtype ] |
+  [ POSITION (start:end) ] [ fieldtype ]
+  [ NULLIF field_condition [ AND field_condition ] ...]
+  [ PRESERVE BLANKS ] [ "expr" ]
+}
+```
+
+Where `fieldtype` is one of:
+
+```sql
+CHAR [(length)] | DATE [(length)] | TIMESTAMP [(length)] [ "datemask" ] |
+INTEGER EXTERNAL [(length)] |
+FLOAT EXTERNAL [(length)] | DECIMAL EXTERNAL [(length)] |
+ZONED EXTERNAL [(length)] | ZONED [(precision [,scale])]
+```
+
+## Setting the variables
+
+The specification of `data_file`, `bad_file`, and `discard_file` can include the full directory path or a relative directory path to the filename. If the filename is specified alone or with a relative directory path, the file is then assumed to exist, in the case of `data_file`, relative to the current working directory from which you invoke `edbldr`. In the case of `bad_file` or `discard_file`, it's created.
+
+You can include references to environment variables in the EDB\*Loader control file when referring to a directory path or filename. Environment variable references are formatted differently on Windows systems than on Linux systems:
+
+- On Linux, the format is `$ENV_VARIABLE` or `${ENV_VARIABLE}`.
+- On Windows, the format is `%ENV_VARIABLE%`.
+
+Where `ENV_VARIABLE` is the environment variable that's set to the directory path or filename.
+
+Set the `EDBLDR_ENV_STYLE` environment variable to interpret environment variable references as Windows-styled references or Linux-styled references regardless of the operating system on which EDB\*Loader resides. You can use this environment variable to create portable control files for EDB\*Loader.
+
+- On a Windows system, set `EDBLDR_ENV_STYLE` to `linux` or `unix` to recognize Linux-style references in the control file.
+- On a Linux system, set `EDBLDR_ENV_STYLE` to `windows` to recognize Windows-style references in the control file.
+
+The operating system account enterprisedb must have read permission on the directory and file specified by `data_file`. It must have write permission to the directories where `bad_file` and `discard_file` are written.
+
+!!! Note
+    The filenames for `data_file`, `bad_file`, and `discard_file` must have extensions `.dat`, `.bad`, and `.dsc`, respectively. If the provided filename doesn't have an extension, EDB\*Loader assumes the actual filename includes the appropriate extension.
+
+## Example scenarios
+
+Suppose an EDB\*Loader session results in data format errors, the `BADFILE` clause isn't specified, and the `BAD` parameter isn't given on the command line when `edbldr` is invoked. In this case, a bad file is created with the name `control_file_base.bad` in the directory from which `edbldr` is invoked. `control_file_base` is the base name of the control file used in the `edbldr` session.
+
+If all of the following conditions are true, the discard file isn't created even if the EDB\*Loader session results in discarded records:
+
+- The `DISCARDFILE` clause for specifying the discard file isn't included in the control file.
+- The `DISCARD` parameter for specifying the discard file isn't included on the command line.
+- The `DISCARDMAX` clause for specifying the maximum number of discarded records isn't included in the control file.
+- The `DISCARDS` clause for specifying the maximum number of discarded records isn't included in the control file.
+- The `DISCARDMAX` parameter for specifying the maximum number of discarded records isn't included on the command line.
+
+Suppose you don't specify the `DISCARDFILE` clause and the `DISCARD` parameter for explicitly specifying the discard filename, but you do specify `DISCARDMAX` or `DISCARDS`.
In this case, the EDB\*Loader session creates a discard file using the data filename with an extension of `.dsc`.
+
+!!! Note
+    The keywords `DISCARD` and `DISCARDS` differ. `DISCARD` is an EDB\*Loader command line parameter used to specify the discard filename. `DISCARDS` is a clause of the `LOAD DATA` directive that can appear only in the control file. The `DISCARDS` and `DISCARDMAX` keywords provide the same functionality: both specify the maximum number of discarded records allowed before terminating the EDB\*Loader session. Records loaded into the database before terminating the EDB\*Loader session due to exceeding the `DISCARDS` or `DISCARDMAX` settings are kept in the database. They aren't rolled back.
+
+Specifying one of `INSERT`, `APPEND`, `REPLACE`, or `TRUNCATE` establishes the default action for adding rows to target tables. The default action is `INSERT`.
+
+If you specify the `FIELDS TERMINATED BY` clause, then you can't specify the `POSITION (start:end)` clause for any `field_def`. Alternatively, if you don't specify the `FIELDS TERMINATED BY` clause, then every `field_def` must contain the `POSITION (start:end)` clause, the `fieldtype(length)` clause, or the `CONSTANT` clause.
+
+For complete descriptions of the parameters available for each clause, see [EDB\*Loader control file parameters](../../../reference/database_administrator_reference/edb_loader_control_file_parameters/).
+
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/data_loading_methods.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/data_loading_methods.mdx
new file mode 100644
index 00000000000..5bde3714902
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/data_loading_methods.mdx
@@ -0,0 +1,17 @@
+---
+title: "Overview of data loading methods"
+navTitle: "Data loading methods"
+description: "Description of the data loading methods supported by EDB*Loader"
+---
+
+As with Oracle SQL\*Loader, EDB\*Loader supports three data loading methods:
+
+- **Conventional path load** — Conventional path load is the default method used by EDB\*Loader. It uses basic insert processing to add rows to the table. The advantage of a conventional path load is that table constraints and database objects defined on the table are enforced during the load. Table constraints and database objects include primary keys, not null constraints, check constraints, unique indexes, foreign key constraints, triggers, and so on. One exception is that EDB Postgres Advanced Server rules defined on the table aren't enforced. EDB\*Loader can load tables on which rules are defined, but the rules aren't executed. As a consequence, you can't use EDB\*Loader to load partitioned tables implemented using rules. See [Conventional path load](invoking_edb_loader/conventional_path_load.mdx).
+
+- **Direct path load** — A direct path load is faster than a conventional path load but requires removing most types of constraints and triggers from the table. See [Direct path load](invoking_edb_loader/direct_path_load.mdx).
+
+- **Parallel direct path load** — A parallel direct path load provides even greater performance improvement by permitting multiple EDB\*Loader sessions to run simultaneously to load a single table. See [Parallel direct path load](invoking_edb_loader/parallel_direct_path_load.mdx).
+
+!!! Note
+    Create EDB Postgres Advanced Server rules using the `CREATE RULE` command. EDB Postgres Advanced Server rules aren't the same database objects as rules and rule sets used in Oracle.
+
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx
new file mode 100644
index 00000000000..ebd6632085d
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx
@@ -0,0 +1,32 @@
+---
+title: "EDB*Loader key concepts and compatibility"
+navTitle: "Key concepts and compatibility"
+description: "Provides an overview of the key features of EDB*Loader as well as important compatibility information"
+---
+
+## Key features
+
+EDB\*Loader features include:
+
+- Support for the Oracle SQL\*Loader data loading methods (conventional path load, direct path load, and parallel direct path load)
+- Syntax for control file directives compatible with Oracle SQL\*Loader
+- Input data with delimiter-separated or fixed-width fields
+- Bad file for collecting rejected records
+- Loading of multiple target tables
+- Discard file for collecting records that don't meet the selection criteria of any target table
+- Log file for recording the EDB\*Loader session and any error messages
+- Data loading from standard input and remote loading, particularly useful for large data sources on remote hosts
+
+## Version compatibility restrictions
+
+The important version compatibility restrictions between the EDB\*Loader client and the database server are:
+
+- When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. We strongly recommend using the EDB\*Loader client (the `edbldr` program) supplied with the version of EDB Postgres Advanced Server you're running. In general, use the same version for the EDB\*Loader client and database server.
+
+- Using EDB\*Loader with connection poolers such as PgPool-II and PgBouncer isn't supported. EDB\*Loader must connect directly to EDB Postgres Advanced Server version 17. Alternatively, you can use these commands for loading data through connection poolers:
+
+```shell
+psql \copy
+jdbc copyIn
+psycopg2 copy_from
+```
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/index.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/index.mdx
new file mode 100644
index 00000000000..8e9503d26d5
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/index.mdx
@@ -0,0 +1,53 @@
+---
+title: "Loading bulk data"
+navTitle: "EDB*Loader"
+indexCards: simple
+description: "How to use EDB*Loader, the high-performance bulk data loader"
+navigation:
+  - edb_loader_overview_and_restrictions
+  - data_loading_methods
+  - using_edb_loader
+  - building_the_control_file
+  - invoking_edb_loader
+redirects:
+  - /epas/latest/epas_compat_tools_guide/02_edb_loader
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.16.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.15.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.11.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.12.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.08.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.14.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.13.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.10.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.09.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.314.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.315.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.311.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.312.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.313.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.309.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.310.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.308.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.307.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.092.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.093.html" + - 
"/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.091.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.089.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.087.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.090.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.088.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.086.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.085.html" +--- + +EDB\*Loader is a high-performance bulk data loader that provides an interface compatible with Oracle databases for EDB Postgres Advanced Server. The EDB\*Loader command line utility loads data from an input source, typically a file, into one or more tables using a subset of the parameters offered by Oracle SQL\*Loader. + + + + + + + + diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/conventional_path_load.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/conventional_path_load.mdx new file mode 100644 index 00000000000..8e36c81fbd6 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/conventional_path_load.mdx @@ -0,0 +1,108 @@ +--- +title: "Updating a table with a conventional path load" +navTitle: "Conventional path load" +description: "Describes how to use EDB*Loader with a conventional path load to update the rows in a table" +redirects: + - /epas/latest/database_administration/02_edb_loader/conventional_path_load/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can use EDB\*Loader with a conventional path load to update the rows in a table, merging new data with the existing data. When you invoke EDB\*Loader to perform an update, the server searches the table for an existing row with a matching primary key: + +- If the server locates a row with a matching key, it replaces the existing row with the new row. +- If the server doesn't locate a row with a matching key, it adds the new row to the table. + +To use EDB\*Loader to update a table, the table must have a primary key. You can't use EDB\*Loader to update a partitioned table. + +## Performing the update + +To perform `UPDATE`, use the same steps as when performing a conventional path load: + +1. Create a data file that contains the rows you want to update or insert. +2. Define a control file that uses the `INFILE` keyword to specify the name of the data file. For information about building the EDB\*Loader control file, see [Building the EDB\*Loader control file](../building_the_control_file/). +3. Invoke EDB\*Loader, specifying the database name, connection information, and the name of the control file. For information about invoking EDB\*Loader, see [Invoking EDB\*Loader](../). + +This example uses the `emp` table that's distributed with the EDB Postgres Advanced Server sample data. 
By default, the table contains:
+
+```sql
+edb=# select * from emp;
+__OUTPUT__
+empno|ename |   job   | mgr  |      hiredate      |   sal   | comm  | deptno
+-----+------+---------+------+--------------------+---------+-------+--------
+7369 |SMITH |CLERK    | 7902 | 17-DEC-80 00:00:00 |  800.00 |       |     20
+7499 |ALLEN |SALESMAN | 7698 | 20-FEB-81 00:00:00 | 1600.00 |300.00 |     30
+7521 |WARD  |SALESMAN | 7698 | 22-FEB-81 00:00:00 | 1250.00 |500.00 |     30
+7566 |JONES |MANAGER  | 7839 | 02-APR-81 00:00:00 | 2975.00 |       |     20
+7654 |MARTIN|SALESMAN | 7698 | 28-SEP-81 00:00:00 | 1250.00 |1400.00|     30
+7698 |BLAKE |MANAGER  | 7839 | 01-MAY-81 00:00:00 | 2850.00 |       |     30
+7782 |CLARK |MANAGER  | 7839 | 09-JUN-81 00:00:00 | 2450.00 |       |     10
+7788 |SCOTT |ANALYST  | 7566 | 19-APR-87 00:00:00 | 3000.00 |       |     20
+7839 |KING  |PRESIDENT|      | 17-NOV-81 00:00:00 | 5000.00 |       |     10
+7844 |TURNER|SALESMAN | 7698 | 08-SEP-81 00:00:00 | 1500.00 |  0.00 |     30
+7876 |ADAMS |CLERK    | 7788 | 23-MAY-87 00:00:00 | 1100.00 |       |     20
+7900 |JAMES |CLERK    | 7698 | 03-DEC-81 00:00:00 |  950.00 |       |     30
+7902 |FORD  |ANALYST  | 7566 | 03-DEC-81 00:00:00 | 3000.00 |       |     20
+7934 |MILLER|CLERK    | 7782 | 23-JAN-82 00:00:00 | 1300.00 |       |     10
+(14 rows)
+```
+
+This control file (`emp_update.ctl`) specifies the fields in the table in a comma-delimited list. The control file performs an `UPDATE` on the `emp` table.
+
+```sql
+LOAD DATA
+  INFILE 'emp_update.dat'
+  BADFILE 'emp_update.bad'
+  DISCARDFILE 'emp_update.dsc'
+UPDATE INTO TABLE emp
+FIELDS TERMINATED BY ","
+(empno, ename, job, mgr, hiredate, sal, comm, deptno)
+```
+
+The data that's being updated or inserted is saved in the `emp_update.dat` file. `emp_update.dat` contains:
+
+```sql
+7521,WARD,MANAGER,7839,22-FEB-81 00:00:00,3000.00,0.00,30
+7566,JONES,MANAGER,7839,02-APR-81 00:00:00,3500.00,0.00,20
+7903,BAKER,SALESMAN,7521,10-JUN-13 00:00:00,1800.00,500.00,20
+7904,MILLS,SALESMAN,7839,13-JUN-13 00:00:00,1800.00,500.00,20
+7654,MARTIN,SALESMAN,7698,28-SEP-81 00:00:00,1500.00,400.00,30
+```
+
+Invoke EDB\*Loader, specifying the name of the database (`edb`), the name of a database user and their associated password, and the name of the control file (`emp_update.ctl`):
+
+```shell
+edbldr -d edb userid=user_name/password control=emp_update.ctl
+```
+
+## Results of the update
+
+After performing the update, the `emp` table contains:
+
+```sql
+edb=# select * from emp;
+__OUTPUT__
+empno|ename |   job   | mgr  |      hiredate      |   sal   |  comm  | deptno
+-----+------+---------+------+--------------------+---------+--------+--------
+7369 |SMITH |CLERK    | 7902 | 17-DEC-80 00:00:00 |  800.00 |        |     20
+7499 |ALLEN |SALESMAN | 7698 | 20-FEB-81 00:00:00 | 1600.00 | 300.00 |     30
+7521 |WARD  |MANAGER  | 7839 | 22-FEB-81 00:00:00 | 3000.00 |   0.00 |     30
+7566 |JONES |MANAGER  | 7839 | 02-APR-81 00:00:00 | 3500.00 |   0.00 |     20
+7654 |MARTIN|SALESMAN | 7698 | 28-SEP-81 00:00:00 | 1500.00 | 400.00 |     30
+7698 |BLAKE |MANAGER  | 7839 | 01-MAY-81 00:00:00 | 2850.00 |        |     30
+7782 |CLARK |MANAGER  | 7839 | 09-JUN-81 00:00:00 | 2450.00 |        |     10
+7788 |SCOTT |ANALYST  | 7566 | 19-APR-87 00:00:00 | 3000.00 |        |     20
+7839 |KING  |PRESIDENT|      | 17-NOV-81 00:00:00 | 5000.00 |        |     10
+7844 |TURNER|SALESMAN | 7698 | 08-SEP-81 00:00:00 | 1500.00 |   0.00 |     30
+7876 |ADAMS |CLERK    | 7788 | 23-MAY-87 00:00:00 | 1100.00 |        |     20
+7900 |JAMES |CLERK    | 7698 | 03-DEC-81 00:00:00 |  950.00 |        |     30
+7902 |FORD  |ANALYST  | 7566 | 03-DEC-81 00:00:00 | 3000.00 |        |     20
+7903 |BAKER |SALESMAN | 7521 | 10-JUN-13 00:00:00 | 1800.00 | 500.00 |     20
+7904 |MILLS |SALESMAN | 7839 | 13-JUN-13 00:00:00 | 1800.00 | 500.00 |     20
+7934 |MILLER|CLERK    | 7782 | 23-JAN-82 00:00:00 | 1300.00 |        |     10
+(16 rows)
+```
+
+The rows containing information for the three employees that are currently in the `emp` table are updated, while rows are added for the new employees (`BAKER` and `MILLS`).
+
+
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx
new file mode 100644
index 00000000000..062b20938ad
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx
@@ -0,0 +1,45 @@
+---
+title: "Running a direct path load"
+navTitle: "Direct path load"
+description: "Describes how to use EDB*Loader to write data directly to the database pages"
+redirects:
+  - /epas/latest/database_administration/02_edb_loader/direct_path_load/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+During a direct path load, EDB\*Loader writes the data directly to the database pages, which are then synchronized to disk. The insert processing associated with a conventional path load is bypassed, resulting in performance improvement. Bypassing insert processing reduces the types of constraints permitted on the target table. The types of constraints permitted on the target table of a direct path load are:
+
+- Primary key
+- Not null constraints
+- Indexes (unique or non-unique)
+
+## Restrictions
+
+The restrictions on the target table of a direct path load are:
+
+- Triggers aren't permitted.
+- Check constraints aren't permitted.
+- Foreign key constraints on the target table referencing another table aren't permitted.
+- Foreign key constraints on other tables referencing the target table aren't permitted.
+- You must not partition the table.
+- Rules can exist on the target table, but they aren't executed.
+
+!!! Note
+    Currently, a direct path load in EDB\*Loader is more restrictive than in Oracle SQL\*Loader. The preceding restrictions don't apply to Oracle SQL\*Loader in most cases. The following restrictions apply to a control file used in a direct path load:
+
+    - Multiple table loads aren't supported. You can specify only one `INTO TABLE` clause in the control file.
+    - You can't use SQL expressions in the data field definitions of the `INTO TABLE` clause.
+    - The `FREEZE` option isn't supported for direct path loading.
+
+## Running the direct path load
+
+To run a direct path load, add the `DIRECT=TRUE` option:
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb USERID=enterprisedb/password
+CONTROL=emp.ctl DIRECT=TRUE
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+ +Successfully loaded (4) records +``` diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/index.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/index.mdx new file mode 100644 index 00000000000..2b4b8a6a41e --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/index.mdx @@ -0,0 +1,18 @@ +--- +title: "Invoking EDB*Loader" +description: "Describes how to run EDB*Loader" +indexCards: simple +navigation: + - running_edb_loader + - conventional_path_load + - direct_path_load + - parallel_direct_path_load + - performing_remote_loading +redirects: + - /epas/latest/database_administration/02_edb_loader/invoking_edb_loader/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can run EDB\*Loader as superuser or as a normal user. + diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/parallel_direct_path_load.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/parallel_direct_path_load.mdx new file mode 100644 index 00000000000..3d0fe590667 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/parallel_direct_path_load.mdx @@ -0,0 +1,153 @@ +--- +title: "Running a parallel direct path load" +navTitle: "Parallel direct path load" +description: "Describes how to run a parallel direct path load to distribute the loading process over two or more sessions" +redirects: + - /epas/latest/database_administration/02_edb_loader/parallel_direct_path_load/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +You can further improve the performance of a direct path load by distributing the loading process over two or more sessions running concurrently. Each session runs a direct path load into the same table. + +Since the same table is loaded from multiple sessions, the input records to load into the table must be divided among several data files. This way, each EDB\*Loader session uses its own data file, and the same record isn't loaded into the table more than once. + +## Restrictions + +The target table of a parallel direct path load is under the same restrictions as a direct path load run in a single session. + +The restrictions on the target table of a direct path load are: + +- Triggers aren't permitted. +- Check constraints aren't permitted. +- Foreign key constraints on the target table referencing another table aren't permitted. +- Foreign key constraints on other tables referencing the target table aren't permitted. +- You must not partition the table. +- Rules can exist on the target table, but they aren't executed. + +In addition, you must specify the `APPEND` clause in the control file used by each EDB\*Loader session. + +## Running a parallel direct path load + +To run a parallel direct path load, run EDB\*Loader in a separate session for each participant of the parallel direct path load. You must include the `DIRECT=TRUE` and `PARALLEL=TRUE` parameters when invoking each such EDB\*Loader session. + +Each EDB\*Loader session runs as an independent transaction. Aborting and rolling back changes of one of the parallel sessions doesn't affect the loading done by the other parallel sessions. + +!!! Note + In a parallel direct path load, each EDB\*Loader session reserves a fixed number of blocks in the target table using turns. 
+    Some of the blocks in the last allocated chunk might not be used, and those blocks remain uninitialized. A later use of the `VACUUM` command on the target table might show warnings about these uninitialized blocks, such as the following:
+
+```text
+WARNING: relation "emp" page 98264 is uninitialized --- fixing
+
+WARNING: relation "emp" page 98265 is uninitialized --- fixing
+
+WARNING: relation "emp" page 98266 is uninitialized --- fixing
+```
+
+This behavior is expected and doesn't indicate data corruption.
+
+Indexes on the target table aren't updated during a parallel direct path load. They are therefore marked as invalid after the load is complete. You must use the `REINDEX` command to rebuild the indexes.
+
+This example shows the use of a parallel direct path load on the `emp` table.
+
+!!! Note
+    If you attempt a parallel direct path load on the sample `emp` table provided with EDB Postgres Advanced Server, you must first remove the triggers and constraints referencing the `emp` table. In addition, the primary key column, `empno`, was expanded from `NUMBER(4)` to `NUMBER` in this example to allow for inserting more rows.
+
+This is the control file used in the first session:
+
+```sql
+LOAD DATA
+  INFILE '/home/user/loader/emp_parallel_1.dat'
+  APPEND
+  INTO TABLE emp
+  FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
+  TRAILING NULLCOLS
+  (
+    empno,
+    ename,
+    job,
+    mgr,
+    hiredate,
+    sal,
+    deptno,
+    comm
+  )
+```
+
+You must specify the `APPEND` clause in the control file for a parallel direct path load.
+
+This example invokes EDB\*Loader in the first session. You must specify the `DIRECT=TRUE` and `PARALLEL=TRUE` parameters.
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb USERID=enterprisedb/password
+CONTROL=emp_parallel_1.ctl DIRECT=TRUE PARALLEL=TRUE
+WARNING: index maintenance will be skipped with PARALLEL load
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+```
+
+The control file used for the second session appears as follows. It's the same as the one used in the first session, but it uses a different data file.
+
+```sql
+LOAD DATA
+  INFILE '/home/user/loader/emp_parallel_2.dat'
+  APPEND
+  INTO TABLE emp
+  FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
+  TRAILING NULLCOLS
+  (
+    empno,
+    ename,
+    job,
+    mgr,
+    hiredate,
+    sal,
+    deptno,
+    comm
+  )
+```
+
+This example invokes EDB\*Loader with that control file in the second session:
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb USERID=enterprisedb/password
+CONTROL=emp_parallel_2.ctl DIRECT=TRUE PARALLEL=TRUE
+WARNING: index maintenance will be skipped with PARALLEL load
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+```
+
+EDB\*Loader displays a message in each session when the load operation completes:
+
+```text
+Successfully loaded (10000) records
+```
+
+This query shows that the index on the emp table was marked `INVALID`:
+
+```sql
+SELECT index_name, status FROM user_indexes WHERE table_name = 'EMP';
+__OUTPUT__
+ index_name | status
+------------+---------
+ EMP_PK     | INVALID
+(1 row)
+```
+
+!!! Note
+    `user_indexes` is the view of indexes compatible with Oracle databases owned by the current user.
+ +Queries on the `emp` table don't use the index unless you rebuild it using the `REINDEX` command: + +```sql +REINDEX INDEX emp_pk; +``` + +A later query on `user_indexes` shows that the index is now marked as `VALID`: + +```sql +SELECT index_name, status FROM user_indexes WHERE table_name = 'EMP'; +__OUTPUT__ + index_name | status +-------------+-------- + EMP_PK | VALID + (1 row) +``` diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx new file mode 100644 index 00000000000..bc539ea9247 --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx @@ -0,0 +1,41 @@ +--- +title: "Performing remote loading" +navTitle: "Remote loading" +description: "Describes how to load a database running on a database server using data piped from a different source" +redirects: + - /epas/latest/database_administration/02_edb_loader/performing_remote_loading/ #generated for docs/epas/reorg-role-use-case-mode +--- + + + +EDB\*Loader supports a feature called *remote loading*. In remote loading, the database containing the table to load is running on a database server on a host different from the one where EDB\*Loader is invoked with the input data source. + +This feature is useful if you have a large amount of data to load, and you don't want to create a large data file on the host running the database server. + +In addition, you can use the standard input feature to pipe the data from the data source, such as another program or script, directly to EDB\*Loader. EDB\*Loader then loads the table in the remote database. This feature bypasses having to create a data file on disk for EDB\*Loader. + +## Requirements + +Performing remote loading using standard input requires: + +- The `edbldr` program must be installed on the client host on which to invoke it with the data source for the EDB\*Loader session. +- The control file must contain the clause `INFILE 'stdin'` so you can pipe the data directly into EDB\*Loader’s standard input. For information on the `INFILE` clause and the EDB\*Loader control file, see [Building the EDB\*Loader control file](../building_the_control_file/). +- All files used by EDB\*Loader, such as the control file, bad file, discard file, and log file, must reside on or be created on the client host on which `edbldr` is invoked. +- When invoking EDB\*Loader, use the `-h` option to specify the IP address of the remote database server. For more information, see [Invoking EDB\*Loader](../). +- Use the operating system pipe operator (`|`) or input redirection operator (`<`) to supply the input data to EDB\*Loader. 
+
+## Loading a database
+
+This example loads a database running on a database server at `192.168.1.14` using data piped from a source named `datasource`:
+
+```shell
+datasource | ./edbldr -d edb -h 192.168.1.14 USERID=enterprisedb/password
+CONTROL=remote.ctl
+```
+
+This example also shows how you can use standard input:
+
+```shell
+./edbldr -d edb -h 192.168.1.14 USERID=enterprisedb/password
+CONTROL=remote.ctl < datasource
+```
diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx
new file mode 100644
index 00000000000..64d9598d041
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx
@@ -0,0 +1,292 @@
+---
+title: "Running EDB*Loader"
+description: "Describes how to run EDB*Loader from the command line"
+---
+
+Use the following command to invoke EDB\*Loader from the command line:
+
+```sql
+edbldr [ -d dbname ] [ -p port ] [ -h host ]
+[ USERID={ username/password | username/ | username | / } ]
+[ { -c | connstr= } CONNECTION_STRING ]
+  CONTROL=control_file
+[ DATA=data_file ]
+[ BAD=bad_file ]
+[ DISCARD=discard_file ]
+[ DISCARDMAX=max_discard_recs ]
+[ HANDLE_CONFLICTS={ FALSE | TRUE } ]
+[ LOG=log_file ]
+[ PARFILE=param_file ]
+[ DIRECT={ FALSE | TRUE } ]
+[ FREEZE={ FALSE | TRUE } ]
+[ ERRORS=error_count ]
+[ PARALLEL={ FALSE | TRUE } ]
+[ ROWS=n ]
+[ SKIP=skip_count ]
+[ SKIP_INDEX_MAINTENANCE={ FALSE | TRUE } ]
+[ edb_resource_group=group_name ]
+```
+
+## Description
+
+You can specify parameters listed in the syntax diagram in a *parameter file*. Exceptions include the `-d` option, `-p` option, `-h` option, and the `PARFILE` parameter. Specify the parameter file on the command line when you invoke edbldr using `PARFILE=param_file`. You can specify some parameters in the `OPTIONS` clause in the control file. For more information on the control file, see [Building the EDB\*Loader control file](../building_the_control_file/).
+
+You can include the full directory path or a relative directory path to the file name when specifying `control_file`, `data_file`, `bad_file`, `discard_file`, `log_file`, and `param_file`. If you specify the file name alone or with a relative directory path, the file is assumed to exist, in the case of `control_file`, `data_file`, or `param_file`, relative to the current working directory from which edbldr is invoked. In the case of `bad_file`, `discard_file`, or `log_file`, the file is created.
+
+If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults for the database, port, and host are determined by the same rules as other EDB Postgres Advanced Server utility programs, such as edb-psql.
+
+## Requirements
+
+- The control file must exist in the character set encoding of the client where edbldr is invoked. If the client is in an encoding different from the database encoding, then you must set the `PGCLIENTENCODING` environment variable on the client to the client’s encoding prior to invoking edbldr. This technique ensures character set conversion between the client and the database server is done correctly.
+
+- The file names must include these extensions:
+  - `control_file` must use the `.ctl` extension.
+  - `data_file` must use the `.dat` extension.
+  - `bad_file` must use the `.bad` extension.
+  - `discard_file` must use the `.dsc` extension.
+  - `log_file` must include the `.log` extension.
+
+  If the provided file name doesn't have an extension, EDB\*Loader assumes the actual file name includes the appropriate extension.
+
+- The operating system account used to invoke edbldr must have read permission on the directories and files specified by `control_file`, `data_file`, and `param_file`.
+
+- The operating system account enterprisedb must have write permission on the directories where `bad_file`, `discard_file`, and `log_file` are written.
+
+## Parameters
+
+`dbname`
+
+ Name of the database containing the tables to load.
+
+`port`
+
+ Port number on which the database server is accepting connections.
+
+`host`
+
+ IP address of the host on which the database server is running.
+
+`USERID={ username/password | username/ | username | / }`
+
+ EDB\*Loader connects to the database with `username`. `username` must be a superuser or a username with the required privileges. `password` is the password for `username`.
+
+ If you omit the `USERID` parameter, EDB\*Loader prompts for `username` and `password`. If you specify `USERID=username/`, then EDB\*Loader either:
+ - Uses the password file specified by the environment variable `PGPASSFILE` if `PGPASSFILE` is set
+ - Uses the `.pgpass` password file (`pgpass.conf` on Windows systems) if `PGPASSFILE` isn't set
+
+ If you specify `USERID=username`, then EDB\*Loader prompts for `password`. If you specify `USERID=/`, the connection is attempted using the operating system account as the user name.
+
+ !!! Note
+     EDB\*Loader ignores the EDB Postgres Advanced Server connection environment variables `PGUSER` and `PGPASSWORD`. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/libpq-pgpass.html) for information on the `PGPASSFILE` environment variable and the password file.
+
+`-c CONNECTION_STRING`
+
+`connstr=CONNECTION_STRING`
+
+ The `-c` or `connstr=` option allows you to specify all the connection parameters supported by libpq. With this option, you can also specify SSL connection parameters or other connection parameters supported by libpq. If you provide connection options such as `-d`, `-h`, `-p`, or `userid=dbuser/dbpass` separately, they might override the values provided by the `-c` or `connstr=` option.
+
+`CONTROL=control_file`
+
+ `control_file` specifies the name of the control file containing EDB\*Loader directives. If you don't specify a file extension, an extension of `.ctl` is assumed.
+
+ For more information on the control file, see [Building the EDB\*Loader control file](../building_the_control_file/).
+
+`DATA=data_file`
+
+ `data_file` specifies the name of the file containing the data to load into the target table. If you don't specify a file extension, an extension of `.dat` is assumed. Specifying a `data_file` on the command line overrides the `INFILE` clause specified in the control file.
+
+ For more information about `data_file`, see [Building the EDB\*Loader control file](../building_the_control_file/).
+
+`BAD=bad_file`
+
+ `bad_file` specifies the name of a file that receives input data records that can't be loaded due to errors. Specifying `bad_file` on the command line overrides any `BADFILE` clause specified in the control file.
+
+ For more information about `bad_file`, see [Building the EDB\*Loader control file](../building_the_control_file/).
+
+`DISCARD=discard_file`
+
+ `discard_file` is the name of the file that receives input data records that don't meet any table’s selection criteria. Specifying `discard_file` on the command line overrides the `DISCARDFILE` clause in the control file.
+
+ For more information about `discard_file`, see [Building the EDB\*Loader control file](../building_the_control_file/).
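+
+ For example, this sketch of an invocation overrides any `DISCARDFILE` clause given in the control file. The file names are illustrative, following the examples later in this topic:
+
+ ```shell
+ edbldr -d edb USERID=enterprisedb/password CONTROL=emp.ctl DISCARD=/tmp/emp.dsc
+ ```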
+ +`DISCARDMAX=max_discard_recs` + + `max_discard_recs` is the maximum number of discarded records that can be encountered from the input data records before terminating the EDB\*Loader session. Specifying `max_discard_recs` on the command line overrides the `DISCARDMAX` or `DISCARDS` clause in the control file. + + For more information about `max_discard_recs`, see [Building the EDB\*Loader control file](../building_the_control_file/). + +`HANDLE_CONFLICTS={ FALSE | TRUE }` + + If any record insertion fails due to a unique constraint violation, EDB\*Loader aborts the entire operation. You can instruct EDB\*Loader to instead move the duplicate record to the `BAD` file and continue processing by setting `HANDLE_CONFLICTS` to `TRUE`. This behavior applies only if indexes are present. By default, `HANDLE_CONFLICTS` is set to `FALSE`. + + Setting `HANDLE_CONFLICTS` to `TRUE` isn't supported with direct path loading. If you set this parameter to `TRUE` when direct path loading, EDB\*Loader throws an error. + +`LOG=log_file` + + `log_file` specifies the name of the file in which EDB\*Loader records the results of the EDB\*Loader session. + + If you omit the `LOG` parameter, EDB\*Loader creates a log file with the name `control_file_base.log` in the directory from which edbldr is invoked. `control_file_base` is the base name of the control file used in the EDB\*Loader session. The operating system account `enterprisedb` must have write permission on the directory where the log file is written. + +`PARFILE=param_file` + + `param_file` specifies the name of the file that contains command line parameters for the EDB\*Loader session. You can specify command line parameters listed in this section in `param_file` instead of on the command line. Exceptions are the `-d`, `-p`, and `-h` options, and the `PARFILE` parameter. + + Any parameter given in `param_file` overrides the same parameter supplied on the command line before the `PARFILE` option. Any parameter given on the command line that appears after the `PARFILE` option overrides the same parameter given in `param_file`. + + !!! Note + Unlike other EDB\*Loader files, there's no default file name or extension assumed for `param_file`. However, by Oracle SQL\*Loader convention, `.par` is typically used as an extension. It isn't required. + +`DIRECT= { FALSE | TRUE }` + + If `DIRECT` is set to `TRUE`, EDB\*Loader performs a direct path load instead of a conventional path load. The default value of `DIRECT` is `FALSE`. + + Don't set `DIRECT=true` when loading the data into a replicated table. If you're using EDB\*Loader to load data into a replicated table and set `DIRECT=true`, indexes might omit rows that are in a table or might contain references to rows that were deleted. EnterpriseDB doesn't support direct inserts to load data into replicated tables. + + For information about direct path loads, see [Direct path load](direct_path_load). + +`FREEZE= { FALSE | TRUE }` + + Set `FREEZE` to `TRUE` to copy the data with the rows *frozen*. A tuple guaranteed to be visible to all current and future transactions is marked as frozen to prevent transaction ID wraparound. For more information about frozen tuples, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/routine-vacuuming.html). + + You must specify a data-loading type of `TRUNCATE` in the control file when using the `FREEZE` option. `FREEZE` isn't supported for direct loading. + + By default, `FREEZE` is `FALSE`. 
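+
+ For example, this sketch invokes a load with rows frozen. It assumes a control file `emp.ctl` that specifies the required `TRUNCATE` data-loading type; the database, user, and file names are illustrative, following the examples later in this topic:
+
+ ```shell
+ edbldr -d edb USERID=enterprisedb/password CONTROL=emp.ctl FREEZE=TRUE
+ ```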
+
+`ERRORS=error_count`
+
+ `error_count` specifies the number of errors permitted before aborting the EDB\*Loader session. The default is `50`.
+
+`PARALLEL= { FALSE | TRUE }`
+
+ Set `PARALLEL` to `TRUE` to indicate that this EDB\*Loader session is one of a number of concurrent EDB\*Loader sessions participating in a parallel direct path load. The default value of `PARALLEL` is `FALSE`.
+
+ When `PARALLEL` is `TRUE`, the `DIRECT` parameter must also be set to `TRUE`.
+
+ For more information about parallel direct path loads, see [Parallel direct path load](parallel_direct_path_load).
+
+`ROWS=n`
+
+ `n` specifies the number of rows that EDB\*Loader commits before loading the next set of `n` rows.
+
+`SKIP=skip_count`
+
+ Number of records at the beginning of the input data file to skip before loading begins. The default is `0`.
+
+`SKIP_INDEX_MAINTENANCE= { FALSE | TRUE }`
+
+ If set to `TRUE`, index maintenance isn't performed as part of a direct path load, and indexes on the loaded table are marked as invalid. The default value of `SKIP_INDEX_MAINTENANCE` is `FALSE`.
+
+ During a parallel direct path load, target table indexes aren't updated. They're marked as invalid after the load is complete.
+
+ You can use the `REINDEX` command to rebuild an index. For more information about the `REINDEX` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-reindex.html).
+
+`edb_resource_group=group_name`
+
+ `group_name` specifies the name of an EDB Resource Manager resource group to which to assign the EDB\*Loader session.
+
+ Any default resource group that was assigned to the session is overridden by the resource group given by the `edb_resource_group` parameter specified on the edbldr command line. An example of such a group is a database user running the EDB\*Loader session who was assigned a default resource group with the `ALTER ROLE ... SET` `edb_resource_group` command.
+
+## Examples
+
+This example invokes EDB\*Loader using a control file named `emp.ctl` to load a table in database `edb`. The file is located in the current working directory.
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb USERID=enterprisedb/password
+CONTROL=emp.ctl
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+
+Successfully loaded (4) records
+```
+
+In this example, EDB\*Loader prompts for the user name and password since they're omitted from the command line. In addition, the bad file and log file are specified with the `BAD` and `LOG` command line parameters.
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb CONTROL=emp.ctl BAD=/tmp/emp.bad
+LOG=/tmp/emp.log
+Enter the user name : enterprisedb
+Enter the password :
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+
+Successfully loaded (4) records
+```
+
+This example runs EDB\*Loader using a parameter file located in the current working directory. The `SKIP` and `ERRORS` parameter default values are specified in the parameter file in addition to the `CONTROL`, `BAD`, and `LOG` files. The parameter file, `emp.par`, contains:
+
+```ini
+CONTROL=emp.ctl
+BAD=/tmp/emp.bad
+LOG=/tmp/emp.log
+SKIP=1
+ERRORS=10
+```
+
+Invoke EDB\*Loader with the parameter file:
+
+```shell
+$ /usr/edb/as17/bin/edbldr -d edb PARFILE=emp.par
+Enter the user name : enterprisedb
+Enter the password :
+EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
+ +Successfully loaded (3) records +``` + +This example invokes EDB\*Loader using a `connstr=` option that uses the `emp.ctl` control file located in the current working directory to load a table in a database named `edb`: + +```shell +$ /usr/edb/as17/bin/edbldr connstr=\"sslmode=verify-ca sslcompression=0 +host=127.0.0.1 dbname=edb port=5444 user=enterprisedb\" CONTROL=emp.ctl +EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation. + +Successfully loaded (4) records +``` + +This example invokes EDB\*Loader using a normal user. For this example, one empty table `bar` is created and a normal user `bob` is created. The `bob` user is granted all privileges on the table `bar`. The CREATE TABLE command creates the empty table. The CREATE USER command creates the user, and the GRANT command gives required privileges to the user `bob` on the `bar` table: + +```sql +CREATE TABLE bar(i int); +CREATE USER bob identified by '123'; +GRANT ALL on bar TO bob; +``` + +The control file and data file: + +```shell +## Control file +EDBAS/ - (master) $ cat /tmp/edbldr.ctl +LOAD DATA INFILE '/tmp/edbldr.dat' +truncate into table bar +( +i position(1:1) +) + +## Data file +EDBAS/ - (master) $ cat /tmp/edbldr.dat +1 +2 +3 +5 +``` + +Invoke EDB\*Loader: + +```shell +EDBAS/ - (master) $ /usr/edb/as17/bin/edbldr -d edb userid=bob/123 control=/tmp/edbldr.ctl +EDB*Loader: Copyright (c) 2007-2022, EnterpriseDB Corporation. + +Successfully loaded (4) records +``` + + + +## Exit codes + +When EDB\*Loader exits, it returns one of the following codes: + +| Exit code | Description | +| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `0` | All rows loaded successfully. | +| `1` | EDB\*Loader encountered command line or syntax errors or aborted the load operation due to an unrecoverable error. | +| `2` | The load completed, but some or all rows were rejected or discarded. | +| `3` | EDB\*Loader encountered fatal errors, such as OS errors. This class of errors is equivalent to the `FATAL` or `PANIC` severity levels of PostgreSQL errors. | + diff --git a/product_docs/docs/epas/17/database_administration/02_edb_loader/using_edb_loader.mdx b/product_docs/docs/epas/17/database_administration/02_edb_loader/using_edb_loader.mdx new file mode 100644 index 00000000000..e053521548d --- /dev/null +++ b/product_docs/docs/epas/17/database_administration/02_edb_loader/using_edb_loader.mdx @@ -0,0 +1,30 @@ +--- +title: "EDB*Loader error handling" +description: "Describes the types of errors the EDB*Loader utility can encounter" +--- + + +EDB\*Loader can load data files with either delimiter-separated or fixed-width fields in single-byte or multibyte character sets. The delimiter can be a string consisting of one or more single-byte or multibyte characters. Data file encoding and the database encoding can differ. Character set conversion of the data file to the database encoding is supported. + +Each EDB\*Loader session runs as a single, independent transaction. If an error occurs during the EDB\*Loader session that aborts the transaction, all changes made during the session are rolled back. + +Generally, formatting errors in the data file don't result in an aborted transaction. Instead, the badly formatted records are written to a text file called the *bad file*. The reason for the error is recorded in the *log file*. 
+
+Records causing database integrity errors result in an aborted transaction and rollback. As with formatting errors, the record causing the error is written to the bad file and the reason is recorded in the log file.
+
+!!! Note
+    EDB\*Loader differs from Oracle SQL\*Loader in that a database integrity error results in a rollback in EDB\*Loader. In Oracle SQL\*Loader, only the record causing the error is rejected. Records that were previously inserted into the table are retained, and loading continues after the rejected record.
+
+The following are examples of types of formatting errors that don't abort the transaction:
+
+- Attempt to load a non-numeric value into a numeric column
+- Numeric value too large for a numeric column
+- Character value too long for the maximum length of a character column
+- Attempt to load an improperly formatted date value into a date column
+
+The following are examples of types of database errors that abort the transaction and result in the rollback of all changes made in the EDB\*Loader session:
+
+- Violating a unique constraint such as a primary key or unique index
+- Violating a referential integrity constraint
+- Violating a check constraint
+- Error thrown by a trigger fired as a result of inserting rows
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/cpu_usage_throttling.mdx b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/cpu_usage_throttling.mdx
new file mode 100644
index 00000000000..bf816166131
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/cpu_usage_throttling.mdx
@@ -0,0 +1,325 @@
+---
+title: "CPU usage throttling"
+description: "How to use EDB Resource Manager to control the CPU use of a resource group"
+---
+
+EDB Resource Manager uses *CPU throttling* to keep the aggregate CPU usage of all processes in a resource group within the limit specified by the `cpu_rate_limit` parameter. A process in the group might be interrupted and put into sleep mode for a short time to maintain the defined limit. When and how such interruptions occur is defined by a proprietary algorithm used by EDB Resource Manager.
+
+To control CPU use of a resource group, set the `cpu_rate_limit` resource type parameter.
+
+- Set the `cpu_rate_limit` parameter to the fraction of CPU time over wall-clock time that the combined, simultaneous CPU usage of all processes in the group must not exceed. The value assigned to `cpu_rate_limit` is typically less than or equal to 1.
+
+- On multicore systems, you can apply the `cpu_rate_limit` to more than one CPU core by setting it to greater than 1. For example, if `cpu_rate_limit` is set to 2.0, you use 100% of two CPUs. The valid range of the `cpu_rate_limit` parameter is 0 to 1.67772e+07. A setting of 0 means no CPU rate limit was set for the resource group.
+
+- When the value is multiplied by 100, you can also interpret the `cpu_rate_limit` as the CPU usage percentage for a resource group.
+
+## Setting the CPU rate limit for a resource group
+
+Use the `ALTER RESOURCE GROUP` command with the `SET cpu_rate_limit` clause to set the CPU rate limit for a resource group.
+
+In this example, the CPU usage limit is set to 50% for `resgrp_a`, 40% for `resgrp_b`, and 30% for `resgrp_c`. This means that the combined CPU usage of all processes assigned to `resgrp_a` is maintained at approximately 50%.
Similarly, for all processes in `resgrp_b`, the combined CPU usage is kept to approximately 40%, and so on. + +```sql +edb=# ALTER RESOURCE GROUP resgrp_a SET cpu_rate_limit TO .5; +ALTER RESOURCE GROUP +edb=# ALTER RESOURCE GROUP resgrp_b SET cpu_rate_limit TO .4; +ALTER RESOURCE GROUP +edb=# ALTER RESOURCE GROUP resgrp_c SET cpu_rate_limit TO .3; +ALTER RESOURCE GROUP +``` + +This query shows the settings of `cpu_rate_limit` in the catalog: + +```sql +edb=# SELECT rgrpname, rgrpcpuratelimit FROM edb_resource_group; +__OUTPUT__ + rgrpname | rgrpcpuratelimit +----------+------------------ + resgrp_a | 0.5 + resgrp_b | 0.4 + resgrp_c | 0.3 +(3 rows) +``` + +Changing the `cpu_rate_limit` of a resource group affects new processes that are assigned to the group. It also immediately affects any currently running processes that are members of the group. That is, if the `cpu_rate_limit` is changed from .5 to .3, currently running processes in the group are throttled downward so that the aggregate group CPU usage is near 30% instead of 50%. + +To show the effect of setting the CPU rate limit for resource groups, the following `psql` command-line examples use a CPU-intensive calculation of 20000 factorial (multiplication of 20000 \* 19999 \* 19998, and so on) performed by the query `SELECT 20000!`. + +The resource groups with the CPU rate limit settings shown in the previous query are used in these examples. + +## Example: Single process in a single group + +This example shows that the current process is set to use resource group `resgrp_b`. The factorial calculation then starts. + +```sql +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) +edb=# SELECT 20000!; +``` + +In a second session, the Linux `top` command is used to display the CPU usage under the `%CPU` column. Because the `top` command output periodically changes, it represents a snapshot at an arbitrary point in time: + +```shell +$ top +top - 16:37:03 up 4:15, 7 users, load average: 0.49, 0.20, 0.38 +Tasks: 202 total, 1 running, 201 sleeping, 0 stopped, 0 zombie +Cpu(s): 42.7%us, 2.3%sy, 0.0%ni, 55.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0 +Mem: 1025624k total, 791160k used, 234464k free, 23400k buffers +Swap: 103420k total, 13404k used, 90016k free, 373504k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +28915 enterpri 20 0 195m 5900 4212 S 39.9 0.6 3:36.98 edb-postgres + 1033 root 20 0 171m 77m 2960 S 1.0 7.8 3:43.96 Xorg + 3040 user 20 0 278m 22m 14m S 1.0 2.2 3:41.72 knotify4 + . + . + . +``` + +The row where `edb-postgres` appears under the `COMMAND` column shows the `psql` session performing the factorial calculation. The CPU usage of the session shown under the `%CPU` column is 39.9, which is close to the 40% CPU limit set for resource group `resgrp_b`. + +By contrast, if the `psql` session is removed from the resource group and the factorial calculation is performed again, the CPU usage is much higher. 
+ +```sql +edb=# SET edb_resource_group TO DEFAULT; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + +(1 row) + +edb=# SELECT 20000!; +``` + +Under the `%CPU` column for `edb-postgres`, the CPU usage is now 93.6, which is significantly higher than the 39.9 when the process was part of the resource group: + +```shell +$ top +top - 16:43:03 up 4:21, 7 users, load average: 0.66, 0.33, 0.37 +Tasks: 202 total, 5 running, 197 sleeping, 0 stopped, 0 zombie +Cpu(s): 96.7%us, 3.3%sy, 0.0%ni, 0.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0 +Mem: 1025624k total, 791228k used, 234396k free, 23560k buffers +Swap: 103420k total, 13404k used, 90016k free, 373508k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +28915 enterpri 20 0 195m 5900 4212 R 93.6 0.6 5:01.56 edb-postgres + 1033 root 20 0 171m 77m 2960 S 1.0 7.8 3:48.15 Xorg + 2907 user 20 0 98.7m 11m 9100 S 0.3 1.2 0:46.51 vmware-user-lo + . + . + . +``` + +## Example: Multiple processes in a single group + +As stated previously, the CPU rate limit applies to the aggregate of all processes in the resource group. This concept is shown in the following example. + +The factorial calculation is performed simultaneously in two separate `psql` sessions, each of which was added to resource group `resgrp_b` that has `cpu_rate_limit` set to .4 (CPU usage of 40%). + +### Session 1 + +```sql +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) + +edb=# SELECT 20000!; +``` + +### Session 2 + +```sql +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) + +edb=# SELECT 20000!; +``` + +A third session monitors the CPU usage: + +```shell +$ top +top - 16:53:03 up 4:31, 7 users, load average: 0.31, 0.19, 0.27 +Tasks: 202 total, 1 running, 201 sleeping, 0 stopped, 0 zombie +Cpu(s): 41.2%us, 3.0%sy, 0.0%ni, 55.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0 +Mem: 1025624k total, 792020k used, 233604k free, 23844k buffers +Swap: 103420k total, 13404k used, 90016k free, 373508k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +29857 enterpri 20 0 195m 4708 3312 S 19.9 0.5 0:57.35 edb-postgres +28915 enterpri 20 0 195m 5900 4212 S 19.6 0.6 5:35.49 edb-postgres + 3040 user 20 0 278m 22m 14m S 1.0 2.2 3:54.99 knotify4 + 1033 root 20 0 171m 78m 2960 S 0.3 7.8 3:55.71 Xorg + . + . + . +``` + +Two new processes named `edb-postgres` have `%CPU` values of 19.9 and 19.6. The sum is close to the 40% CPU usage set for resource group `resgrp_b`. + +This command sequence displays the sum of all `edb-postgres` processes sampled over half-second time intervals. This example shows how the total CPU usage of the processes in the resource group changes over time as EDB Resource Manager throttles the processes to keep the total resource group CPU usage near 40%. + +```shell +$ while [[ 1 -eq 1 ]]; do top -d0.5 -b -n2 | grep edb-postgres | awk '{ SUM ++= $9} END { print SUM / 2 }'; done +37.2 +39.1 +38.9 +38.3 +44.7 +39.2 +42.5 +39.1 +39.2 +39.2 +41 +42.85 +46.1 + . + . + . +``` + +## Example: Multiple processes in multiple groups + +In this example, two additional `psql` sessions are used along with the previous two sessions. The third and fourth sessions perform the same factorial calculation in resource group `resgrp_c` with a `cpu_rate_limit` of `.3` (30% CPU usage). 
+
+### Session 3
+
+```sql
+edb=# SET edb_resource_group TO resgrp_c;
+SET
+edb=# SHOW edb_resource_group;
+__OUTPUT__
+ edb_resource_group
+--------------------
+ resgrp_c
+(1 row)
+
+edb=# SELECT 20000!;
+```
+
+### Session 4
+
+```sql
+edb=# SET edb_resource_group TO resgrp_c;
+SET
+edb=# SHOW edb_resource_group;
+__OUTPUT__
+ edb_resource_group
+--------------------
+ resgrp_c
+(1 row)
+edb=# SELECT 20000!;
+```
+
+The `top` command displays the following output:
+
+```shell
+$ top
+top - 17:45:09 up 5:23, 8 users, load average: 0.47, 0.17, 0.26
+Tasks: 203 total, 4 running, 199 sleeping, 0 stopped, 0 zombie
+Cpu(s): 70.2%us, 0.0%sy, 0.0%ni, 29.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0
+Mem: 1025624k total, 806140k used, 219484k free, 25296k buffers
+Swap: 103420k total, 13404k used, 90016k free, 374092k cached
+
+  PID USER     PR NI VIRT  RES  SHR S %CPU %MEM  TIME+   COMMAND
+29857 enterpri 20  0 195m 4820 3324 S 19.9  0.5  4:25.02 edb-postgres
+28915 enterpri 20  0 195m 5900 4212 R 19.6  0.6  9:07.50 edb-postgres
+29023 enterpri 20  0 195m 4744 3248 R 16.3  0.5  4:01.73 edb-postgres
+11019 enterpri 20  0 195m 4120 2764 R 15.3  0.4  0:04.92 edb-postgres
+ 2907 user     20  0 98.7m  12m 9112 S  1.3  1.2  0:56.54 vmware-user-lo
+ 3040 user     20  0 278m  22m  14m S  1.3  2.2  4:38.73 knotify4
+```
+
+The two resource groups in use have CPU usage limits of 40% and 30%. The sum of the `%CPU` column for the first two `edb-postgres` processes is 39.5 (approximately 40%, which is the limit for `resgrp_b`). The sum of the `%CPU` column for the third and fourth `edb-postgres` processes is 31.6 (approximately 30%, which is the limit for `resgrp_c`).
+
+The sum of the CPU usage limits of the two resource groups to which these processes belong is 70%. The following output shows that the sum of the four processes borders around 70%:
+
+```shell
+$ while [[ 1 -eq 1 ]]; do top -d0.5 -b -n2 | grep edb-postgres | awk '{ SUM +
+= $9} END { print SUM / 2 }'; done
+61.8
+76.4
+72.6
+69.55
+64.55
+79.95
+68.55
+71.25
+74.85
+62
+74.85
+76.9
+72.4
+65.9
+74.9
+68.25
+```
+
+By contrast, if three sessions are running, where two sessions remain in `resgrp_b` but the third session doesn't belong to any resource group, the `top` command shows the following output:
+
+```shell
+$ top
+top - 17:24:55 up 5:03, 7 users, load average: 1.00, 0.41, 0.38
+Tasks: 199 total, 3 running, 196 sleeping, 0 stopped, 0 zombie
+Cpu(s): 99.7%us, 0.3%sy, 0.0%ni, 0.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0
+Mem: 1025624k total, 797692k used, 227932k free, 24724k buffers
+Swap: 103420k total, 13404k used, 90016k free, 374068k cached
+
+  PID USER     PR NI VIRT  RES  SHR S %CPU %MEM  TIME+   COMMAND
+29023 enterpri 20  0 195m 4744 3248 R 58.6  0.5  2:53.75 edb-postgres
+28915 enterpri 20  0 195m 5900 4212 S 18.9  0.6  7:58.45 edb-postgres
+29857 enterpri 20  0 195m 4820 3324 S 18.9  0.5  3:14.85 edb-postgres
+ 1033 root     20  0 174m  81m 2960 S  1.7  8.2  4:26.50 Xorg
+ 3040 user     20  0 278m  22m  14m S  1.0  2.2  4:21.20 knotify4
+```
+
+The second and third `edb-postgres` processes belonging to the resource group where the CPU usage is limited to 40% have a total CPU usage of 37.8. However, the first `edb-postgres` process has a 58.6% CPU usage, as it isn't within a resource group. It basically uses the remaining available CPU resources on the system.
+
+Likewise, the following output shows the sum of all three sessions is around 95%, since one of the sessions has no set limit on its CPU usage:
+
+```shell
+$ while [[ 1 -eq 1 ]]; do top -d0.5 -b -n2 | grep edb-postgres | awk '{ SUM +
+= $9} END { print SUM / 2 }'; done
+96
+90.35
+92.55
+96.4
+94.1
+90.7
+95.7
+95.45
+93.65
+87.95
+96.75
+94.25
+95.45
+97.35
+92.9
+96.05
+96.25
+94.95
+ .
+ .
+ .
+```
diff --git a/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/creating_resource_groups.mdx b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/creating_resource_groups.mdx
new file mode 100644
index 00000000000..0723a4a35b7
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/creating_resource_groups.mdx
@@ -0,0 +1,278 @@
+---
+title: "Working with resource groups"
+description: "How to use EDB Resource Manager to create and manage resource groups"
+---
+
+Use these data definition language commands to create and manage resource groups.
+
+## Creating a resource group
+
+Use the `CREATE RESOURCE GROUP` command to create a new resource group.
+
+```sql
+CREATE RESOURCE GROUP <group_name>;
+```
+
+### Description
+
+The `CREATE RESOURCE GROUP` command creates a resource group with the specified name. You can then define resource limits on the group with the `ALTER RESOURCE GROUP` command. The resource group is accessible from all databases in the EDB Postgres Advanced Server instance.
+
+To use the `CREATE RESOURCE GROUP` command, you must have superuser privileges.
+
+### Parameters
+
+`group_name`
+
+The name of the resource group.
+
+### Example
+
+This example creates three resource groups named `resgrp_a`, `resgrp_b`, and `resgrp_c`:
+
+```sql
+edb=# CREATE RESOURCE GROUP resgrp_a;
+CREATE RESOURCE GROUP
+edb=# CREATE RESOURCE GROUP resgrp_b;
+CREATE RESOURCE GROUP
+edb=# CREATE RESOURCE GROUP resgrp_c;
+CREATE RESOURCE GROUP
+```
+
+This query shows the entries for the resource groups in the `edb_resource_group` catalog:
+
+```sql
+edb=# SELECT * FROM edb_resource_group;
+__OUTPUT__
+ rgrpname  | rgrpcpuratelimit | rgrpdirtyratelimit
+-----------+------------------+--------------------
+ resgrp_a  |                0 |                  0
+ resgrp_b  |                0 |                  0
+ resgrp_c  |                0 |                  0
+(3 rows)
+```
+
+## Modifying a resource group
+
+Use the `ALTER RESOURCE GROUP` command to change the attributes of an existing resource group. The command syntax comes in three forms.
+
+This form renames the resource group:
+
+```sql
+ALTER RESOURCE GROUP <group_name> RENAME TO <new_name>;
+```
+
+This form assigns a resource type to the resource group:
+
+```sql
+ALTER RESOURCE GROUP <group_name> SET <resource_type>
+  { TO | = } { <value> | DEFAULT };
+```
+
+This form resets the assignment of a resource type to its default in the group:
+
+```sql
+ALTER RESOURCE GROUP <group_name> RESET <resource_type>;
+```
+
+### Description
+
+The `ALTER RESOURCE GROUP` command changes certain attributes of an existing resource group.
+
+The form with the `RENAME TO` clause assigns a new name to an existing resource group.
+
+The form with the `SET resource_type TO` clause assigns the specified literal value to a resource type. Or, when you specify `DEFAULT`, it resets the resource type. Resetting a resource type means that the resource group has no defined limit on that resource type.
+
+The form with the `RESET resource_type` clause resets the resource type for the group.
+
+To use the `ALTER RESOURCE GROUP` command, you must have superuser privileges.
+
+### Parameters
+
+`group_name`
+
+The name of the resource group to alter.
+
+`new_name`
+
+The new name to assign to the resource group.
+
+`resource_type`
+
+Specifies the type of resource to which to set a usage value.
+
+`value | DEFAULT`
+
+When `value` is specified, the literal value to assign to `resource_type`. Specify `DEFAULT` to reset the assignment of `resource_type` for the resource group.
+
+### Example
+
+These examples show the use of the `ALTER RESOURCE GROUP` command:
+
+```sql
+edb=# ALTER RESOURCE GROUP resgrp_a RENAME TO newgrp;
+ALTER RESOURCE GROUP
+edb=# ALTER RESOURCE GROUP resgrp_b SET cpu_rate_limit = .5;
+ALTER RESOURCE GROUP
+edb=# ALTER RESOURCE GROUP resgrp_b SET dirty_rate_limit = 6144;
+ALTER RESOURCE GROUP
+edb=# ALTER RESOURCE GROUP resgrp_c RESET cpu_rate_limit;
+ALTER RESOURCE GROUP
+```
+
+This query shows the effect of the `ALTER RESOURCE GROUP` commands on the entries in the `edb_resource_group` catalog:
+
+```sql
+edb=# SELECT * FROM edb_resource_group;
+__OUTPUT__
+ rgrpname  | rgrpcpuratelimit | rgrpdirtyratelimit
+-----------+------------------+--------------------
+ newgrp    |                0 |                  0
+ resgrp_b  |              0.5 |               6144
+ resgrp_c  |                0 |                  0
+(3 rows)
+```
+
+## Removing a resource group
+
+Use the `DROP RESOURCE GROUP` command to remove a resource group.
+
+```sql
+DROP RESOURCE GROUP [ IF EXISTS ] <group_name>;
+```
+
+### Description
+
+The `DROP RESOURCE GROUP` command removes a resource group with the specified name.
+
+To use the `DROP RESOURCE GROUP` command, you must have superuser privileges.
+
+### Parameters
+
+`group_name`
+
+The name of the resource group to remove.
+
+`IF EXISTS`
+
+Don't throw an error if the resource group doesn't exist. Instead, issue a notice.
+
+### Example
+
+This example removes the resource group `newgrp`:
+
+```sql
+edb=# DROP RESOURCE GROUP newgrp;
+DROP RESOURCE GROUP
+```
+
+## Assigning a process to a resource group
+
+Use the `SET edb_resource_group TO group_name` command to assign the current process to a specified resource group:
+
+```sql
+edb=# SET edb_resource_group TO resgrp_b;
+SET
+edb=# SHOW edb_resource_group;
+__OUTPUT__
+ edb_resource_group
+--------------------
+ resgrp_b
+(1 row)
+```
+
+The resource type settings of the group take effect on the current process immediately. If you use the command to change the resource group assigned to the current process, the resource type settings of the newly assigned group take effect immediately.
+
+You can include processes in a resource group by default by assigning a default resource group to roles, databases, or an entire database server instance.
+
+You can assign a default resource group to a role using the `ALTER ROLE ... SET` command. For more information about the `ALTER ROLE` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-alterrole.html).
+
+You can assign a default resource group to a database by using the `ALTER DATABASE ... SET` command. For more information about the `ALTER DATABASE` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-alterdatabase.html).
+
+You can assign the entire database server instance a default resource group by setting the `edb_resource_group` configuration parameter in the `postgresql.conf` file:
+
+```ini
+# - EDB Resource Manager -
+#edb_max_resource_groups = 16           # 0-65536 (change requires restart)
+edb_resource_group = 'resgrp_b'
+```
+
+If you change `edb_resource_group` in the `postgresql.conf` file, reload the configuration file to make it take effect on the database server instance.
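+
+For example, this minimal sketch assigns a default resource group at the role and database level and then reloads the configuration. The role `alice` and database `edb` are illustrative:
+
+```sql
+-- Assign a default resource group to a role and to a database
+ALTER ROLE alice SET edb_resource_group = 'resgrp_b';
+ALTER DATABASE edb SET edb_resource_group = 'resgrp_b';
+
+-- After editing edb_resource_group in postgresql.conf,
+-- reload the configuration without restarting the server
+SELECT pg_reload_conf();
+```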
+ +## Removing a process from a resource group + +Set `edb_resource_group` to `DEFAULT` or use `RESET edb_resource_group` to remove the current process from a resource group: + +```sql +edb=# SET edb_resource_group TO DEFAULT; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- +(1 row) +``` + +To remove a default resource group from a role, use the `ALTER ROLE ... RESET` form of the `ALTER ROLE` command. + +To remove a default resource group from a database, use the `ALTER DATABASE ... RESET` form of the `ALTER DATABASE` command. + +To remove a default resource group from the database server instance, set the `edb_resource_group` configuration parameter to an empty string in the `postgresql.conf` file. Then, reload the configuration file. + +## Monitoring processes in resource groups + +After you create resource groups, you can get the number of processes actively using these resource groups from the view `edb_all_resource_groups`. + +The following are the columns in `edb_all_resource_groups`: + +- **group_name.** Name of the resource group. +- **active_processes.** Number of active processes in the resource group. +- **cpu_rate_limit.** The value of the CPU rate limit resource type assigned to the resource group. +- **per_process_cpu_rate_limit.** The CPU rate limit that applies to an individual active process in the resource group. +- **dirty_rate_limit.** The value of the dirty rate limit resource type assigned to the resource group. +- **per_process_dirty_rate_limit.** The dirty rate limit that applies to an individual active process in the resource group. + +!!! Note + Columns `per_process_cpu_rate_limit` and `per_process_dirty_rate_limit` don't show the actual resource consumption used by the processes. They indicate how `EDB Resource Manager` sets the resource limit for an individual process based on the number of active processes in the resource group. + +This example shows `edb_all_resource_groups` when resource group `resgrp_a` contains no active processes, resource group `resgrp_b` contains two active processes, and resource group `resgrp_c` contains one active process: + +```sql +edb=# SELECT * FROM edb_all_resource_groups ORDER BY group_name; +__OUTPUT__ +-[ RECORD 1 ]-----------------+------------------ + group_name | resgrp_a + active_processes | 0 + cpu_rate_limit | 0.5 + per_process_cpu_rate_limit | + dirty_rate_limit | 12288 + per_process_dirty_rate_limit | +-[ RECORD 2 ]-----------------+------------------ + group_name | resgrp_b + active_processes | 2 + cpu_rate_limit | 0.4 + per_process_cpu_rate_limit | 0.195694289022895 + dirty_rate_limit | 6144 + per_process_dirty_rate_limit | 3785.92924684337 +-[ RECORD 3 ]-----------------+------------------ + group_name | resgrp_c + active_processes | 1 + cpu_rate_limit | 0.3 + per_process_cpu_rate_limit | 0.292342129631091 + dirty_rate_limit | 3072 + per_process_dirty_rate_limit | 3072 +``` + +The CPU rate limit and dirty rate limit settings that are assigned to these resource groups are: + +```sql +edb=# SELECT * FROM edb_resource_group; +__OUTPUT__ + rgrpname | rgrpcpuratelimit | rgrpdirtyratelimit +-----------+------------------+-------------------- + resgrp_a | 0.5 | 12288 + resgrp_b | 0.4 | 6144 + resgrp_c | 0.3 | 3072 +(3 rows) +``` + +In the `edb_all_resource_groups` view, the `per_process_cpu_rate_limit` and `per_process_dirty_rate_limit` values are roughly the corresponding CPU rate limit and dirty rate limit divided by the number of active processes. 
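+
+As a rough cross-check of that relationship, the division can be computed directly from the view. A sketch using only the `edb_all_resource_groups` columns listed above:
+
+```sql
+-- Compare the reported per-process CPU limit with a naive division
+-- of the group limit by the number of active processes
+SELECT group_name,
+       per_process_cpu_rate_limit,
+       cpu_rate_limit / NULLIF(active_processes, 0) AS naive_cpu_share
+FROM edb_all_resource_groups;
+```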
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx
new file mode 100644
index 00000000000..5f6ad2cf0ee
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx
@@ -0,0 +1,397 @@
+---
+title: "Dirty buffer throttling"
+description: "How to use EDB Resource Manager to control writing to shared buffers"
+---
+
+EDB Resource Manager uses *dirty buffer throttling* to keep the aggregate shared buffer writing rate of all processes in the group near the limit specified by the `dirty_rate_limit` parameter. A process in the group might be interrupted and put into sleep mode for a short time to maintain the defined limit. When and how such interruptions occur is defined by a proprietary algorithm used by EDB Resource Manager.
+
+To control writing to shared buffers, set the `dirty_rate_limit` resource type parameter.
+
+- Set the `dirty_rate_limit` parameter to the number of kilobytes per second for the combined rate at which all the processes in the group write to, or “dirty”, the shared buffers. An example setting is 3072 kilobytes per second.
+
+- The valid range of the `dirty_rate_limit` parameter is 0 to 1.67772e+07. A setting of 0 means no dirty rate limit was set for the resource group.
+
+## Setting the dirty rate limit for a resource group
+
+Use the `ALTER RESOURCE GROUP` command with the `SET dirty_rate_limit` clause to set the dirty rate limit for a resource group.
+
+In this example, the dirty rate limit is set to 12288 kilobytes per second for `resgrp_a`, 6144 kilobytes per second for `resgrp_b`, and 3072 kilobytes per second for `resgrp_c`. This means that the combined writing rate to the shared buffer of all processes assigned to `resgrp_a` is maintained at approximately 12288 kilobytes per second. Similarly, for all processes in `resgrp_b`, the combined writing rate to the shared buffer is kept to approximately 6144 kilobytes per second, and so on.
+
+```sql
+edb=# ALTER RESOURCE GROUP resgrp_a SET dirty_rate_limit TO 12288;
+ALTER RESOURCE GROUP
+edb=# ALTER RESOURCE GROUP resgrp_b SET dirty_rate_limit TO 6144;
+ALTER RESOURCE GROUP
+edb=# ALTER RESOURCE GROUP resgrp_c SET dirty_rate_limit TO 3072;
+ALTER RESOURCE GROUP
+```
+
+This query shows the settings of `dirty_rate_limit` in the catalog:
+
+```sql
+edb=# SELECT rgrpname, rgrpdirtyratelimit FROM edb_resource_group;
+__OUTPUT__
+ rgrpname  | rgrpdirtyratelimit
+-----------+--------------------
+ resgrp_a  |              12288
+ resgrp_b  |               6144
+ resgrp_c  |               3072
+(3 rows)
+```
+
+## Changing the dirty rate limit
+
+Changing the `dirty_rate_limit` of a resource group affects new processes that are assigned to the group. Any currently running processes that are members of the group are also immediately affected by the change. That is, if the `dirty_rate_limit` is changed from 12288 to 3072, currently running processes in the group are throttled downward so that the aggregate group dirty rate is near 3072 kilobytes per second instead of 12288 kilobytes per second.
+
+To show the effect of setting the dirty rate limit for resource groups, the examples use the following table for intensive I/O operations:
+
+```sql
+CREATE TABLE t1 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10);
+```
+
+The `FILLFACTOR = 10` clause results in `INSERT` commands packing rows up to only 10% per page.
The result is a larger sampling of dirty shared blocks for the purpose of these examples. + +## Displaying the number of dirty buffers + +The `pg_stat_statements` module is used to display the number of shared buffer blocks that are dirtied by a SQL command and the amount of time the command took to execute. This information is used to calculate the actual kilobytes per second writing rate for the SQL command and thus compare it to the dirty rate limit set for a resource group. + +To use the `pg_stat_statements` module: + +1. In the `postgresql.conf` file, add `$libdir/pg_stat_statements` to the `shared_preload_libraries` configuration parameter: + + ```ini + shared_preload_libraries = '$libdir/dbms_pipe,$libdir/edb_gen,$libdir/pg_stat_statements' + ``` + +2. Restart the database server. + +3. Use the `CREATE EXTENSION` command to finish creating the `pg_stat_statements` module: + + ```sql + edb=# CREATE EXTENSION pg_stat_statements SCHEMA public; + CREATE EXTENSION + ``` + + The `pg_stat_statements_reset()` function clears out the `pg_stat_statements` view for clarity of each example. + +The resource groups with the dirty rate limit settings shown in the previous query are used in these examples. + + + +## Example: Single process in a single group + +This sequence of commands creates table `t1`. The current process is set to use resource group `resgrp_b`. The `pg_stat_statements` view is cleared out by running the `pg_stat_statements_reset()` function. + +The `INSERT` command then generates a series of integers from 1 to 10,000 to populate the table and dirty approximately 10,000 blocks: + +```sql +edb=# CREATE TABLE t1 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10); +CREATE TABLE +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) +``` +```sql +edb=# SELECT pg_stat_statements_reset(); +__OUTPUT__ + pg_stat_statements_reset +-------------------------- + +(1 row) + +edb=# INSERT INTO t1 VALUES (generate_series (1,10000), 'aaa'); +INSERT 0 10000 +``` + +This example shows the results from the `INSERT` command: + +```sql +edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM +pg_stat_statements; +__OUTPUT__ +-[ RECORD 1 ]--------+-------------------------------------------------- + query | INSERT INTO t1 VALUES (generate_series (?,?), ?); + rows | 10000 + total_time | 13496.184 + shared_blks_dirtied | 10003 +``` + +The actual dirty rate is calculated as follows: + +- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 13496.184 ms, which yields 0.74117247 blocks per millisecond. +- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 741.17247 blocks per second. +- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 6072 kilobytes per second. + +The actual dirty rate of 6072 kilobytes per second is close to the dirty rate limit for the resource group, which is 6144 kilobytes per second. 
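+
+The same arithmetic can be expressed as a single query against `pg_stat_statements`. A sketch, assuming the `total_time` column shown above (on recent PostgreSQL versions the column is named `total_exec_time`):
+
+```sql
+-- Dirty rate in kilobytes per second, following the calculation above:
+-- blocks/ms * 1000 = blocks/s, then blocks/s * 8.192 = KB/s
+SELECT query,
+       shared_blks_dirtied,
+       round(((shared_blks_dirtied / total_time) * 1000 * 8.192)::numeric, 0) AS kb_per_sec
+FROM pg_stat_statements
+WHERE shared_blks_dirtied > 0
+  AND total_time > 0;
+```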
+ +By contrast, if you repeat the steps without the process belonging to any resource group, the dirty buffer rate is much higher: + +```sql +edb=# CREATE TABLE t1 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10); +CREATE TABLE +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + +(1 row) +``` +```sql +edb=# SELECT pg_stat_statements_reset(); +__OUTPUT__ + pg_stat_statements_reset +-------------------------- + +(1 row) + +edb=# INSERT INTO t1 VALUES (generate_series (1,10000), 'aaa'); +INSERT 0 10000 +``` + +This example shows the results from the `INSERT` command without the use of a resource group: + +```sql +edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM +pg_stat_statements; +__OUTPUT__ +-[ RECORD 1 ]--------+-------------------------------------------------- + query | INSERT INTO t1 VALUES (generate_series (?,?), ?); + rows | 10000 + total_time | 2432.165 + shared_blks_dirtied | 10003 +``` + +The total time was only 2432.165 milliseconds, compared to 13496.184 milliseconds when using a resource group with a dirty rate limit set to 6144 kilobytes per second. + +The actual dirty rate without the use of a resource group is calculated as follows: + +- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 2432.165 ms, which yields 4.112797 blocks per millisecond. +- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 4112.797 blocks per second. +- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 33692 kilobytes per second. + +The actual dirty rate of 33692 kilobytes per second is much higher than when the resource group with a dirty rate limit of 6144 kilobytes per second was used. + + + +## Example: Multiple processes in a single group + +As stated previously, the dirty rate limit applies to the aggregate of all processes in the resource group. This concept is illustrated in the following example. + +For this example, the inserts are performed simultaneously on two different tables in two separate `psql` sessions, each of which was added to resource group `resgrp_b` that has a `dirty_rate_limit` set to 6144 kilobytes per second. + +### Session 1 + +```sql +edb=# CREATE TABLE t1 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10); +CREATE TABLE +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) + +edb=# INSERT INTO t1 VALUES (generate_series (1,10000), 'aaa'); +INSERT 0 10000 +``` + +### Session 2 + +```sql +edb=# CREATE TABLE t2 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10); +CREATE TABLE +edb=# SET edb_resource_group TO resgrp_b; +SET +edb=# SHOW edb_resource_group; +__OUTPUT__ + edb_resource_group +-------------------- + resgrp_b +(1 row) +``` +```sql +edb=# SELECT pg_stat_statements_reset(); +__OUTPUT__ + pg_stat_statements_reset +-------------------------- +(1 row) + +edb=# INSERT INTO t2 VALUES (generate_series (1,10000), 'aaa'); +INSERT 0 10000 +``` + +!!! Note + The `INSERT` commands in session 1 and session 2 started after the `SELECT pg_stat_statements_reset()` command in session 2 ran. + +This example shows the results from the `INSERT` commands in the two sessions. `RECORD 3` shows the results from session 1. `RECORD 2` shows the results from session 2. 
+
+```sql
+edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM
+pg_stat_statements;
+__OUTPUT__
+-[ RECORD 1 ]--------+--------------------------------------------------
+ query               | SELECT pg_stat_statements_reset();
+ rows                | 1
+ total_time          | 0.43
+ shared_blks_dirtied | 0
+-[ RECORD 2 ]--------+--------------------------------------------------
+ query               | INSERT INTO t2 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 30591.551
+ shared_blks_dirtied | 10003
+-[ RECORD 3 ]--------+--------------------------------------------------
+ query               | INSERT INTO t1 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 33215.334
+ shared_blks_dirtied | 10003
+```
+
+The total time was 33215.334 milliseconds for session 1 and 30591.551 milliseconds for session 2. When only one session was active in the same resource group, the time was 13496.184 milliseconds. Thus, more active processes in the resource group result in a slower dirty rate for each active process in the group. The following calculations show this.
+
+The actual dirty rate for session 1 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 33215.334 ms, which yields 0.30115609 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 301.15609 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2467 kilobytes per second.
+
+The actual dirty rate for session 2 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 30591.551 ms, which yields 0.32698571 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 326.98571 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2679 kilobytes per second.
+
+The combined dirty rate from session 1 (2467 kilobytes per second) and from session 2 (2679 kilobytes per second) yields 5146 kilobytes per second, which is below the set dirty rate limit of the resource group (6144 kilobytes per second).
+
+
+
+## Example: Multiple processes in multiple groups
+
+In this example, two additional `psql` sessions are used along with the previous two sessions. The third and fourth sessions perform the same `INSERT` command in resource group `resgrp_c` with a `dirty_rate_limit` of 3072 kilobytes per second.
+
+Repeat sessions 1 and 2 from the prior example using resource group `resgrp_b` with a `dirty_rate_limit` of 6144 kilobytes per second:
+
+### Session 3
+
+```sql
+edb=# CREATE TABLE t3 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10);
+CREATE TABLE
+edb=# SET edb_resource_group TO resgrp_c;
+SET
+edb=# SHOW edb_resource_group;
+__OUTPUT__
+ edb_resource_group
+--------------------
+ resgrp_c
+(1 row)
+
+edb=# INSERT INTO t3 VALUES (generate_series (1,10000), 'aaa');
+INSERT 0 10000
+```
+
+### Session 4
+
+```sql
+edb=# CREATE TABLE t4 (c1 INTEGER, c2 CHARACTER(500)) WITH (FILLFACTOR = 10);
+CREATE TABLE
+edb=# SET edb_resource_group TO resgrp_c;
+SET
+edb=# SHOW edb_resource_group;
+__OUTPUT__
+ edb_resource_group
+--------------------
+ resgrp_c
+(1 row)
+```
+```sql
+edb=# SELECT pg_stat_statements_reset();
+__OUTPUT__
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+edb=# INSERT INTO t4 VALUES (generate_series (1,10000), 'aaa');
+INSERT 0 10000
+```
+
+!!! Note
+    The `INSERT` commands in all four sessions started after the `SELECT pg_stat_statements_reset()` command in session 4 ran.
+
+This example shows the results from the `INSERT` commands in the four sessions:
+
+- `RECORD 3` shows the results from session 1. `RECORD 2` shows the results from session 2.
+
+- `RECORD 4` shows the results from session 3. `RECORD 5` shows the results from session 4.
+
+```sql
+edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM
+pg_stat_statements;
+__OUTPUT__
+-[ RECORD 1 ]--------+--------------------------------------------------
+ query               | SELECT pg_stat_statements_reset();
+ rows                | 1
+ total_time          | 0.467
+ shared_blks_dirtied | 0
+-[ RECORD 2 ]--------+--------------------------------------------------
+ query               | INSERT INTO t2 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 31343.458
+ shared_blks_dirtied | 10003
+-[ RECORD 3 ]--------+--------------------------------------------------
+ query               | INSERT INTO t1 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 28407.435
+ shared_blks_dirtied | 10003
+-[ RECORD 4 ]--------+--------------------------------------------------
+ query               | INSERT INTO t3 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 52727.846
+ shared_blks_dirtied | 10003
+-[ RECORD 5 ]--------+--------------------------------------------------
+ query               | INSERT INTO t4 VALUES (generate_series (?,?), ?);
+ rows                | 10000
+ total_time          | 56063.697
+ shared_blks_dirtied | 10003
+```
+
+The times of session 1 (28407.435) and session 2 (31343.458) are close to each other, as they are both in the same resource group with `dirty_rate_limit` set to 6144. These times differ from the times of session 3 (52727.846) and session 4 (56063.697), which are in the resource group with `dirty_rate_limit` set to 3072. The latter group has a slower dirty rate limit, so the expected processing time is longer, as is the case for sessions 3 and 4.
+
+The actual dirty rate for session 1 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 28407.435 ms, which yields 0.35212612 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 352.12612 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2885 kilobytes per second.
+
+The actual dirty rate for session 2 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 31343.458 ms, which yields 0.31914156 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 319.14156 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2614 kilobytes per second.
+
+The combined dirty rate from session 1 (2885 kilobytes per second) and from session 2 (2614 kilobytes per second) yields 5499 kilobytes per second, which is near the set dirty rate limit of the resource group (6144 kilobytes per second).
+
+The actual dirty rate for session 3 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 52727.846 ms, which yields 0.18971001 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 189.71001 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 1554 kilobytes per second.
+
+The actual dirty rate for session 4 is calculated as follows:
+
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 56063.697 ms, which yields 0.17842205 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 178.42205 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 1462 kilobytes per second.
+
+The combined dirty rate from session 3 (1554 kilobytes per second) and from session 4 (1462 kilobytes per second) yields 3016 kilobytes per second, which is near the set dirty rate limit of the resource group (3072 kilobytes per second).
+
+This example shows how EDB Resource Manager keeps the aggregate dirty rate of the active processes in its groups close to the dirty rate limit set for each group.
+
+
diff --git a/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/edb_resource_manager_key_concepts.mdx b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/edb_resource_manager_key_concepts.mdx
new file mode 100644
index 00000000000..873501369b8
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/edb_resource_manager_key_concepts.mdx
@@ -0,0 +1,20 @@
+---
+title: "EDB Resource Manager key concepts"
+navTitle: "Key concepts"
+description: "Describes key points about using EDB Resource Manager"
+---
+
+You use EDB Resource Manager to control the use of operating system resources used by EDB Postgres Advanced Server processes.
+
+Some key points about using EDB Resource Manager are:
+
+- The basic component of EDB Resource Manager is a *resource group*. A resource group is a named, global group. It's available to all databases in an EDB Postgres Advanced Server instance, and you can define various resource usage limits on it. EDB Postgres Advanced Server processes that are assigned as members of a given resource group are then controlled by EDB Resource Manager. This configuration keeps the aggregate resource use of all processes in the group near the limits defined on the group.
+- Data definition language commands are used to create, alter, and drop resource groups. Only a database user with superuser privileges can use these commands.
+- *Resource type parameters* define the desired aggregate consumption level of all processes belonging to a resource group. You use different resource type parameters for the different types of system resources currently supported by EDB Resource Manager.
+- You can create multiple resource groups, each with different settings for its resource type parameters, which defines different consumption levels for each resource group.
+- EDB Resource Manager throttles processes in a resource group to keep resource consumption near the limits defined by the resource type parameters. If multiple resource type parameters have defined settings in a resource group, the actual resource consumption might be significantly lower for certain resource types than their defined resource type parameter settings. This lower consumption happens because EDB Resource Manager throttles processes, attempting to keep all resources with defined resource type settings within their defined limits.
+- The definitions of available resource groups and their resource type settings are stored in a shared global system catalog. Thus, all databases in a given EDB Postgres Advanced Server instance can use resource groups.
+- The `edb_max_resource_groups` configuration parameter sets the maximum number of resource groups that can be active at the same time as running processes. The default setting is 16 resource groups. Changes to this parameter take effect when you restart the database server.
+- Use the `SET edb_resource_group TO group_name` command to assign the current process to a specified resource group. Use the `RESET edb_resource_group` command or the `SET edb_resource_group TO DEFAULT` command to remove the current process from a resource group.
+- You can assign a default resource group to a role using the `ALTER ROLE ... SET` command or to a database using the `ALTER DATABASE ... SET` command. You can assign the entire database server instance a default resource group by setting the `edb_resource_group` configuration parameter in the `postgresql.conf` file.
+- To include resource groups in a backup file of the database server instance, use the `pg_dumpall` backup utility with default settings. That is, don't specify any of the `--globals-only`, `--roles-only`, or `--tablespaces-only` options.
diff --git a/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/index.mdx b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/index.mdx
new file mode 100644
index 00000000000..4c059c256a4
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/10_edb_resource_manager/index.mdx
@@ -0,0 +1,32 @@
+---
+title: "Throttling CPU and I/O at the process level"
+navTitle: "EDB Resource Manager"
+indexCards: simple
+description: "How to use EDB Resource Manager to control the use of operating system resources used by EDB Postgres Advanced Server processes"
+navigation:
+  - edb_resource_manager_key_concepts
+  - creating_resource_groups
+  - cpu_usage_throttling
+  - dirty_buffer_throttling
+  - edb_resource_manager_system_catalogs
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.37.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.35.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.34.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.33.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.36.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.083.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.082.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.079.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.081.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.080.html"
+---
+
+
+
+EDB Resource Manager is an EDB Postgres Advanced Server feature that lets you control the use of operating system resources used by EDB Postgres Advanced Server processes.
+
+This capability allows you to protect the system from processes that might uncontrollably overuse and monopolize certain system resources.
+
+
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/checking_the_status.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/checking_the_status.mdx
new file mode 100644
index 00000000000..b5a9e7eadb3
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/checking_the_status.mdx
@@ -0,0 +1,45 @@
+---
+title: "Checking the status of the cloning process"
+navTitle: "Checking status"
+description: "Describes how to obtain the status of the cloning function"
+---
+
+
+
+The `process_status_from_log` function provides the status of a cloning function from its log file:
+
+```sql
+process_status_from_log(
+  <log_file> TEXT
+)
+```
+
+The function returns the following fields from the log file:
+
+| Field name       | Description                                                                                               |
+| ---------------- | --------------------------------------------------------------------------------------------------------- |
+| `status`         | Displays either `STARTING`, `RUNNING`, `FINISH`, or `FAILED`.                                               |
+| `execution_time` | When the command was executed. Displayed in timestamp format.                                               |
+| `pid`            | Process ID of the session in which the clone schema function is called.                                     |
+| `level`          | Displays either `INFO`, `ERROR`, or `SUCCESSFUL`.                                                           |
+| `stage`          | Displays either `STARTUP`, `INITIAL`, `DDL-COLLECTION`, `PRE-DATA`, `DATA-COPY`, `POST-DATA`, or `FINAL`.   |
+| `message`        | Information about each command or failure.                                                                  |
+
+## Parameters
+
+`log_file`
+
+Name of the log file recording the cloning of a schema, as specified when the cloning function was invoked.
+
+## Example
+
+The following shows the use of the `process_status_from_log` function:
+
+```sql
+edb=# SELECT edb_util.process_status_from_log('clone_edb_edbcopy');
+__OUTPUT__
+                                      process_status_from_log
+---------------------------------------------------------------------------------------------------
+ (FINISH,"26-JUN-17 11:57:03.214458 -04:00",3691,INFO,"STAGE: FINAL","successfully cloned schema")
+(1 row)
+```
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/cloning_with_non_super_user.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/cloning_with_non_super_user.mdx
new file mode 100644
index 00000000000..47f4c6443f8
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/cloning_with_non_super_user.mdx
@@ -0,0 +1,108 @@
+---
+title: "Cloning schema as a non-super user"
+---
+
+You can clone a schema as a non-super user. These two functions are created while creating the extension:
+
+- GRANT_CLONE_SCHEMA_PRIVILEGES - Grants a non-super user the privileges to clone a schema.
+- REVOKE_CLONE_SCHEMA_PRIVILEGES - Revokes the clone schema privileges from a non-super user.
+
+## GRANT_CLONE_SCHEMA_PRIVILEGES
+
+You can grant the clone schema privileges to a non-super user using this function.
+
+Syntax:
+
+```sql
+GRANT_CLONE_SCHEMA_PRIVILEGES(<user_name> TEXT [, <allow_remote_schema_clone> BOOLEAN] [, <print_commands> BOOLEAN])
+```
+
+Where:
+
+`user_name`
+
+Name of the user to whom privileges are to be granted to do local cloning.
+
+`allow_remote_schema_clone`
+
+Optionally provide a boolean value to this parameter to control remote cloning by the user. By default, the value is set to `false`. The value `true` grants the user the privileges to do remote cloning.
+
+`print_commands`
+
+Optionally provide a boolean value to this parameter to control printing of the executed commands. By default, the value is set to `false`. The value `true` prints the executed commands on the terminal.
+
+This example shows how to grant the non-super user `ec2-user` the privileges for local and remote cloning:
+
+```sql
+SELECT edb_util.grant_clone_schema_privileges(user_name => 'ec2-user',
+       allow_remote_schema_clone => true,
+       print_commands => true);
+__OUTPUT__
+INFO:  Executed command: GRANT USAGE ON SCHEMA edb_util TO "ec2-user"
+INFO:  Executed command: GRANT pg_read_all_settings TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON PACKAGE SYS.UTL_FILE TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON FUNCTION pg_catalog.pg_stat_file(text) TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON FUNCTION pg_catalog.pg_read_file(text, bigint, bigint) TO "ec2-user"
+INFO:  Executed command: GRANT SELECT ON pg_authid TO "ec2-user"
+INFO:  Executed command: GRANT SELECT ON pg_user_mapping TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON FUNCTION dblink(text, text) TO "ec2-user"
+INFO:  Executed command: GRANT EXECUTE ON FUNCTION pg_catalog.pg_file_write(text, text, boolean) TO "ec2-user"
+INFO:  Executed command: GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO "ec2-user"
+┌───────────────────────────────┐
+│ grant_clone_schema_privileges │
+├───────────────────────────────┤
+│ t                             │
+└───────────────────────────────┘
+(1 row)
+```
+
+## REVOKE_CLONE_SCHEMA_PRIVILEGES
+
+You can revoke the clone schema privileges from a non-super user using this function.
+
+Syntax:
+
+```sql
+revoke_clone_schema_privileges(<user_name> TEXT [, <revoke_remote_schema_clone> BOOLEAN] [, <print_commands> BOOLEAN])
+```
+
+Where:
+
+`user_name`
+
+Name of the user from whom to revoke the cloning privileges.
+
+`revoke_remote_schema_clone`
+
+Optionally provide a boolean value to this parameter to control remote cloning by the user. By default, the value is set to `false`. The value `true` revokes the remote cloning privileges from the user.
+
+`print_commands`
+
+Optionally provide a boolean value to this parameter to control printing of the executed commands. By default, the value is set to `false`. The value `true` prints the executed commands on the terminal.
+
+This example shows how to revoke cloning privileges from the `ec2-user` user:
+
+```sql
+SELECT edb_util.revoke_clone_schema_privileges(user_name => 'ec2-user',
+       revoke_remote_schema_clone => true,
+       print_commands => true);
+__OUTPUT__
+INFO:  Revoked USAGE on schema edb_util from ec2-user.
+INFO:  Revoked pg_read_all_settings from ec2-user.
+INFO:  Revoked EXECUTE on package SYS.UTL_FILE from ec2-user.
+INFO:  Revoked EXECUTE on function pg_catalog.pg_stat_file(text) from ec2-user.
+INFO:  Revoked EXECUTE on function pg_catalog.pg_read_file(text, bigint, bigint) from ec2-user.
+INFO:  Revoked SELECT on pg_authid from ec2-user.
+INFO:  Revoked SELECT on pg_user_mapping from ec2-user.
+INFO:  Revoked EXECUTE on function dblink_connect_u(text, text) from ec2-user.
+INFO:  Revoked EXECUTE on function dblink(text, text) from ec2-user.
+INFO:  Revoked EXECUTE on function pg_catalog.pg_file_write(text, text, boolean) from ec2-user.
+INFO:  Revoked USAGE on foreign data wrapper postgres_fdw from ec2-user.
+┌────────────────────────────────┐
+│ revoke_clone_schema_privileges │
+├────────────────────────────────┤
+│ t                              │
+└────────────────────────────────┘
+(1 row)
+```
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx
new file mode 100644
index 00000000000..ef2ddf62a0b
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx
@@ -0,0 +1,321 @@
+---
+title: "Copying database objects from a remote source"
+navTitle: "Copying a remote database"
+description: "Describes how to copy a database when the source schema and the copy will reside in separate databases"
+---
+
+There are two functions you can use with EDB Clone Schema to perform a remote copy of a schema and its database objects:
+
+- `remotecopyschema` — This function copies a schema and its database objects from a source database to a different target database. Use this function when the source schema and the copy will reside in separate databases. The separate databases can reside in the same EDB Postgres Advanced Server database clusters or in different ones. See [remotecopyschema](#remotecopyschema) for more information.
+- `remotecopyschema_nb` — This function performs the same purpose as `remotecopyschema` but as a background job, which frees up the terminal from which the function was initiated. This function is a non-blocking function. See [remotecopyschema_nb](#remotecopyschema_nb) for more information.
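+
+Both functions identify the source and target databases through foreign servers managed by the `postgres_fdw` foreign data wrapper. As a sketch, a source foreign server and user mapping such as the ones used in the following examples might be created like this (the connection details and password are illustrative):
+
+```sql
+CREATE EXTENSION IF NOT EXISTS postgres_fdw;
+
+-- Foreign server pointing at the remote source database
+CREATE SERVER src_server FOREIGN DATA WRAPPER postgres_fdw
+    OPTIONS (host '192.168.2.28', port '5444', dbname 'srcdb');
+
+-- Map the local user to a role on the remote server
+CREATE USER MAPPING FOR enterprisedb SERVER src_server
+    OPTIONS (user 'enterprisedb', password 'password');
+```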
+
+## Copying a remote schema
+
+
+
+The `remotecopyschema` function copies a schema and its database objects from a source schema in the remote source database specified in the `source_fdw` foreign server to a target schema in the local target database specified in the `target_fdw` foreign server:
+
+```sql
+remotecopyschema(
+  <source_fdw> TEXT,
+  <target_fdw> TEXT,
+  <source_schema> TEXT,
+  <target_schema> TEXT,
+  <log_filename> TEXT
+  [, <on_tblspace> BOOLEAN
+  [, <verbose_on> BOOLEAN
+  [, <copy_acls> BOOLEAN
+  [, <worker_count> INTEGER ]]]]
+)
+```
+
+The function returns a Boolean value. If the function succeeds, then `true` is returned. If the function fails, then `false` is returned.
+
+The `source_fdw`, `target_fdw`, `source_schema`, `target_schema`, and `log_filename` are required parameters. All other parameters are optional.
+
+### Parameters
+
+`source_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper from which to clone database objects.
+
+`target_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper to which to clone database objects.
+
+`source_schema`
+
+Name of the schema from which to clone database objects.
+
+`target_schema`
+
+Name of the schema into which to clone database objects from the source schema.
+
+`log_filename`
+
+Name of the log file in which information from the function is recorded. The log file is created under the directory specified by the `log_directory` configuration parameter in the `postgresql.conf` file.
+
+`on_tblspace`
+
+Boolean value to specify whether to create database objects in their tablespaces. If `false`, then the `TABLESPACE` clause isn't included in the applicable `CREATE` DDL statement when added to the target schema. If `true`, then the `TABLESPACE` clause is included in the `CREATE` DDL statement when added to the target schema. The default value is `false`.
+
+!!! Note
+    If you specify `true` and a database object has a `TABLESPACE` clause, the tablespace must exist in the target database cluster. Otherwise, the cloning function fails.
+
+`verbose_on`
+
+Boolean value to specify whether to print the DDLs in `log_filename` when creating objects in the target schema. If `false`, then DDLs aren't printed. If `true`, then DDLs are printed. The default value is `false`.
+
+`copy_acls`
+
+Boolean value to specify whether to include the access control list (ACL) while creating objects in the target schema. The access control list is the set of `GRANT` privilege statements. If `false`, then the access control list isn't included for the target schema. If `true`, then the access control list is included for the target schema. The default value is `false`.
+
+!!! Note
+    If you specify `true`, a role with `GRANT` privilege must exist in the target database cluster. Otherwise, the cloning function fails.
+
+`worker_count`
+
+Number of background workers to perform the clone in parallel. The default value is `1`.
+
+### Example
+
+This example shows cloning schema `srcschema` in database `srcdb` (as defined by `src_server`) to target schema `tgtschema` in database `tgtdb` (as defined by `tgt_server`).
+
+The source server environment:
+
+- Host on which the source database server is running: `192.168.2.28`
+- Port of the source database server: `5444`
+- Database source of the clone: `srcdb`
+- Foreign server (`src_server`) and user mapping with the information of the preceding bullet points
+- Source schema: `srcschema`
+
+The target server environment:
+
+- Host on which the target database server is running: `localhost`
+- Port of the target database server: `5444`
+- Database target of the clone: `tgtdb`
+- Foreign server (`tgt_server`) and user mapping with the information of the preceding bullet points
+- Target schema: `tgtschema`
+- Database superuser to invoke `remotecopyschema`: `enterprisedb`
+
+Before invoking the function, the database user `enterprisedb` connects to database `tgtdb`. A `worker_count` of `4` is specified for this function.
+
+```sql
+tgtdb=# SELECT edb_util.remotecopyschema
+('src_server','tgt_server','srcschema','tgtschema','clone_rmt_src_tgt',worker_count => 4);
+__OUTPUT__
+ remotecopyschema
+------------------
+ t
+(1 row)
+```
+
+This example displays the status from the log file during various points in the cloning process:
+
+```sql
+tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
+__OUTPUT__
+                                       process_status_from_log
+
+-----------------------------------------------------------------------------------------------------
+--------------------------------------
+ (RUNNING,"28-JUN-17 13:18:05.299953 -04:00",4021,INFO,"STAGE: DATA-COPY","[0][0] successfully
+ copied data in [tgtschema.pgbench_tellers]
+")
+ (1 row)
+```
+```sql
+tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
+__OUTPUT__
+                                       process_status_from_log
+
+-----------------------------------------------------------------------------------------------------
+---------------------------------------
+ (RUNNING,"28-JUN-17 13:18:06.634364 -04:00",4022,INFO,"STAGE: DATA-COPY","[0][1] successfully
+ copied data in [tgtschema.pgbench_history]
+")
+ (1 row)
+```
+```sql
+tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
+__OUTPUT__
+                                       process_status_from_log
+
+------------------------------------------------------------------------------------------------------
+-------------------------------------
+ (RUNNING,"28-JUN-17 13:18:10.550393 -04:00",4039,INFO,"STAGE: POST-DATA","CREATE PRIMARY KEY
+ CONSTRAINT pgbench_tellers_pkey successful"
+)
+ (1 row)
+```
+```sql
+tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
+__OUTPUT__
+                                       process_status_from_log
+-------------------------------------------------------------------------------------------------------
+---------------------------------
+ (FINISH,"28-JUN-17 13:18:12.019627 -04:00",4039,INFO,"STAGE: FINAL","successfully clone
+ schema into tgtschema")
+ (1 row)
+```
+
+### Results
+
+The following shows the cloned tables:
+
+```sql
+tgtdb=# \dt+
+__OUTPUT__
+                                 List of relations
+  Schema   |       Name       | Type  |    Owner     |    Size    | Description
+-----------+------------------+-------+--------------+------------+-------------
+ tgtschema | pgbench_accounts | table | enterprisedb | 256 MB     |
+ tgtschema | pgbench_branches | table | enterprisedb | 8192 bytes |
+ tgtschema | pgbench_history  | table | enterprisedb | 25 MB      |
+ tgtschema | pgbench_tellers  | table | enterprisedb | 16 kB      |
+(4 rows)
+```
+
+When the `remotecopyschema` function was invoked, four background workers were specified.
+
+The following portion of the log file `clone_rmt_src_tgt` shows the status of the parallel data copying operation using four background workers:
+
+```text
+Wed Jun 28 13:18:05.232949 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0] table count [4]
+Wed Jun 28 13:18:05.233321 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0][0] worker started to
+copy data
+Wed Jun 28 13:18:05.233640 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0][1] worker started to
+copy data
+Wed Jun 28 13:18:05.233919 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0][2] worker started to
+copy data
+Wed Jun 28 13:18:05.234231 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0][3] worker started to
+copy data
+Wed Jun 28 13:18:05.298174 2017 EDT: [4024] INFO: [STAGE: DATA-COPY] [0][3] successfully
+copied data in [tgtschema.pgbench_branches]
+Wed Jun 28 13:18:05.299913 2017 EDT: [4021] INFO: [STAGE: DATA-COPY] [0][0] successfully
+copied data in [tgtschema.pgbench_tellers]
+Wed Jun 28 13:18:06.634310 2017 EDT: [4022] INFO: [STAGE: DATA-COPY] [0][1] successfully
+copied data in [tgtschema.pgbench_history]
+Wed Jun 28 13:18:10.477333 2017 EDT: [4023] INFO: [STAGE: DATA-COPY] [0][2] successfully
+copied data in [tgtschema.pgbench_accounts]
+Wed Jun 28 13:18:10.477609 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0] all workers finished
+[4]
+Wed Jun 28 13:18:10.477654 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] [0] copy done [4] tables
+Wed Jun 28 13:18:10.493938 2017 EDT: [4019] INFO: [STAGE: DATA-COPY] successfully copied data
+into tgtschema
+```
+
+The `DATA-COPY` log message includes two square-bracket numbers, for example, `[0][3]`. The first number is the job index. The second number is the worker index. The worker index values range from 0 to 3 for the four background workers.
+
+If two clone schema jobs run in parallel, the first job's log file has `0` as the job index, and the second's has `1` as the job index.
+
+## Copying a remote schema using a batch job
+
+
+
+The `remotecopyschema_nb` function copies a schema and its database objects from a source schema in the remote source database specified in the `source_fdw` foreign server to a target schema in the local target database specified in the `target_fdw` foreign server. Copying occurs in a non-blocking manner as a job submitted to DBMS_JOB.
+
+```sql
+remotecopyschema_nb(
+  <source_fdw> TEXT,
+  <target_fdw> TEXT,
+  <source> TEXT,
+  <target> TEXT,
+  <log_filename> TEXT
+  [, <on_tblspace> BOOLEAN
+  [, <verbose_on> BOOLEAN
+  [, <copy_acls> BOOLEAN
+  [, <worker_count> INTEGER ]]]]
+)
+```
+
+The function returns an `INTEGER` value: the job ID of the job submitted to DBMS_JOB. If the function fails, it returns null.
+
+The `source_fdw`, `target_fdw`, `source`, `target`, and `log_filename` parameters are required. All other parameters are optional.
+
+After the job is complete, remove it with the `remove_log_file_and_job` function.
+
+### Parameters
+
+`source_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper from which to clone database objects.
+
+`target_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper to which to clone database objects.
+
+`source`
+
+Name of the schema from which to clone database objects.
+
+`target`
+
+Name of the schema into which to clone database objects from the source schema.
+
+`log_filename`
+
+Name of the log file in which to record information from the function. The log file is created under the directory specified by the `log_directory` configuration parameter in the `postgresql.conf` file.
+
+`on_tblspace`
+
+Boolean value to specify whether to create database objects in their tablespaces. If `false`, then the `TABLESPACE` clause isn't included in the applicable `CREATE` DDL statement when added to the target schema. If `true`, then the `TABLESPACE` clause is included in the `CREATE` DDL statement when added to the target schema. The default value is `false`.
+
+!!! Note
+    If you specify `true` and a database object has a `TABLESPACE` clause, that tablespace must exist in the target database cluster. Otherwise, the cloning function fails.
+
+`verbose_on`
+
+Boolean value to specify whether to print the DDLs in `log_filename` when creating objects in the target schema. If `false`, then DDLs aren't printed. If `true`, then DDLs are printed. The default value is `false`.
+
+`copy_acls`
+
+Boolean value to specify whether to include the access control list (ACL) while creating objects in the target schema. The access control list is the set of `GRANT` privilege statements. If `false`, then the access control list isn't included for the target schema. If `true`, then the access control list is included for the target schema. The default value is `false`.
+
+!!! Note
+    If you specify `true`, a role with `GRANT` privilege must exist in the target database cluster. Otherwise, the cloning function fails.
+
+`worker_count`
+
+Number of background workers to perform the clone in parallel. The default value is `1`.
+
+### Example
+
+This example performs the same cloning operation as the example in [`remotecopyschema`](#remotecopyschema) but uses the non-blocking function `remotecopyschema_nb`.
+
+### Results
+
+The `remotecopyschema_nb` function returns the job ID, shown as `2` in the example:
+
+```sql
+tgtdb=# SELECT edb_util.remotecopyschema_nb
+('src_server','tgt_server','srcschema','tgtschema','clone_rmt_src_tgt',worker_count => 4);
+__OUTPUT__
+ remotecopyschema_nb
+---------------------
+                   2
+(1 row)
+```
+
+The following shows the completed status of the job:
+
+```sql
+tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
+__OUTPUT__
+                                       process_status_from_log
+--------------------------------------------------------------------------------------------------------------------
+ (FINISH,"29-JUN-17 current:16:00.100284 -04:00",3849,INFO,"STAGE: FINAL","successfully clone schema into tgtschema")
+(1 row)
+```
+
+The following command removes the log file and the job:
+
+```sql
+tgtdb=# SELECT edb_util.remove_log_file_and_job ('clone_rmt_src_tgt',2);
+__OUTPUT__
+ remove_log_file_and_job
+-------------------------
+ t
+(1 row)
+```
+
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_schema.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_schema.mdx
new file mode 100644
index 00000000000..15d61e25dcd
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/copying_a_schema.mdx
@@ -0,0 +1,288 @@
+---
+title: "Copying database objects from a local source to a target"
+navTitle: "Copying a local database"
+description: "Describes how to copy database objects from a source database into the same database but with a different schema name"
+---
+
+There are two functions you can use with EDB Clone Schema to perform a local copy of a schema and its database objects:
+
+- `localcopyschema` — This function copies a schema and its database objects from a source database into the same database (the target) but with a different schema name from the original. Use this function when the source schema and the copy will reside within the same database. See [localcopyschema](#localcopyschema) for more information.
+- `localcopyschema_nb` — This function serves the same purpose as `localcopyschema` but runs as a background job, which frees up the terminal from which the function was initiated. This function is referred to as a *non-blocking* function. See [localcopyschema_nb](#localcopyschema_nb) for more information.
+
+## Performing a local copy of a schema
+
+
+
+The `localcopyschema` function copies a schema and its database objects in a local database specified in the `source_fdw` foreign server from the source schema to the specified target schema in the same database.
+
+```sql
+localcopyschema(
+  <source_fdw> TEXT,
+  <source_schema> TEXT,
+  <target_schema> TEXT,
+  <log_filename> TEXT
+  [, <on_tblspace> BOOLEAN
+  [, <verbose_on> BOOLEAN
+  [, <copy_acls> BOOLEAN
+  [, <worker_count> INTEGER ]]]]
+)
+```
+
+The function returns a Boolean value: `true` if the function succeeds and `false` if it fails.
+
+The `source_fdw`, `source_schema`, `target_schema`, and `log_filename` parameters are required. All other parameters are optional.
+
+### Parameters
+
+`source_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper from which to clone database objects.
+
+`source_schema`
+
+Name of the schema from which to clone database objects.
+
+`target_schema`
+
+Name of the schema into which to clone database objects from the source schema.
+
+`log_filename`
+
+Name of the log file in which information from the function is recorded. The log file is created under the directory specified by the `log_directory` configuration parameter in the `postgresql.conf` file.
+
+`on_tblspace`
+
+Boolean value to specify whether to create database objects in their tablespaces. If `false`, then the `TABLESPACE` clause isn't included in the applicable `CREATE` DDL statement when added to the target schema. If `true`, then the `TABLESPACE` clause is included in the `CREATE` DDL statement when added to the target schema. The default value is `false`.
+
+`verbose_on`
+
+Boolean value to specify whether to print the DDLs in `log_filename` when creating objects in the target schema. If `false`, then DDLs aren't printed. If `true`, then DDLs are printed. The default value is `false`.
+
+`copy_acls`
+
+Boolean value to specify whether to include the access control list (ACL) while creating objects in the target schema. The access control list is the set of `GRANT` privilege statements. If `false`, then the access control list isn't included for the target schema. If `true`, then the access control list is included for the target schema. The default value is `false`.
+
+`worker_count`
+
+Number of background workers to perform the clone in parallel. The default value is `1`.
+
+### Example
+
+This example shows the cloning of schema `edb`, containing a set of database objects, to target schema `edbcopy`. Both schemas are in database `edb` as defined by `local_server`.
+
+The example is for the following environment:
+
+- Host on which the database server is running: `localhost`
+- Port of the database server: `5444`
+- Database source/target of the clone: `edb`
+- Foreign server (`local_server`) and user mapping with the information of the preceding bullet points
+- Source schema: `edb`
+- Target schema: `edbcopy`
+- Database superuser to invoke `localcopyschema`: `enterprisedb`
+
+Before invoking the function, database user `enterprisedb` connects to database `edb`:
+
+```sql
+edb=# SET search_path TO "$user",public,edb_util;
+SET
+edb=# SHOW search_path;
+__OUTPUT__
+        search_path
+---------------------------
+ "$user", public, edb_util
+(1 row)
+```
+```sql
+edb=# SELECT localcopyschema ('local_server','edb','edbcopy','clone_edb_edbcopy');
+__OUTPUT__
+ localcopyschema
+-----------------
+ t
+(1 row)
+```
+
+The following displays the logging status using the `process_status_from_log` function:
+
+```sql
+edb=# SELECT process_status_from_log('clone_edb_edbcopy');
+__OUTPUT__
+                                    process_status_from_log
+------------------------------------------------------------------------------------------------
+ (FINISH,"2017-06-29 11:07:36.830783-04",3855,INFO,"STAGE: FINAL","successfully cloned schema")
+(1 row)
+```
+
+### Results
+
+After the clone is complete, the following shows some of the database objects copied to the `edbcopy` schema:
+
+```sql
+edb=# SET search_path TO edbcopy;
+SET
+edb=# \dt+
+__OUTPUT__
+                           List of relations
+ Schema  | Name    | Type  | Owner        | Size       | Description
+---------+---------+-------+--------------+------------+-------------
+ edbcopy | dept    | table | enterprisedb | 8192 bytes |
+ edbcopy | emp     | table | enterprisedb | 8192 bytes |
+ edbcopy | jobhist | table | enterprisedb | 8192 bytes |
+(3 rows)
+```
+```sql
+edb=# \dv
+__OUTPUT__
+           List of relations
+ Schema  | Name     | Type | Owner
+---------+----------+------+--------------
+ edbcopy | salesemp | view | enterprisedb
+(1 row)
+```
+```sql
+edb=# \di
+__OUTPUT__
+                  List of relations
+ Schema  | Name          | Type  | Owner        | Table
+---------+---------------+-------+--------------+---------
+ edbcopy | dept_dname_uq | index | enterprisedb | dept
+ edbcopy | dept_pk       | index | enterprisedb | dept
+ edbcopy | emp_pk        | index | enterprisedb | emp
+ edbcopy | jobhist_pk    | index | enterprisedb | jobhist
+(4 rows)
+```
+```sql
+edb=# \ds
+__OUTPUT__
+               List of relations
+ Schema  | Name       | Type     | Owner
+---------+------------+----------+--------------
+ edbcopy | next_empno | sequence | enterprisedb
+(1 row)
+```
+```sql
+edb=# SELECT DISTINCT schema_name, name, type FROM user_source WHERE
+schema_name = 'EDBCOPY' ORDER BY type, name;
+__OUTPUT__
+ schema_name |              name              |     type
+-------------+--------------------------------+--------------
+ EDBCOPY     | EMP_COMP                       | FUNCTION
+ EDBCOPY     | HIRE_CLERK                     | FUNCTION
+ EDBCOPY     | HIRE_SALESMAN                  | FUNCTION
+ EDBCOPY     | NEW_EMPNO                      | FUNCTION
+ EDBCOPY     | EMP_ADMIN                      | PACKAGE
+ EDBCOPY     | EMP_ADMIN                      | PACKAGE BODY
+ EDBCOPY     | EMP_QUERY                      | PROCEDURE
+ EDBCOPY     | EMP_QUERY_CALLER               | PROCEDURE
+ EDBCOPY     | LIST_EMP                       | PROCEDURE
+ EDBCOPY     | SELECT_EMP                     | PROCEDURE
+ EDBCOPY     | EMP_SAL_TRIG                   | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_19991" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_19992" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_19999" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_20000" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_20004" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_a_20005" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_19993" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_19994" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_20001" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_20002" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_20006" | TRIGGER
+ EDBCOPY     | "RI_ConstraintTrigger_c_20007" | TRIGGER
+ EDBCOPY     | USER_AUDIT_TRIG                | TRIGGER
+(24 rows)
+```
+
+## Performing a local copy of a schema as a batch job
+
+
+
+The `localcopyschema_nb` function copies a schema and its database objects in a local database specified in the `source_fdw` foreign server from the source schema to the specified target schema in the same database. The copy occurs in a non-blocking manner as a job submitted to DBMS_JOB.
+
+```sql
+localcopyschema_nb(
+  <source_fdw> TEXT,
+  <source> TEXT,
+  <target> TEXT,
+  <log_filename> TEXT
+  [, <on_tblspace> BOOLEAN
+  [, <verbose_on> BOOLEAN
+  [, <copy_acls> BOOLEAN
+  [, <worker_count> INTEGER ]]]]
+)
+```
+
+The function returns an `INTEGER` value: the job ID of the job submitted to DBMS_JOB. If the function fails, it returns null.
+
+The `source_fdw`, `source`, `target`, and `log_filename` parameters are required. All other parameters are optional.
+
+After the job completes, remove it with the `remove_log_file_and_job` function.
+
+### Parameters
+
+`source_fdw`
+
+Name of the foreign server managed by the `postgres_fdw` foreign data wrapper from which to clone database objects.
+
+`source`
+
+Name of the schema from which to clone database objects.
+
+`target`
+
+Name of the schema into which to clone database objects from the source schema.
+
+`log_filename`
+
+Name of the log file in which to record information from the function. The log file is created under the directory specified by the `log_directory` configuration parameter in the `postgresql.conf` file.
+
+`on_tblspace`
+
+Boolean value to specify whether to create database objects in their tablespaces. If `false`, then the `TABLESPACE` clause isn't included in the applicable `CREATE` DDL statement when added to the target schema. If `true`, then the `TABLESPACE` clause is included in the `CREATE` DDL statement when added to the target schema. The default value is `false`.
+
+`verbose_on`
+
+Boolean value to specify whether to print the DDLs in `log_filename` when creating objects in the target schema. If `false`, then DDLs aren't printed. If `true`, then DDLs are printed. The default value is `false`.
+
+`copy_acls`
+
+Boolean value to specify whether to include the access control list (ACL) while creating objects in the target schema. The access control list is the set of `GRANT` privilege statements. If `false`, then the access control list isn't included for the target schema. If `true`, then the access control list is included for the target schema. The default value is `false`.
+
+`worker_count`
+
+Number of background workers to perform the clone in parallel. The default value is `1`.
+
+### Example
+
+This example performs the same cloning operation as the example in [`localcopyschema`](#localcopyschema) but uses the non-blocking function `localcopyschema_nb`.
+
+The `localcopyschema_nb` function returns the job ID, shown as `4` in the example.
+
+```sql
+edb=# SELECT edb_util.localcopyschema_nb ('local_server','edb','edbcopy','clone_edb_edbcopy');
+__OUTPUT__
+ localcopyschema_nb
+--------------------
+                  4
+(1 row)
+```
+
+The following displays the job status:
+
+```sql
+edb=# SELECT edb_util.process_status_from_log('clone_edb_edbcopy');
+__OUTPUT__
+                              process_status_from_log
+---------------------------------------------------------------------------------------------------
+ (FINISH,"29-JUN-17 11:39:11.620093 -04:00",4618,INFO,"STAGE: FINAL","successfully cloned schema")
+(1 row)
+```
+
+The following removes the log file and the job:
+
+```sql
+edb=# SELECT edb_util.remove_log_file_and_job ('clone_edb_edbcopy',4);
+__OUTPUT__
+ remove_log_file_and_job
+-------------------------
+ t
+(1 row)
+```
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/edb_clone_schema_overview.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/edb_clone_schema_overview.mdx
new file mode 100644
index 00000000000..f0ee0fec73c
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/edb_clone_schema_overview.mdx
@@ -0,0 +1,89 @@
+---
+title: "EDB Clone Schema key concepts and limitations"
+navTitle: "Overview and limitations"
+description: "Provides an overview of the key features of EDB Clone Schema as well as important information about limitations"
+---
+
+## EDB Clone Schema functions
+
+The EDB Clone Schema functions are created in the `edb_util` schema when the `parallel_clone` and `edb_cloneschema` extensions are installed.
+
+## Prerequisites
+
+Verify the following conditions before using an EDB Clone Schema function:
+
+- You're connected to the target or local database as the database superuser defined in the `CREATE USER MAPPING` command for the foreign server of the target or local database.
+- The `edb_util` schema is in the search path, or invoke the cloning function with the `edb_util` prefix.
+- The target schema doesn't exist in the target database.
+- When using the remote copy functions, if the `on_tblspace` parameter is set to `true`, then the target database cluster contains all tablespaces that are referenced by objects in the source schema. Otherwise, creating the DDL statements for those database objects fails in the target schema, which causes a failure of the cloning process.
+- When using the remote copy functions, if you set the `copy_acls` parameter to `true`, then all roles that have `GRANT` privileges on objects in the source schema exist in the target database cluster. Otherwise, granting privileges to those roles fails in the target schema, which causes a failure of the cloning process.
+- In the `postgresql.conf` file, add the name of each database where EDB Clone Schema is to be installed or used. For example, if EDB Clone Schema is to be installed or used on the `edb` database, then add the following entry:
+  ```
+  edb_job_scheduler.database_list='edb'
+  ```
+
+  Also, add `parallel_clone` and `edb_job_scheduler` to the shared libraries in the `postgresql.conf` file:
+
+  ```
+  shared_preload_libraries='....,$libdir/parallel_clone, $libdir/edb_job_scheduler'
+  ```
+
+  Restart the database server to load the changes to the `postgresql.conf` file.
+
+## Overview of the functions
+
+Use the following functions with EDB Clone Schema:
+
+- `localcopyschema`. This function copies a schema and its database objects from a source database into the same database (the target) but with a different schema name from the original. Use this function when the source schema and the copy will reside within the same database. See [localcopyschema](copying_a_schema) for more information.
+- `localcopyschema_nb`. This function serves the same purpose as `localcopyschema` but runs as a background job, which frees up the terminal from which the function was initiated. This function is referred to as a *non-blocking* function. See [localcopyschema_nb](copying_a_schema) for more information.
+- `remotecopyschema`. This function copies a schema and its database objects from a source database to a different target database. Use this function when the source schema and the copy will reside in separate databases. The separate databases can reside in the same EDB Postgres Advanced Server database cluster or in different ones. See [remotecopyschema](copying_a_remote_schema) for more information.
+- `remotecopyschema_nb`. This function serves the same purpose as `remotecopyschema` but runs as a background job, which frees up the terminal from which the function was initiated. This function is a non-blocking function. See [remotecopyschema_nb](copying_a_remote_schema) for more information.
+- `process_status_from_log`. This function displays the status of the cloning functions. The information is obtained from a log file you specify when invoking a cloning function. See [process_status_from_log](checking_the_status) for more information.
+- `remove_log_file_and_job`. This function deletes the log file created by a cloning function. You can also use this function to delete a job created by the non-blocking form of the function. See [remove_log_file_and_job](performing_cleanup_tasks) for more information.
+
+## List of supported database objects
+
+You can clone these database objects from one schema to another:
+
+- Data types
+- Tables, including partitioned tables but excluding foreign tables
+- Indexes
+- Constraints
+- Sequences
+- View definitions
+- Materialized views
+- Private synonyms
+- Table triggers, excluding event triggers
+- Rules
+- Functions
+- Procedures
+- Packages
+- Comments for all supported object types
+- Access control lists (ACLs) for all supported object types
+
+You can't clone the following database objects:
+
+- Large objects (Postgres `LOBs` and `BFILEs`)
+- Logical replication attributes for a table
+- Database links
+- Foreign data wrappers
+- Foreign tables
+- Event triggers
+- Extensions
+- Row-level security
+- Policies
+- Operator classes
+
+For cloning objects that rely on extensions, see the limitations that follow.
+
+## Limitations
+
+The following limitations apply:
+
+- EDB Clone Schema is supported on EDB Postgres Advanced Server when you specify a dialect of **Compatible with Oracle** in the EDB Postgres Advanced Server Dialect dialog box during installation. It's also supported when you include the `--redwood-like` keywords during a text-mode installation or cluster initialization.
+- The source code in functions, procedures, triggers, packages, and so on isn't modified after being copied to the target schema. If such programs contain coded references to objects with schema names, the programs might fail when invoked in the target schema if such schema names are no longer consistent in the target schema.
+- Cross-schema object dependencies aren't resolved. If an object in the target schema depends on an object in another schema, this dependency isn't resolved by the cloning functions.
+- For remote cloning, if an object in the source schema depends on an extension, then you must create this extension in the public schema of the remote database before invoking the remote cloning function.
+- At most, 16 copy jobs can run in parallel to clone schemas. Each job can have at most 16 worker processes to copy table data in parallel.
+- You can't cancel queries run by background workers.
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/index.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/index.mdx
new file mode 100644
index 00000000000..a5755f91ddf
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/index.mdx
@@ -0,0 +1,23 @@
+---
+title: "Copying a database"
+navTitle: "EDB Clone Schema"
+indexCards: simple
+description: "How to use the EDB Clone Schema module to copy a schema and its database objects from a source database to a target database"
+navigation:
+  - edb_clone_schema_overview
+  - setting_up_edb_clone_schema
+  - copying_a_schema
+  - copying_a_remote_schema
+  - checking_the_status
+  - performing_cleanup_tasks
+---
+
+
+
+EDB Clone Schema is an extension module for EDB Postgres Advanced Server that allows you to copy a schema and its database objects from a local or remote database (the source database) to a receiving database (the target database).
+
+The source and target databases can be any of the following:
+
+- The same physical database
+- Different databases in the same database cluster
+- Separate databases running under different database clusters on separate database server hosts
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/performing_cleanup_tasks.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/performing_cleanup_tasks.mdx
new file mode 100644
index 00000000000..dd2784bd469
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/performing_cleanup_tasks.mdx
@@ -0,0 +1,68 @@
+---
+title: "Performing cleanup tasks"
+description: "Describes how to clean up log files"
+---
+
+
+
+The `remove_log_file_and_job` function performs cleanup tasks by removing the log files created by the schema cloning functions and the jobs created by the non-blocking functions.
+
+```sql
+remove_log_file_and_job (
+  { <log_file> TEXT |
+    <job_id> INTEGER |
+    <log_file> TEXT, <job_id> INTEGER
+  }
+)
+```
+
+You can specify values for either or both of the two parameters when invoking the `remove_log_file_and_job` function:
+
+- If you specify only `log_file`, then the function removes only the log file.
+- If you specify only `job_id`, then the function removes only the job.
+- If you specify both, then the function removes both the log file and the job.
+
+## Parameters
+
+`log_file`
+
+Name of the log file to remove.
+
+`job_id`
+
+Job ID of the job to remove.
+
+## Example
+
+This example removes only the log file, given the log file name:
+
+```sql
+edb=# SELECT edb_util.remove_log_file_and_job ('clone_edb_edbcopy');
+__OUTPUT__
+ remove_log_file_and_job
+-------------------------
+ t
+(1 row)
+```
+
+This example removes only the job, given the job ID:
+
+```sql
+edb=# SELECT edb_util.remove_log_file_and_job (3);
+__OUTPUT__
+ remove_log_file_and_job
+-------------------------
+ t
+(1 row)
+```
+
+This example removes the log file and the job, given both values:
+
+```sql
+tgtdb=# SELECT edb_util.remove_log_file_and_job ('clone_rmt_src_tgt',2);
+__OUTPUT__
+ remove_log_file_and_job
+-------------------------
+ t
+(1 row)
+```
diff --git a/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx
new file mode 100644
index 00000000000..b4726993910
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx
@@ -0,0 +1,301 @@
+---
+title: "Setting up EDB Clone Schema"
+description: "Describes how to install and configure the EDB Clone Schema extension"
+---
+
+To use EDB Clone Schema, you must first install several extensions, along with the PL/Perl language, on any database used as the source or target database by an EDB Clone Schema function.
+
+In addition, it might help to modify some configuration parameters in the `postgresql.conf` file of the database servers.
+
+## Installing extensions
+
+Perform this installation on any database to be used as the source or target database by an EDB Clone Schema function.
+
+1. Install the following extensions on the database: `postgres_fdw`, `dblink`, `edb_job_scheduler`, and `DBMS_JOB`:
+
+   ```sql
+   CREATE EXTENSION postgres_fdw SCHEMA public;
+   CREATE EXTENSION dblink SCHEMA public;
+   CREATE EXTENSION edb_job_scheduler;
+   CREATE EXTENSION dbms_job;
+   ```
+
+   For more information about using the `CREATE EXTENSION` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createextension.html).
+
+## Modifying the configuration file
+
+Modify the `postgresql.conf` file by adding `$libdir/parallel_clone` and `$libdir/edb_job_scheduler` to the `shared_preload_libraries` configuration parameter:
+
+```ini
+shared_preload_libraries = '$libdir/dbms_pipe,$libdir/dbms_aq,$libdir/parallel_clone,$libdir/edb_job_scheduler'
+```
+
+## Installing PL/Perl
+
+1. Install the Perl procedural language (PL/Perl) on the database. For Linux, install PL/Perl using the `edb-as<xx>-server-plperl` RPM package, where `<xx>` is the EDB Postgres Advanced Server version number. For Windows, use the EDB Postgres Language Pack. For details, see [EDB Postgres Language Pack](/language_pack/latest).
+
+1. Connect to the database as a superuser and run the following command:
+
+   ```sql
+   CREATE TRUSTED LANGUAGE plperl;
+   ```
+
+For more information about using the `CREATE LANGUAGE` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createlanguage.html).
+
+
+
+## Setting configuration parameters
+
+You might need to modify configuration parameters in the `postgresql.conf` file.
+
+### Performance configuration parameters
+
+You might need to tune the system for copying a large schema as part of one transaction. This tuning applies to the source database server referenced in a cloning function.
+
+You might need to tune the following configuration parameters in the `postgresql.conf` file:
+
+- `work_mem`. Specifies the amount of memory for internal sort operations and hash tables to use before writing to temporary disk files.
+- `maintenance_work_mem`. Specifies the maximum amount of memory for maintenance operations such as `VACUUM`, `CREATE INDEX`, and `ALTER TABLE ADD FOREIGN KEY` to use.
+- `max_worker_processes`. Sets the maximum number of background processes that the system can support.
+- `checkpoint_timeout`. Maximum time between automatic WAL checkpoints, in seconds.
+- `checkpoint_completion_target`. Specifies the target of checkpoint completion as a fraction of total time between checkpoints.
+- `checkpoint_flush_after`. Whenever more than `checkpoint_flush_after` bytes are written while performing a checkpoint, attempt to force the OS to issue these writes to the underlying storage.
+- `max_wal_size`. Maximum size to let the WAL grow to between automatic WAL checkpoints.
+- `max_locks_per_transaction`. Controls the average number of object locks allocated for each transaction. Individual transactions can lock more objects as long as the locks of all transactions fit in the lock table.
+
+For information about the configuration parameters, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/runtime-config.html).
+
+### Status logging
+
+Status logging by the cloning functions creates log files in the directory specified by the `log_directory` parameter in the `postgresql.conf` file for the database server to which you're connected when invoking the cloning function.
+
+The default location is `PGDATA/log`:
+
+```ini
+#log_directory = 'log'          # directory where log files are written,
+                                # can be absolute or relative to PGDATA
+```
+
+This directory must exist before running a cloning function.
+
+The name of the log file is determined by what you specify in the parameter list when invoking the cloning function.
+
+To display the status from a log file, use the `process_status_from_log` function.
+
+To delete a log file, use the `remove_log_file_and_job` function, or delete it manually from the log directory.
+
+## Installing EDB Clone Schema
+
+Install EDB Clone Schema on any database to be used as the source or target database by an EDB Clone Schema function.
+
+1. If you previously installed an older version of the `edb_cloneschema` extension, run the following command:
+
+   ```sql
+   DROP EXTENSION parallel_clone CASCADE;
+   ```
+
+   This command also drops the `edb_cloneschema` extension.
+
+1. Install the extensions. Make sure that you create the `parallel_clone` extension before creating the `edb_cloneschema` extension.
+
+   ```sql
+   CREATE EXTENSION parallel_clone SCHEMA public;
+
+   CREATE EXTENSION edb_cloneschema;
+   ```
+
+## Creating the log directory
+
+A log directory is required to store all the log files.
+
+After creating the extensions, run the following statement as a superuser to create the log directory:
+
+```sql
+SELECT edb_util.create_clone_log_dir();
+```
+
+It returns `true` on successful execution.
+
+## Creating the foreign servers and user mappings
+
+When you use one of the local cloning functions, `localcopyschema` or `localcopyschema_nb`, one of the required parameters is a single foreign server.
+This foreign server identifies the database server and the database that act as both the source and the receiver of the cloned schema.
+
+When you use one of the remote cloning functions, `remotecopyschema` or `remotecopyschema_nb`, two of the required parameters are foreign servers. The foreign server specified as the first parameter identifies the source database server and the database that's the provider of the cloned schema. The foreign server specified as the second parameter identifies the target database server and the database that's the receiver of the cloned schema.
+
+For each foreign server, you must create a user mapping. When a selected database superuser invokes a cloning function, that superuser must be mapped to a database user name and password that has access to the foreign server that's specified as a parameter in the cloning function.
+
+For general information about foreign data, foreign servers, and user mappings, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/ddl-foreign-data.html).
+
+### Foreign server and user mapping for local cloning functions
+
+For the `localcopyschema` and `localcopyschema_nb` functions, the source and target schemas are both in the same database of the same database server. You must define and specify only one foreign server for these functions. This foreign server is also referred to as the *local server* because this server is the one to which you must be connected when invoking the `localcopyschema` or `localcopyschema_nb` function.
+
+The user mapping defines the connection and authentication information for the foreign server. You must create this foreign server and user mapping in the database of the local server in which the cloning occurs.
+
+The database user for whom the user mapping is defined must be a superuser and connected to the local server when invoking an EDB Clone Schema function.
+
+This example creates the foreign server for the database containing the schema to clone and to receive the cloned schema:
+
+```sql
+CREATE SERVER local_server FOREIGN DATA WRAPPER postgres_fdw
+  OPTIONS(
+    host 'localhost',
+    port '5444',
+    dbname 'edb'
+);
+```
+
+For more information about using the `CREATE SERVER` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createserver.html).
+
+The user mapping for this server is:
+
+```sql
+CREATE USER MAPPING FOR enterprisedb SERVER local_server
+  OPTIONS (
+    user 'enterprisedb',
+    password 'password'
+);
+```
+
+For more information about using the `CREATE USER MAPPING` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-createusermapping.html).
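+
+If you want to confirm that the foreign server and user mapping resolve correctly before running a clone, one optional sanity check — not part of the EDB Clone Schema workflow itself — is to query through dblink, which was installed during setup. dblink accepts the name of a foreign server in place of a connection string and authenticates with the invoking user's mapping:
+
+```sql
+-- Optional check (a sketch): connect through local_server using the current
+-- user's mapping and confirm which database it reaches.
+SELECT * FROM dblink('local_server', 'SELECT current_database()') AS t(dbname TEXT);
+```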
+ +These psql commands show the foreign server and user mapping: + +```sql +edb=# \des+ +__OUTPUT__ +List of foreign servers +-[ RECORD 1 ]--------+---------------------------------------------- +Name | local_server +Owner | enterprisedb +Foreign-data wrapper | postgres_fdw +Access privileges | +Type | +Version | +FDW options | (host 'localhost', port '5444', dbname 'edb') +Description | +``` +```sql +edb=# \deu+ +__OUTPUT__ + List of user mappings + Server | User name | FDW options +--------------+--------------+---------------------------------------------- + local_server | enterprisedb | ("user" 'enterprisedb', password 'password') +(1 row) +``` + +When database superuser `enterprisedb` invokes a cloning function, the database user `enterprisedb` with its password is used to connect to `local_server` on the `localhost` with port `5444` to database `edb`. + +In this case, the mapped database user, `enterprisedb`, and the database user, `enterprisedb`, used to connect to the local `edb` database are the same database user. However, that's not required. + +For specific use of these foreign server and user mapping examples, see the example given in [`localcopyschema`](copying_a_schema). + +### Foreign server and user mapping for remote cloning functions + +For the `remotecopyschema` and `remotecopyschema_nb` functions, the source and target schemas are in different databases of either the same or different database servers. You must define and specify two foreign servers for these functions. + +The foreign server defining the originating database server and its database containing the source schema to clone is referred to as the *source server* or the *remote server*. + +The foreign server defining the database server and its database to receive the schema to clone is referred to as the *target server* or the *local server*. The target server is also referred to as the local server because this server is the one to which you must be connected when invoking the `remotecopyschema` or `remotecopyschema_nb` function. + +The user mappings define the connection and authentication information for the foreign servers. You must create all of these foreign servers and user mappings in the target database of the target/local server. The database user for whom the user mappings are defined must be a superuser and the user connected to the local server when invoking an EDB Clone Schema function. 
+
+This example creates the foreign server for the local, target database that receives the cloned schema:
+
+```sql
+CREATE SERVER tgt_server FOREIGN DATA WRAPPER postgres_fdw
+  OPTIONS(
+    host 'localhost',
+    port '5444',
+    dbname 'tgtdb'
+);
+```
+
+The user mapping for this server is:
+
+```sql
+CREATE USER MAPPING FOR enterprisedb SERVER tgt_server
+  OPTIONS (
+    user 'tgtuser',
+    password 'tgtpassword'
+);
+```
+
+This example creates the foreign server for the remote source database that provides the schema to clone:
+
+```sql
+CREATE SERVER src_server FOREIGN DATA WRAPPER postgres_fdw
+  OPTIONS(
+    host '192.168.2.28',
+    port '5444',
+    dbname 'srcdb'
+);
+```
+
+The user mapping for this server is:
+
+```sql
+CREATE USER MAPPING FOR enterprisedb SERVER src_server
+  OPTIONS (
+    user 'srcuser',
+    password 'srcpassword'
+);
+```
+
+### Displaying foreign servers and user mappings
+
+These psql commands show the foreign servers and user mappings:
+
+```sql
+tgtdb=# \des+
+__OUTPUT__
+List of foreign servers
+-[ RECORD 1 ]--------+---------------------------------------------------
+Name                 | src_server
+Owner                | tgtuser
+Foreign-data wrapper | postgres_fdw
+Access privileges    |
+Type                 |
+Version              |
+FDW options          | (host '192.168.2.28', port '5444', dbname 'srcdb')
+Description          |
+-[ RECORD 2 ]--------+---------------------------------------------------
+Name                 | tgt_server
+Owner                | tgtuser
+Foreign-data wrapper | postgres_fdw
+Access privileges    |
+Type                 |
+Version              |
+FDW options          | (host 'localhost', port '5444', dbname 'tgtdb')
+Description          |
+```
+```sql
+tgtdb=# \deu+
+__OUTPUT__
+              List of user mappings
+   Server   |  User name   |                 FDW options
+------------+--------------+---------------------------------------------
+ src_server | enterprisedb | ("user" 'srcuser', password 'srcpassword')
+ tgt_server | enterprisedb | ("user" 'tgtuser', password 'tgtpassword')
+(2 rows)
+```
+
+When database superuser `enterprisedb` invokes a cloning function, the database user `tgtuser` with password `tgtpassword` is used to connect to `tgt_server` on the `localhost` with port `5444` to database `tgtdb`.
+
+In addition, database user `srcuser` with password `srcpassword` connects to `src_server` on host `192.168.2.28` with port `5444` to database `srcdb`.
+
+!!! Note
+    Be sure the `pg_hba.conf` file of the database server running the source database `srcdb` has an appropriate entry. This entry must permit connection from the target server location (address `192.168.2.27` in the following example) with the database user `srcuser` that was included in the user mapping for the foreign server `src_server` defining the source server and database.
+
+```shell
+# TYPE  DATABASE        USER            ADDRESS                 METHOD
+
+# "local" is for Unix domain socket connections only
+local   all             all                                     md5
+# IPv4 local connections:
+host    srcdb           srcuser         192.168.2.27/32         md5
+```
+
+For specific use of these foreign server and user mapping examples, see the example given in [`remotecopyschema`](copying_a_remote_schema).
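+
+Before running a remote clone, you can optionally test both foreign servers the same way as in the local case: through dblink, which accepts a foreign server name in place of a connection string and authenticates with the invoking user's mapping. Testing `src_server` this way also exercises the `pg_hba.conf` entry on the source host, because the connection originates from the target server:
+
+```sql
+-- Optional checks (a sketch): each call connects with the invoking user's mapping.
+SELECT * FROM dblink('tgt_server', 'SELECT current_database()') AS t(dbname TEXT);
+SELECT * FROM dblink('src_server', 'SELECT current_database()') AS t(dbname TEXT);
+```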
+
diff --git a/product_docs/docs/epas/17/database_administration/index.mdx b/product_docs/docs/epas/17/database_administration/index.mdx
new file mode 100644
index 00000000000..48712593d96
--- /dev/null
+++ b/product_docs/docs/epas/17/database_administration/index.mdx
@@ -0,0 +1,11 @@
+---
+title: "Database administration"
+indexCards: simple
+navigation:
+- 01_configuration_parameters
+- 10_edb_resource_manager
+- 02_edb_loader
+- 14_edb_clone_schema
+---
+
+EDB Postgres Advanced Server includes features to help you maintain, secure, and operate EDB Postgres Advanced Server databases.
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/epas_platform_support/index.mdx b/product_docs/docs/epas/17/epas_platform_support/index.mdx
new file mode 100644
index 00000000000..ce54e41ab45
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_platform_support/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Supported platforms"
+description: "Provides information for determining the platform support for EDB Postgres Advanced Server"
+redirects:
+  - ../epas_inst_linux/02_supported_platforms
+  - /epas/latest/epas_platform_support/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+EDB Postgres Advanced Server supports installations on Linux and Windows platforms.
+
+To learn about platform support for EDB Postgres Advanced Server, see the [Platform Compatibility page](https://www.enterprisedb.com/platform-compatibility#epas) on the EDB website, or see [Installing EDB Postgres Advanced Server](../installing).
+
+
+
diff --git a/product_docs/docs/epas/17/epas_rel_notes/epas17_2_rel_notes.mdx b/product_docs/docs/epas/17/epas_rel_notes/epas17_2_rel_notes.mdx
new file mode 100644
index 00000000000..fa8e3ce50bf
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_rel_notes/epas17_2_rel_notes.mdx
@@ -0,0 +1,34 @@
+---
+title: EDB Postgres Advanced Server 17.2 release notes
+navTitle: "Version 17.2"
+---
+
+Released: 22 Nov 2024
+
+EDB Postgres Advanced Server 17.2 includes the following enhancements and bug fixes:
+
+!!! Note Deprecation
+With the release of EPAS 17, DRITA is deprecated, and it won't be included in EPAS 18. This decision follows the introduction of [EDB’s PWR](/pwr/latest/) and [`edb_wait_states`](/pg_extensions/wait_states/) extensions, which offer improved diagnostic capabilities compared to DRITA. These new extensions provide a diagnostic experience more closely aligned with Oracle’s AWR reports, making DRITA obsolete.
+!!!
+
+!!! Note
+The `pgAgent` and `adminpack` packages are end of life as of EPAS 17.
+!!!
+
+| Type           | Description | Category |
+|----------------|-------------|----------|
+| Upstream merge | Merged with community PostgreSQL 17.2. See the [PostgreSQL 17 Release Notes](https://www.postgresql.org/docs/17/release-17-2.html) for more information. | |
+| Feature        | Added support for the Oracle-compatible `BFILE` native datatype and the `DBMS_LOB` package APIs. For more information, see [DBMS_LOB](../reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/). | |
+| Feature        | Added support for the Oracle-compatible `DBMS_XMLDOM` package to provide an interface for HTML and XML documents. For more information, see [DBMS_XMLDOM](../reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/dbms_xmldom). | |
+| Feature        | Added support for the Oracle-compatible `DBMS_ASSERT` package to validate input properties and sanitize user input, thereby reducing the risk of SQL injection. For more information, see [DBMS_ASSERT](../reference/oracle_compatibility_reference/epas_compat_bip_guide/03_built-in_packages/01a_dbms_assert). | |
+| Feature        | Added support for the Oracle-equivalent `NLS_UPPER`, `NLS_LOWER`, and `NLS_INITCAP` functions. For more information, see [NLS functions](../reference/sql_reference/03_functions_and_operators/nls_functions). | |
+| Feature        | Implemented the `alteruser` utility to modify roles in a cluster. For more information, see [alteruser utility](/tools/alteruser_utility/). | |
+| Enhancement    | Added support for the Oracle-compatible `FORALL..MERGE` and `FORALL..SAVE EXCEPTIONS` statements. For more information, see [FORALL statement](../application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement/). | |
+| Enhancement    | Added support for the `XMLType` data type, which can be used as an object type and has predefined member functions and constructors. For more information, see [XMLType datatype](../reference/sql_reference/02_data_types/06_xml_type). | |
+| Enhancement    | Added support for the JSON log format in EDB auditing. This enables you to create audit reports in XML, CSV, or JSON format. | |
+| Enhancement    | Added support for READ and WRITE privileges for directory objects. | |
+| Enhancement    | Added support for READ and WRITE directory permissions in the `UTL_FILE` APIs. | |
+| Enhancement    | Implemented `IMPORT FOREIGN SCHEMA`. This command imports a foreign schema from a foreign server or a database link. | |
+| Enhancement    | Added support for the `ANYCOMPATIBLE` and `"any"` pseudotypes in the `NVL` function, which now supports more combinations of argument types. | |
+| Enhancement    | Audited all of EDB Postgres Advanced Server's modified external function signatures so that PostgreSQL-compatible extensions can be compiled along with EDB Postgres Advanced Server. | |
+| Enhancement    | EDB*Loader: Enhanced terminator and delimiter matching behavior to consider the server encoding. | |
diff --git a/product_docs/docs/epas/17/epas_rel_notes/index.mdx b/product_docs/docs/epas/17/epas_rel_notes/index.mdx
new file mode 100644
index 00000000000..2b9e6d61881
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_rel_notes/index.mdx
@@ -0,0 +1,35 @@
+---
+navTitle: Release notes
+title: "EDB Postgres Advanced Server release notes"
+navigation:
+- epas17_2_rel_notes
+
+---
+
+EDB Postgres Advanced Server 17.2 is built on open-source PostgreSQL 17.2, which introduces myriad enhancements that enable databases to scale up and scale out in more efficient ways.
+
+The EDB Postgres Advanced Server documentation describes the latest version of EDB Postgres Advanced Server 17.2, including minor releases and patches. These release notes provide information on what was new in each release.
+
+| Version                    | Release date | Upstream merges                                              |
+|----------------------------|--------------|--------------------------------------------------------------|
+| [17.2](epas17_2_rel_notes) | 22 Nov 2024  | [17.2](https://www.postgresql.org/docs/17/release-17-2.html) |
+
+## Component certification
+
+These components are included in the EDB Postgres Advanced Server v17.2 release:
+
+- PL Debugger 1.8
+- pg_catcheck 1.6.0
+- EDB Query Advisor 1.1.2
+- EDB Wait States 1.4.0
+- EDB Clone Schema
+- EDB Parallelclone
+- EDB*Plus 41.2.0
+- SQL Profiler 4.1.2
+- SPL Check 1.0.2
+
+## Support announcements
+
+### Backup and Recovery Tool (BART) incompatibility
+
+The EDB Backup and Recovery Tool (BART) isn't supported by EDB Postgres Advanced Server or PostgreSQL version 14 and later. We strongly recommend that you move to [Barman](/supported-open-source/barman/) or [pgBackRest](/supported-open-source/pgbackrest/) as your backup and recovery tool.
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/01_sql_protect_overview.mdx b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/01_sql_protect_overview.mdx
new file mode 100644
index 00000000000..87bbbfee77f
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/01_sql_protect_overview.mdx
@@ -0,0 +1,99 @@
+---
+title: "SQL/Protect overview"
+description: "Provides an overview of how SQL/Protect guards against different types of SQL injection attacks"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.28.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.073.html"
+---
+
+
+
+SQL/Protect guards against different types of SQL injection attacks.
+
+## Types of SQL injection attacks
+
+A number of different techniques are used to perpetrate SQL injection attacks. Each technique is characterized by a certain *signature*. SQL/Protect examines queries for the following signatures.
+
+### Unauthorized relations
+
+While EDB Postgres Advanced Server allows administrators to restrict access to relations (such as tables and views), many administrators don't perform this tedious task. SQL/Protect provides a *learn* mode that tracks the relations a user accesses.
+
+This mode allows administrators to examine the workload of an application while SQL/Protect learns the relations the application is allowed to access for a given user or group of users in a role.
+
+When SQL/Protect is switched to *passive* or *active* mode, the incoming queries are checked against the list of learned relations.
+
+### Utility commands
+
+A common technique used in SQL injection attacks is to run utility commands, which are typically SQL data definition language (DDL) statements. An example is creating a user-defined function that can access other system resources.
+
+SQL/Protect can block the running of all utility commands that aren't normally needed during standard application processing.
+
+### SQL tautology
+
+The most frequent technique used in SQL injection attacks is issuing a tautological `WHERE` clause condition (that is, using a condition that is always true).
+
+The following is an example:
+
+ `WHERE password = 'x' OR 'x'='x'`
+
+Attackers usually start identifying security weaknesses using this technique. SQL/Protect can block queries that use a tautological conditional clause.
+
+### Unbounded DML statements
+
+A dangerous action taken during SQL injection attacks is running unbounded DML statements. These are `UPDATE` and `DELETE` statements with no `WHERE` clause. For example, an attacker might update all users’ passwords to a known value or initiate a denial-of-service attack by deleting all of the data in a key table.
+
+## Monitoring SQL injection attacks
+
+SQL/Protect can monitor and report on SQL injection attacks.
+
+### Protected roles
+
+Monitoring for SQL injection attacks involves analyzing SQL statements originating in database sessions where the current user of the session is a *protected role*. A protected role is an EDB Postgres Advanced Server user or group that the database administrator chooses to monitor using SQL/Protect. (In EDB Postgres Advanced Server, users and groups are collectively referred to as *roles*.)
+
+You can customize each protected role for the types of SQL injection attacks it's being monitored for. This approach provides different levels of protection by role and significantly reduces the user-maintenance load for DBAs.
+
+You can't make a role with the superuser privilege a protected role. If a protected non-superuser role later becomes a superuser, certain behaviors occur when that superuser tries to issue any command:
+
+- SQL/Protect issues a warning message for every command issued by the protected superuser.
+- The statistic in the `superusers` column of `edb_sql_protect_stats` is incremented with every command issued by the protected superuser. See [Attack attempt statistics](#attack-attempt-statistics) for information on the `edb_sql_protect_stats` view.
+- SQL/Protect in active mode prevents all commands issued by the protected superuser from running.
+
+Either alter a protected role that has the superuser privilege so that it's no longer a superuser, or revert it to an unprotected role.
+
+### Attack attempt statistics
+
+SQL/Protect records each use of a command by a protected role that's considered an attack. It collects statistics by type of SQL injection attack, as discussed in [Types of SQL injection attacks](../02_protecting_against_sql_injection_attacks/01_sql_protect_overview/#types-of-sql-injection-attacks).
+
+You can access these statistics from the view `edb_sql_protect_stats`. You can easily monitor this view to identify the start of a potential attack; a sample query follows the column list below.
+
+The columns in `edb_sql_protect_stats` monitor the following:
+
+- **username.** Name of the protected role.
+- **superusers.** Number of SQL statements issued when the protected role is a superuser. In effect, any SQL statement issued by a protected superuser increases this statistic. See [Protected roles](#protected-roles) for information about protected superusers.
+- **relations.** Number of SQL statements issued referencing relations that weren't learned by a protected role. (These relations aren't in a role’s protected relations list.)
+- **commands.** Number of DDL statements issued by a protected role.
+- **tautology.** Number of SQL statements issued by a protected role that contained a tautological condition.
+- **dml.** Number of `UPDATE` and `DELETE` statements issued by a protected role that didn't contain a `WHERE` clause.
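+
+For example, to check the current counters for all protected roles in a database, you can query the view directly. A minimal sketch (no sample output is shown because the values depend entirely on your workload):
+
+```sql
+-- edb_sql_protect_stats lives in the sqlprotect schema, so qualify it
+-- unless that schema is on your search_path.
+SELECT username, superusers, relations, commands, tautology, dml
+  FROM sqlprotect.edb_sql_protect_stats;
+```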
+
+These statistics give database administrators a chance to react proactively to prevent theft of valuable data or other malicious actions.
+
+If a role is protected in more than one database, the role’s statistics for attacks in each database are maintained separately and are viewable only when connected to the respective database.
+
+!!! Note
+    SQL/Protect statistics are maintained in memory while the database server is running. When the database server is shut down, the statistics are saved to a binary file named `edb_sqlprotect.stat` in the `data/global` subdirectory of the EDB Postgres Advanced Server home directory.
+
+### Attack attempt queries
+
+Each use of a command by a protected role that's considered an attack by SQL/Protect is recorded in the `edb_sql_protect_queries` view, which contains the following columns:
+
+- **username.** Database user name of the attacker used to log into the database server.
+- **ip_address.** IP address of the machine from which the attack was initiated.
+- **port.** Port number from which the attack originated.
+- **machine_name.** Name of the machine from which the attack originated, if known.
+- **date_time.** Date and time when the database server received the query. The time is stored to the precision of a minute.
+- **query.** The query string sent by the attacker.
+
+The maximum number of offending queries that are saved in `edb_sql_protect_queries` is controlled by the `edb_sql_protect.max_queries_to_save` configuration parameter.
+
+If a role is protected in more than one database, the role’s queries for attacks in each database are maintained separately. They are viewable only when connected to the respective database.
diff --git a/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/02_configuring_sql_protect.mdx b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/02_configuring_sql_protect.mdx
new file mode 100644
index 00000000000..3b453a1dfb9
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/02_configuring_sql_protect.mdx
@@ -0,0 +1,508 @@
+---
+title: "Configuring SQL/Protect"
+description: "Describes the various ways you can configure SQL/Protect"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.29.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.074.html"
+---
+
+
+
+You can configure how SQL/Protect operates.
+
+## Prerequisites
+
+Meet the following prerequisites before configuring SQL/Protect:
+
+- The library file (`sqlprotect.so` on Linux, `sqlprotect.dll` on Windows) needed to run SQL/Protect is installed in the `lib` subdirectory of your EDB Postgres Advanced Server home directory. For Windows, the EDB Postgres Advanced Server installer does this. For Linux, install the `edb-as<xx>-server-sqlprotect` RPM package, where `<xx>` is the EDB Postgres Advanced Server version number.
+
+- You need the SQL script file `sqlprotect.sql` located in the `share/contrib` subdirectory of your EDB Postgres Advanced Server home directory.
+
+- You must configure the database server to use SQL/Protect, and you must configure each database that you want SQL/Protect to monitor:
+
+  - You must modify the database server configuration file `postgresql.conf` by adding and enabling configuration parameters used by SQL/Protect.
+  - Install database objects used by SQL/Protect in each database that you want SQL/Protect to monitor.
+
+## Configuring the module
+
+1. Edit the following configuration parameters in the `postgresql.conf` file located in the `data` subdirectory of your EDB Postgres Advanced Server home directory:
+
+   - `shared_preload_libraries`. Add `$libdir/sqlprotect` to the list of libraries.
+
+   - `edb_sql_protect.enabled`. Controls whether SQL/Protect is actively monitoring protected roles by analyzing SQL statements issued by those roles and reacting according to the setting of `edb_sql_protect.level`. When you're ready to begin monitoring with SQL/Protect, set this parameter to `on`. The default is `off`.
+
+   - `edb_sql_protect.level`. Sets the action taken by SQL/Protect when a SQL statement is issued by a protected role. The default behavior is `passive`. Initially, set this parameter to `learn`. See [Setting the protection level](#setting-the-protection-level) for more information.
+
+   - `edb_sql_protect.max_protected_roles`. Sets the maximum number of roles to protect. The default is `64`.
+
+   - `edb_sql_protect.max_protected_relations`. Sets the maximum number of relations to protect per role. The default is `1024`.
+
+     The total number of protected relations for the server is the number of protected relations per role times the number of protected roles. Every protected relation consumes space in shared memory. The space for the maximum possible protected relations is reserved during database server startup.
+
+   - `edb_sql_protect.max_queries_to_save`. Sets the maximum number of offending queries to save in the `edb_sql_protect_queries` view. The default is `5000`. If the number of offending queries reaches the limit, additional queries aren't saved in the view but are accessible in the database server log file.
+
+     The minimum valid value for this parameter is `100`. If you specify a value less than `100`, the database server starts using the default setting of `5000`. A warning message is recorded in the database server log file.
+
+   This example shows the settings of these parameters in the `postgresql.conf` file:
+
+```ini
+shared_preload_libraries = '$libdir/dbms_pipe,$libdir/edb_gen,$libdir/sqlprotect'
+                                        # (change requires restart)
+   .
+   .
+   .
+edb_sql_protect.enabled = off
+edb_sql_protect.level = learn
+edb_sql_protect.max_protected_roles = 64
+edb_sql_protect.max_protected_relations = 1024
+edb_sql_protect.max_queries_to_save = 5000
+```
+
+2. After you modify the `postgresql.conf` file, restart the database server.
+
+   - **On Linux:** Invoke the EDB Postgres Advanced Server service script with the `restart` option.
+
+     On a Red Hat or CentOS 7.x installation, use the command:
+
+     ```shell
+     systemctl restart edb-as-17
+     ```
+
+   - **On Windows:** Use the Windows Services applet to restart the service named `edb-as-17`.
+
+3. For each database that you want to protect from SQL injection attacks, connect to the database as a superuser (either `enterprisedb` or `postgres`, depending on your installation options). Then run the script `sqlprotect.sql`, located in the `share/contrib` subdirectory of your EDB Postgres Advanced Server home directory.
The script creates the SQL/Protect database objects in a schema named `sqlprotect`. + +This example shows the process to set up protection for a database named `edb`: + + +```sql +$ /usr/edb/as17/bin/psql -d edb -U enterprisedb +Password for user enterprisedb: +psql.bin (17.2.0, server 17.2.0) +Type "help" for help. + +edb=# \i /usr/edb/as17/share/contrib/sqlprotect.sql +CREATE SCHEMA +GRANT +SET +CREATE TABLE +GRANT +CREATE TABLE +GRANT +CREATE FUNCTION +CREATE FUNCTION +CREATE FUNCTION +CREATE FUNCTION +CREATE FUNCTION +CREATE FUNCTION +CREATE FUNCTION +DO +CREATE FUNCTION +CREATE FUNCTION +DO +CREATE VIEW +GRANT +DO +CREATE VIEW +GRANT +CREATE VIEW +GRANT +CREATE FUNCTION +CREATE FUNCTION +SET +``` + +## Selecting roles to protect + +After you create the SQL/Protect database objects in a database, you can select the roles for which to monitor SQL queries for protection and the level of protection to assign to each role. + +### Setting the protected roles list + +For each database that you want to protect, you must determine the roles you want to monitor and then add those roles to the *protected roles list* of that database. + +1. Connect as a superuser to a database that you want to protect with either `psql` or the Postgres Enterprise Manager client: + + +```sql +$ /usr/edb/as17/bin/psql -d edb -U enterprisedb +Password for user enterprisedb: +psql.bin (17.2.0, server 17.2.0) +Type "help" for help. + +edb=# +``` + +2. Since the SQL/Protect tables, functions, and views are built under the `sqlprotect` schema, use the `SET search_path` command to include the `sqlprotect` schema in your search path. Doing so eliminates the need to schema-qualify any operation or query involving SQL/Protect database objects: + +```sql +edb=# SET search_path TO sqlprotect; +SET +``` + +3. You must add each role that you want to protect to the protected roles list. This list is maintained in the table `edb_sql_protect`. + + To add a role, use the function `protect_role('rolename')`. This example protects a role named `appuser`: + +```sql +edb=# SELECT protect_role('appuser'); +__OUTPUT__ + protect_role +-------------- + +(1 row) +``` + +You can list the roles that were added to the protected roles list with the following query: + +```sql +edb=# SELECT * FROM edb_sql_protect; +__OUTPUT__ + dbid | roleid | protect_relations | allow_utility_cmds | allow_tautology | + allow_empty_dml +-------+--------+-------------------+--------------------+-----------------+-- +------------- + 13917 | 16671 | t | f | f | f +(1 row) +``` + +A view is also provided that gives the same information using the object names instead of the object identification numbers (OIDs): + +```sql +edb=# \x +Expanded display is on. +edb=# SELECT * FROM list_protected_users; +__OUTPUT__ +-[ RECORD 1 ]------+-------- +dbname | edb +username | appuser +protect_relations | t +allow_utility_cmds | f +allow_tautology | f +allow_empty_dml | f +``` + + + +### Setting the protection level + +The `edb_sql_protect.level` configuration parameter sets the protection level, which defines the behavior of SQL/Protect when a protected role issues a SQL statement. The defined behavior applies to all roles in the protected roles lists of all databases configured with SQL/Protect in the database server. + +You can set the `edb_sql_protect.level` configuration parameter in the `postgresql.conf` file to one of the following values to specify learn, passive, or active mode: + +- `learn`. 
Tracks the activities of protected roles and records the relations used by the roles. Use this mode when first configuring SQL/Protect so the expected behaviors of the protected applications are learned.
+- `passive`. Issues warnings if protected roles are breaking the defined rules but doesn't stop any SQL statements from executing. This mode is the next step after SQL/Protect learns the expected behavior of the protected roles. It essentially operates in intrusion detection mode. You can run this mode in production when proper monitoring is in place.
+- `active`. Stops all invalid statements for a protected role. This mode behaves as a SQL firewall, preventing dangerous queries from running. This approach is particularly effective against early penetration testing when the attacker is trying to find the vulnerability point and the type of database behind the application. Not only does SQL/Protect close those vulnerability points, it tracks the blocked queries. This tracking can alert administrators before the attacker finds another way to penetrate the system.
+
+The default mode is `passive`.
+
+If you're using SQL/Protect for the first time, set `edb_sql_protect.level` to `learn`.
+
+## Monitoring protected roles
+
+After you configure SQL/Protect in a database, add roles to the protected roles list, and set the desired protection level, you can activate SQL/Protect in `learn`, `passive`, or `active` mode. You can then start running your applications.
+
+With a new SQL/Protect installation, the first step is to determine the relations that protected roles are allowed to access during normal operation. Learn mode allows a role to run its applications while SQL/Protect records the relations they access. These relations are added to the role’s *protected relations list*, stored in the table `edb_sql_protect_rel`.
+
+Monitoring for protection against attack begins when you run SQL/Protect in passive or active mode. In passive and active modes, the role is permitted to access the relations in its protected relations list. These are the specified relations the role can access during typical usage.
+
+However, if a role attempts to access a relation that isn't in its protected relations list, SQL/Protect returns a `WARNING` or `ERROR` severity-level message. The role’s attempted action on the relation might not be carried out, depending on whether the mode is passive or active.
+
+
+
+### Learn mode
+
+To activate SQL/Protect in learn mode:
+
+1. Set the parameters in the `postgresql.conf` file:
+
+```ini
+edb_sql_protect.enabled = on
+edb_sql_protect.level = learn
+```
+
+2. Reload the `postgresql.conf` file. From the EDB Postgres Advanced Server application menu, select **Reload Configuration > Expert Configuration**.
+
+   For an alternative method of reloading the configuration file, use the `pg_reload_conf` function. Be sure you're connected to a database as a superuser, and execute the `pg_reload_conf` function:
+
+```sql
+edb=# SELECT pg_reload_conf();
+__OUTPUT__
+ pg_reload_conf
+----------------
+ t
+(1 row)
+```
+
+3. Allow the protected roles to run their applications.
+ + For example, the following queries are issued in the `psql` application by the protected role `appuser`: + +```sql +edb=> SELECT * FROM dept; +__OUTPUT__ +NOTICE: SQLPROTECT: Learned relation: 16384 + deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON +(4 rows) +``` +```sql +edb=> SELECT empno, ename, job FROM emp WHERE deptno = 10; +__OUTPUT__ +NOTICE: SQLPROTECT: Learned relation: 16391 + empno | ename | job +-------+--------+----------- + 7782 | CLARK | MANAGER + 7839 | KING | PRESIDENT + 7934 | MILLER | CLERK +(3 rows) +``` + + SQL/Protect generates a `NOTICE` severity-level message, indicating the relation was added to the role’s protected relations list. + + In SQL/Protect learn mode, SQL statements that are cause for suspicion aren't prevented from executing. However, a message is issued to alert the user to potentially dangerous statements: + +```sql +edb=> CREATE TABLE appuser_tab (f1 INTEGER); +NOTICE: SQLPROTECT: This command type is illegal for this user +CREATE TABLE +edb=> DELETE FROM appuser_tab; +NOTICE: SQLPROTECT: Learned relation: 16672 +NOTICE: SQLPROTECT: Illegal Query: empty DML +DELETE 0 +``` + +4. As a protected role runs applications, you can query the SQL/Protect tables to see that relations were added to the role’s protected relations list. Connect as a superuser to the database you're monitoring, and set the search path to include the `sqlprotect` schema: + +```sql +edb=# SET search_path TO sqlprotect; +SET +``` + + Query the `edb_sql_protect_rel` table to see the relations added to the protected relations list: + +```sql +edb=# SELECT * FROM edb_sql_protect_rel; +__OUTPUT__ + dbid | roleid | relid +--------+--------+------- + 13917 | 16671 | 16384 + 13917 | 16671 | 16391 + 13917 | 16671 | 16672 +(3 rows) +``` + + The `list_protected_rels` view provides more comprehensive information along with the object names instead of the OIDs: + +```sql +edb=# SELECT * FROM list_protected_rels; +__OUTPUT__ + Database | Protected User | Schema | Name | Type | Owner +----------+----------------+--------+-------------+-------+------------ + edb | appuser | public | dept | Table | enterprisedb + edb | appuser | public | emp | Table | enterprisedb + edb | appuser | public | appuser_tab | Table | appuser +(3 rows) +``` + +### Passive mode + +After a role’s applications have accessed all relations they need, you can change the protection level so that SQL/Protect can actively monitor the incoming SQL queries and protect against SQL injection attacks. + +Passive mode is a less restrictive protection mode than active. + +1. To activate SQL/Protect in passive mode, set the following parameters in the `postgresql.conf` file: + +```ini +edb_sql_protect.enabled = on +edb_sql_protect.level = passive +``` + +2. Reload the configuration file as shown in Step 2 of [Learn mode](#learn-mode). + + Now SQL/Protect is in passive mode. For relations that were learned, such as the `dept` and `emp` tables of the prior examples, SQL statements are permitted. 
SQL/Protect gives no special notification to the client, as shown by the following queries run by user `appuser`:
+
+```sql
+edb=> SELECT * FROM dept;
+__OUTPUT__
+ deptno | dname      | loc
+--------+------------+----------
+     10 | ACCOUNTING | NEW YORK
+     20 | RESEARCH   | DALLAS
+     30 | SALES      | CHICAGO
+     40 | OPERATIONS | BOSTON
+(4 rows)
+```
+```sql
+edb=> SELECT empno, ename, job FROM emp WHERE deptno = 10;
+__OUTPUT__
+ empno | ename  | job
+-------+--------+-----------
+  7782 | CLARK  | MANAGER
+  7839 | KING   | PRESIDENT
+  7934 | MILLER | CLERK
+(3 rows)
+```
+
+   SQL/Protect doesn't prevent any SQL statement from executing. However, it issues a message of `WARNING` severity level for SQL statements executed against relations that weren't learned. It also issues a warning for SQL statements that contain a prohibited signature:
+
+```sql
+edb=> CREATE TABLE appuser_tab_2 (f1 INTEGER);
+WARNING: SQLPROTECT: This command type is illegal for this user
+CREATE TABLE
+edb=> INSERT INTO appuser_tab_2 VALUES (1);
+WARNING: SQLPROTECT: Illegal Query: relations
+INSERT 0 1
+edb=> INSERT INTO appuser_tab_2 VALUES (2);
+WARNING: SQLPROTECT: Illegal Query: relations
+INSERT 0 1
+edb=> SELECT * FROM appuser_tab_2 WHERE 'x' = 'x';
+WARNING: SQLPROTECT: Illegal Query: relations
+WARNING: SQLPROTECT: Illegal Query: tautology
+__OUTPUT__
+ f1
+----
+  1
+  2
+(2 rows)
+```
+
+3. Monitor the statistics for suspicious activity.
+
+   By querying the view `edb_sql_protect_stats`, you can see how many executed SQL statements referenced relations that weren't in a role’s protected relations list or contained SQL injection attack signatures.
+
+   The following is a query on `edb_sql_protect_stats`:
+
+```sql
+edb=# SET search_path TO sqlprotect;
+SET
+edb=# SELECT * FROM edb_sql_protect_stats;
+__OUTPUT__
+ username  | superusers | relations | commands | tautology | dml
+-----------+------------+-----------+----------+-----------+-----
+ appuser   |          0 |         3 |        1 |         1 |   0
+(1 row)
+```
+
+4. View information on specific attacks.
+
+   By querying the `edb_sql_protect_queries` view, you can see the executed SQL statements that referenced relations that weren't in a role’s protected relations list or that contained SQL injection attack signatures.
+
+   The following code sample shows a query on `edb_sql_protect_queries`:
+
+```sql
+edb=# SELECT * FROM edb_sql_protect_queries;
+__OUTPUT__
+-[ RECORD 1 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:21:00 -04:00
+ query        | INSERT INTO appuser_tab_2 VALUES (1);
+-[ RECORD 2 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:21:00 -04:00
+ query        | CREATE TABLE appuser_tab_2 (f1 INTEGER);
+-[ RECORD 3 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:22:00 -04:00
+ query        | INSERT INTO appuser_tab_2 VALUES (2);
+-[ RECORD 4 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:22:00 -04:00
+ query        | SELECT * FROM appuser_tab_2 WHERE 'x' = 'x';
+```
+
+!!! Note
+    The `ip_address` and `port` columns don't return any information if the attack originated on the same host as the database server using the Unix-domain socket (that is, `pg_hba.conf` connection type `local`).
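+
+As a lightweight aid while running in passive mode, you can poll the statistics view on a schedule and surface only the roles with suspicious activity. The following query is an illustrative sketch, not part of SQL/Protect itself; it assumes the `sqlprotect` schema is in your search path:
+
+```sql
+-- Report only protected roles that have triggered at least one
+-- suspicious-statement counter since the counters were last cleared.
+SELECT username,
+       relations + commands + tautology + dml AS total_offenses
+FROM edb_sql_protect_stats
+WHERE relations + commands + tautology + dml > 0
+ORDER BY total_offenses DESC;
+```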
+ +### Active mode + +In active mode, disallowed SQL statements are prevented from executing. Also, the message issued by SQL/Protect has a higher severity level of `ERROR` instead of `WARNING`. + +1. To activate SQL/Protect in active mode, set the following parameters in the `postgresql.conf` file: + +```ini +edb_sql_protect.enabled = on +edb_sql_protect.level = active +``` + +2. Reload the configuration file as shown in Step 2 of [Learn mode](#learn-mode). + +This example shows SQL statements similar to those given in the examples of Step 2 in [Passive mode](#passive-mode). These statements are executed by the user `appuser` when `edb_sql_protect.level` is set to `active`: + +```sql +edb=> CREATE TABLE appuser_tab_3 (f1 INTEGER); +ERROR: SQLPROTECT: This command type is illegal for this user +edb=> INSERT INTO appuser_tab_2 VALUES (1); +ERROR: SQLPROTECT: Illegal Query: relations +edb=> SELECT * FROM appuser_tab_2 WHERE 'x' = 'x'; +ERROR: SQLPROTECT: Illegal Query: relations +``` + +The following shows the resulting statistics: + +```sql +edb=# SELECT * FROM sqlprotect.edb_sql_protect_stats; +__OUTPUT__ + username | superusers | relations | commands | tautology | dml +-----------+------------+-----------+----------+-----------+----- + appuser | 0 | 5 | 2 | 1 | 0 +(1 row) +``` + +The following is a query on `edb_sql_protect_queries`: + +```sql +edb=# SELECT * FROM sqlprotect.edb_sql_protect_queries; +__OUTPUT__ +-[ RECORD 1 ]+--------------------------------------------- + username | appuser + ip_address | + port | + machine_name | + date_time | 20-JUN-14 13:21:00 -04:00 + query | CREATE TABLE appuser_tab_2 (f1 INTEGER); +-[ RECORD 2 ]+--------------------------------------------- + username | appuser + ip_address | + port | + machine_name | + date_time | 20-JUN-14 13:22:00 -04:00 + query | INSERT INTO appuser_tab_2 VALUES (2); +-[ RECORD 3 ]+--------------------------------------------- + username | appuser + ip_address | 192.168.2.6 + port | 50098 + machine_name | + date_time | 20-JUN-14 13:39:00 -04:00 + query | CREATE TABLE appuser_tab_3 (f1 INTEGER); +-[ RECORD 4 ]+--------------------------------------------- + username | appuser + ip_address | 192.168.2.6 + port | 50098 + machine_name | + date_time | 20-JUN-14 13:39:00 -04:00 + query | INSERT INTO appuser_tab_2 VALUES (1); +-[ RECORD 5 ]+--------------------------------------------- + username | appuser + ip_address | 192.168.2.6 + port | 50098 + machine_name | + date_time | 20-JUN-14 13:39:00 -04:00 + query | SELECT * FROM appuser_tab_2 WHERE 'x' = 'x'; +``` diff --git a/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/03_common_maintenance_operations.mdx b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/03_common_maintenance_operations.mdx new file mode 100644 index 00000000000..e49bb163f79 --- /dev/null +++ b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/03_common_maintenance_operations.mdx @@ -0,0 +1,260 @@ +--- +title: "Common maintenance operations" +description: "Describes how to perform routine maintenance tasks using SQL/Protect" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.30.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.075.html" +--- + + + +You must be connected as a superuser to perform these operations. Include the `sqlprotect` schema in your search path. + +## Adding a role to the protected roles list + +Add a role to the protected roles list. Run `protect_role('rolename')`, as shown in this example: + +```sql +edb=# SELECT protect_role('newuser'); +__OUTPUT__ + protect_role +-------------- +(1 row) +``` + +## Removing a role from the protected roles list + +To remove a role from the protected roles list, use either of the following functions: + +```sql +unprotect_role('rolename') + +unprotect_role(roleoid) +``` + +The variation of the function using the OID is useful if you remove the role using the `DROP ROLE` or `DROP USER` SQL statement before removing the role from the protected roles list. If a query on a SQL/Protect relation returns a value such as `unknown (OID=16458)` for the user name, use the `unprotect_role(roleoid)` form of the function to remove the entry for the deleted role from the protected roles list. + +Removing a role using these functions also removes the role’s protected relations list. + +To delete the statistics for a role that was removed, use the [drop_stats function](#drop_stats). + +To delete the offending queries for a role that was removed, use the [drop_queries function](#drop_queries). + +This example shows the `unprotect_role` function: + +```sql +edb=# SELECT unprotect_role('newuser'); +__OUTPUT__ + unprotect_role +---------------- +(1 row) +``` + +Alternatively, you can remove the role by giving its OID of `16693`: + +```sql +edb=# SELECT unprotect_role(16693); +__OUTPUT__ + unprotect_role +---------------- +(1 row) +``` + +## Setting the types of protection for a role + +You can change whether a role is protected from a certain type of SQL injection attack. + +Change the Boolean value for the column in `edb_sql_protect` corresponding to the type of SQL injection attack for which you want to enable or disable protection of a role. + +Be sure to qualify the following columns in your `WHERE` clause of the statement that updates `edb_sql_protect`: + +- **dbid.** OID of the database for which you're making the change. +- **roleid.** OID of the role for which you're changing the Boolean settings + +For example, to allow a given role to issue utility commands, update the `allow_utility_cmds` column: + +```sql +UPDATE edb_sql_protect SET allow_utility_cmds = TRUE WHERE dbid = +13917 AND roleid = 16671; +``` + +You can verify the change was made by querying `edb_sql_protect` or `list_protected_users`. In the following query, note that column `allow_utility_cmds` now contains `t`: + +```sql +edb=# SELECT dbid, roleid, allow_utility_cmds FROM edb_sql_protect; +__OUTPUT__ + dbid | roleid | allow_utility_cmds +--------+--------+-------------------- + 13917 | 16671 | t +(1 row) +``` + +The updated rules take effect on new sessions started by the role since the change was made. + +## Removing a relation from the protected relations list + +If SQL/Protect learns that a given relation is accessible for a given role, you can later remove that relation from the role’s protected relations list. 
+ +Delete the entry from the `edb_sql_protect_rel` table using any of the following functions: + +```sql +unprotect_rel('rolename', 'relname') +unprotect_rel('rolename', 'schema', 'relname') +unprotect_rel(roleoid, reloid) +``` + +If the relation given by `relname` isn't in your current search path, specify the relation’s schema using the second function format. + +The third function format allows you to specify the OIDs of the role and relation, respectively, instead of their text names. + +This example removes the `public.emp` relation from the protected relations list of the role `appuser`: + +```sql +edb=# SELECT unprotect_rel('appuser', 'public', 'emp'); +__OUTPUT__ + unprotect_rel +--------------- +(1 row) +``` + +This query shows there's no longer an entry for the `emp` relation: + +```sql +edb=# SELECT * FROM list_protected_rels; +__OUTPUT__ + Database | Protected User | Schema | Name | Type | Owner +----------+----------------+--------+-------------+-------+-------------- + edb | appuser | public | dept | Table | enterprisedb + edb | appuser | public | appuser_tab | Table | appuser +(2 rows) +``` + +SQL/Protect now issues a warning or completely blocks access (depending on the setting of `edb_sql_protect.level`) when the role attempts to use that relation. + + + +## Deleting statistics + +You can delete statistics from the view `edb_sql_protect_stats` using either of the following functions: + +```sql +drop_stats('rolename') + +drop_stats(roleoid) +``` + +The form of the function using the OID is useful if you remove the role using the `DROP ROLE` or `DROP USER` SQL statement before deleting the role’s statistics using `drop_stats('rolename')`. If a query on `edb_sql_protect_stats` returns a value such as `unknown (OID=16458)` for the user name, use the `drop_stats(roleoid)` form of the function to remove the deleted role’s statistics from `edb_sql_protect_stats`. + +This example shows the `drop_stats` function: + +```sql +edb=# SELECT drop_stats('appuser'); +__OUTPUT__ + drop_stats +------------ +(1 row) +``` +```sql +edb=# SELECT * FROM edb_sql_protect_stats; +__OUTPUT__ + username | superusers | relations | commands | tautology | dml +-----------+------------+-----------+----------+-----------+----- +(0 rows) +``` + +This example uses the `drop_stats(roleoid)` form of the function when a role is dropped before deleting its statistics: + +```sql +edb=# SELECT * FROM edb_sql_protect_stats; +__OUTPUT__ + username | superusers | relations | commands | tautology | dml +---------------------+------------+-----------+----------+-----------+----- + unknown (OID=16693) | 0 | 5 | 3 | 1 | 0 + appuser | 0 | 5 | 2 | 1 | 0 +(2 rows) +``` +```sql +edb=# SELECT drop_stats(16693); +__OUTPUT__ + drop_stats +------------ + +(1 row) +``` +```sql +edb=# SELECT * FROM edb_sql_protect_stats; +__OUTPUT__ + username | superusers | relations | commands | tautology | dml +----------+------------+-----------+----------+-----------+----- + appuser | 0 | 5 | 2 | 1 | 0 +(1 row) +``` + + + +## Deleting offending queries + +You can delete offending queries from the view `edb_sql_protect_queries` using either of the following functions: + +```sql +drop_queries('rolename') + +drop_queries(roleoid) +``` + +The variation of the function using the OID is useful if you remove the role using the `DROP ROLE` or `DROP USER` SQL statement before deleting the role’s offending queries using `drop_queries('rolename')`. 
If a query on `edb_sql_protect_queries` returns a value such as `unknown (OID=16454)` for the user name, use the `drop_queries(roleoid)` form of the function to remove the deleted role’s offending queries from `edb_sql_protect_queries`. + +This example shows the `drop_queries` function: + +```sql +edb=# SELECT drop_queries('appuser'); +__OUTPUT__ + drop_queries +-------------- + 5 +(1 row) +``` +```sql +edb=# SELECT * FROM edb_sql_protect_queries; +__OUTPUT__ + username | ip_address | port | machine_name | date_time | query +-----------+------------+------+--------------+-----------+------- +(0 rows) +``` + +This example uses the `drop_queries(roleoid)` form of the function when a role is dropped before deleting its queries: + +```sql +edb=# SELECT username, query FROM edb_sql_protect_queries; +__OUTPUT__ + username | query +---------------------+---------------------------------------------- + unknown (OID=16454) | CREATE TABLE appuser_tab_2 (f1 INTEGER); + unknown (OID=16454) | INSERT INTO appuser_tab_2 VALUES (2); + unknown (OID=16454) | CREATE TABLE appuser_tab_3 (f1 INTEGER); + unknown (OID=16454) | INSERT INTO appuser_tab_2 VALUES (1); + unknown (OID=16454) | SELECT * FROM appuser_tab_2 WHERE 'x' = 'x'; +(5 rows) +``` +```sql +edb=# SELECT drop_queries(16454); +__OUTPUT__ + drop_queries +-------------- + 5 +(1 row) +``` +```sql +edb=# SELECT * FROM edb_sql_protect_queries; +__OUTPUT__ + username | ip_address | port | machine_name | date_time | query +----------+------------+------+--------------+-----------+------- +(0 rows) +``` + +## Disabling and enabling monitoring + +If you want to turn off SQL/Protect monitoring, modify the `postgresql.conf` file, setting the `edb_sql_protect.enabled` parameter to `off`. After saving the file, reload the server configuration to apply the settings. + +If you want to turn on SQL/Protect monitoring, modify the `postgresql.conf` file, setting the `edb_sql_protect.enabled` parameter to `on`. Save the file, and then reload the server configuration to apply the settings. diff --git a/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/04_backing_up_restoring_sql_protect.mdx b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/04_backing_up_restoring_sql_protect.mdx new file mode 100644 index 00000000000..652e18d53ca --- /dev/null +++ b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/04_backing_up_restoring_sql_protect.mdx @@ -0,0 +1,276 @@ +--- +title: "Backing up and restoring a SQL/Protect database" +description: "Describes how to back up and then restore databases configured with SQL/Protect" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.31.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.076.html" +--- + + + +Backing up a database that's configured with SQL/Protect and then restoring the backup file to a new database requires considerations in addition to those normally associated with backup and restore procedures. These added considerations are mainly due to the use of object identification numbers (OIDs) in the SQL/Protect tables. + +!!! 
Note + This information applies if your backup and restore procedures result in re-creating database objects in the new database with new OIDs, such as when using the `pg_dump` backup program. + + If you're backing up your EDB Postgres Advanced Server database server by using the operating system’s copy utility to create a binary image of the EDB Postgres Advanced Server data files (file system backup method), then this information doesn't apply. + +## Object identification numbers in SQL/Protect tables + +SQL/Protect uses two tables, `edb_sql_protect` and `edb_sql_protect_rel`, to store information on database objects such as databases, roles, and relations. References to these database objects in these tables are done using the objects’ OIDs, not their text names. The OID is a numeric data type used by EDB Postgres Advanced Server to uniquely identify each database object. + +When a database object is created, EDB Postgres Advanced Server assigns an OID to the object, which is then used when a reference to the object is needed in the database catalogs. If you create the same database object in two databases, such as a table with the same `CREATE TABLE` statement, each table is assigned a different OID in each database. + +In a backup and restore operation that results in re-creating the backed-up database objects, the restored objects end up with different OIDs in the new database from what they were assigned in the original database. As a result, the OIDs referencing databases, roles, and relations stored in the `edb_sql_protect` and `edb_sql_protect_rel` tables are no longer valid when these tables are dumped to a backup file and then restored to a new database. + +Two functions, `export_sqlprotect` and `import_sqlprotect`, are used specifically for backing up and restoring SQL/Protect tables to ensure the OIDs in the SQL/Protect tables reference the correct database objects after the tables are restored. + +## Backing up the database + +Back up a database that was configured with SQL/Protect. + +1. Create a backup file using `pg_dump`. + + This example shows a plain-text backup file named `/tmp/edb.dmp` created from database `edb` using the `pg_dump` utility program: + +```shell +$ cd /usr/edb/as17/bin +$ ./pg_dump -U enterprisedb -Fp -f /tmp/edb.dmp edb +Password: +$ +``` + +2. Connect to the database as a superuser, and export the SQL/Protect data using the `export_sqlprotect('sqlprotect_file')` function. `sqlprotect_file` is the fully qualified path to a file where the SQL/Protect data is saved. + + The `enterprisedb` operating system account (`postgres` if you installed EDB Postgres Advanced Server in PostgreSQL compatibility mode) must have read and write access to the directory specified in `sqlprotect_file`. + +```sql +edb=# SELECT sqlprotect.export_sqlprotect('/tmp/sqlprotect.dmp'); +__OUTPUT__ + export_sqlprotect +------------------- +(1 row) +``` + +The files `/tmp/edb.dmp` and `/tmp/sqlprotect.dmp` comprise your total database backup. + +## Restoring from the backup files + +1. Restore the backup file to the new database. + + This example uses the `psql` utility program to restore the plain-text backup file `/tmp/edb.dmp` to a newly created database named `newdb`: + +```sql +$ /usr/edb/as17/bin/psql -d newdb -U enterprisedb -f /tmp/edb.dmp +Password for user enterprisedb: +SET +SET +SET +SET +SET +COMMENT +CREATE SCHEMA + . + . + . +``` + +2. Connect to the new database as a superuser, and delete all rows from the `edb_sql_protect_rel` table. 
+
+   This deletion removes any existing rows in the `edb_sql_protect_rel` table that were backed up from the original database. These rows don't contain the correct OIDs relative to the database where the backup file was restored.
+
+```sql
+$ /usr/edb/as17/bin/psql -d newdb -U enterprisedb
+Password for user enterprisedb:
+psql.bin (17.2.0, server 17.2.0)
+Type "help" for help.
+
+newdb=# DELETE FROM sqlprotect.edb_sql_protect_rel;
+DELETE 2
+```
+
+3. Delete all rows from the `edb_sql_protect` table.
+
+   This deletion removes any existing rows in the `edb_sql_protect` table that were backed up from the original database. These rows don't contain the correct OIDs relative to the database where the backup file was restored.
+
+```sql
+newdb=# DELETE FROM sqlprotect.edb_sql_protect;
+DELETE 1
+```
+
+4. Delete any of the database's statistics.
+
+   This deletion removes any existing statistics for the database to which you're restoring the backup. The following query displays any existing statistics:
+
+```sql
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect_stats;
+__OUTPUT__
+ username  | superusers | relations | commands | tautology | dml
+-----------+------------+-----------+----------+-----------+-----
+(0 rows)
+```
+
+   For each row that appears in the preceding query, use the `drop_stats` function, specifying the role name of the entry.
+
+   For example, if a row appeared with `appuser` in the `username` column, issue the following command to remove it:
+
+```sql
+newdb=# SELECT sqlprotect.drop_stats('appuser');
+__OUTPUT__
+ drop_stats
+------------
+(1 row)
+```
+
+5. Delete any of the database's offending queries.
+
+   This deletion removes any existing queries for the database to which you're restoring the backup. This query displays any existing queries:
+
+```sql
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect_queries;
+__OUTPUT__
+ username  | ip_address | port | machine_name | date_time | query
+-----------+------------+------+--------------+-----------+-------
+(0 rows)
+```
+
+   For each row that appears in the preceding query, use the `drop_queries` function, specifying the role name of the entry. For example, if a row appeared with `appuser` in the `username` column, issue the following command to remove it:
+
+```sql
+newdb=# SELECT sqlprotect.drop_queries('appuser');
+__OUTPUT__
+ drop_queries
+--------------
+(1 row)
+```
+
+6. Make sure the role names that were protected by SQL/Protect in the original database are in the database server where the new database resides.
+
+   If the original and new databases reside in the same database server, then you don't need to do anything if you didn't delete any of these roles from the database server.
+
+7. Run the function `import_sqlprotect('sqlprotect_file')`, where `sqlprotect_file` is the fully qualified path to the file you created in Step 2 of [Backing up the database](#backing-up-the-database).
+
+```sql
+newdb=# SELECT sqlprotect.import_sqlprotect('/tmp/sqlprotect.dmp');
+__OUTPUT__
+ import_sqlprotect
+-------------------
+(1 row)
+```
+
+   Tables `edb_sql_protect` and `edb_sql_protect_rel` are populated with entries containing the OIDs of the database objects as assigned in the new database. The statistics view `edb_sql_protect_stats` also displays the statistics imported from the original database.
+
+   The SQL/Protect tables and statistics are properly restored for this database.
Use the following queries on the EDB Postgres Advanced Server system catalogs to verify:
+
+```sql
+newdb=# SELECT datname, oid FROM pg_database;
+__OUTPUT__
+  datname  |  oid
+-----------+-------
+ template1 |     1
+ template0 | 13909
+ edb       | 13917
+ newdb     | 16679
+(4 rows)
+```
+```sql
+newdb=# SELECT rolname, oid FROM pg_roles;
+__OUTPUT__
+   rolname    |  oid
+--------------+-------
+ enterprisedb |    10
+ appuser      | 16671
+ newuser      | 16678
+(3 rows)
+```
+```sql
+newdb=# SELECT relname, oid FROM pg_class WHERE relname IN
+('dept','emp','appuser_tab');
+__OUTPUT__
+   relname   |  oid
+-------------+-------
+ appuser_tab | 16803
+ dept        | 16809
+ emp         | 16812
+(3 rows)
+```
+```sql
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect;
+__OUTPUT__
+ dbid  | roleid | protect_relations | allow_utility_cmds | allow_tautology |
+ allow_empty_dml
+-------+--------+-------------------+--------------------+-----------------+--
+---------------
+ 16679 | 16671  | t                 | t                  | f               | f
+(1 row)
+```
+```sql
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect_rel;
+__OUTPUT__
+ dbid  | roleid | relid
+-------+--------+-------
+ 16679 | 16671  | 16809
+ 16679 | 16671  | 16803
+(2 rows)
+```
+```sql
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect_stats;
+__OUTPUT__
+ username | superusers | relations | commands | tautology | dml
+----------+------------+-----------+----------+-----------+-----
+ appuser  |          0 |         5 |        2 |         1 |   0
+(1 row)
+```
+```sql
+newdb=# \x
+Expanded display is on.
+newdb=# SELECT * FROM sqlprotect.edb_sql_protect_queries;
+__OUTPUT__
+-[ RECORD 1 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:21:00 -04:00
+ query        | CREATE TABLE appuser_tab_2 (f1 INTEGER);
+-[ RECORD 2 ]+---------------------------------------------
+ username     | appuser
+ ip_address   |
+ port         |
+ machine_name |
+ date_time    | 20-JUN-14 13:22:00 -04:00
+ query        | INSERT INTO appuser_tab_2 VALUES (2);
+-[ RECORD 3 ]+---------------------------------------------
+ username     | appuser
+ ip_address   | 192.168.2.6
+ port         | 50098
+ machine_name |
+ date_time    | 20-JUN-14 13:39:00 -04:00
+ query        | CREATE TABLE appuser_tab_3 (f1 INTEGER);
+-[ RECORD 4 ]+---------------------------------------------
+ username     | appuser
+ ip_address   | 192.168.2.6
+ port         | 50098
+ machine_name |
+ date_time    | 20-JUN-14 13:39:00 -04:00
+ query        | INSERT INTO appuser_tab_2 VALUES (1);
+-[ RECORD 5 ]+---------------------------------------------
+ username     | appuser
+ ip_address   | 192.168.2.6
+ port         | 50098
+ machine_name |
+ date_time    | 20-JUN-14 13:39:00 -04:00
+ query        | SELECT * FROM appuser_tab_2 WHERE 'x' = 'x';
+```
+
+   Note the following about the columns in tables `edb_sql_protect` and `edb_sql_protect_rel`:
+
+   - **dbid.** Matches the value in the `oid` column from `pg_database` for `newdb`.
+   - **roleid.** Matches the value in the `oid` column from `pg_roles` for `appuser`.
+
+   Also, in table `edb_sql_protect_rel`, the values in the `relid` column match the values in the `oid` column of `pg_class` for relations `dept` and `appuser_tab`.
+
+8. Verify that the SQL/Protect configuration parameters are set as desired in the `postgresql.conf` file for the database server running the new database. Restart the database server or reload the configuration file as appropriate.
+
+You can now monitor the database using SQL/Protect.
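+
+As a convenience, you can capture both backup artifacts in one step. The following shell sketch is illustrative only; the binary path, database name, credentials, and output file names are assumptions carried over from the examples above:
+
+```shell
+#!/bin/sh
+# Illustrative wrapper: back up the edb database plus its SQL/Protect data.
+BIN=/usr/edb/as17/bin
+$BIN/pg_dump -U enterprisedb -Fp -f /tmp/edb.dmp edb
+$BIN/psql -d edb -U enterprisedb \
+  -c "SELECT sqlprotect.export_sqlprotect('/tmp/sqlprotect.dmp')"
+```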
diff --git a/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/index.mdx b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/index.mdx
new file mode 100644
index 00000000000..414aac2bf02
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/02_protecting_against_sql_injection_attacks/index.mdx
@@ -0,0 +1,27 @@
+---
+title: "Protecting against SQL injection attacks"
+indexCards: simple
+description: "With the SQL/Protect feature, EDB Postgres Advanced Server offers protection against SQL injection attacks by examining incoming queries for common SQL injection profiles."
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.27.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.072.html"
+---
+
+
+
+EDB Postgres Advanced Server provides protection against *SQL injection attacks*. A SQL injection attack is an attempt to compromise a database by running SQL statements whose results provide clues to the attacker as to the content, structure, or security of that database.
+
+Preventing a SQL injection attack is normally the responsibility of the application developer. The database administrator typically has little or no control over the potential threat. The difficulty for database administrators is that the application must have access to the data to function properly.
+
+SQL/Protect:
+
+- Allows a database administrator to protect a database from SQL injection attacks.
+- Provides a layer of security in addition to the normal database security policies by examining incoming queries for common SQL injection profiles.
+- Gives control back to the database administrator by alerting them to potentially dangerous queries and by blocking these queries.
+
+
+sql_protect_overview
+configuring_sql_protect
+common_maintenance_operations
+backing_up_restoring_sql_protect
+
diff --git a/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/edb_wrap_key_concepts.mdx b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/edb_wrap_key_concepts.mdx
new file mode 100644
index 00000000000..7bfde479819
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/edb_wrap_key_concepts.mdx
@@ -0,0 +1,17 @@
+---
+title: "EDB*Wrap key concepts"
+description: "Describes the benefits and basic operation of the EDB*Wrap feature"
+---
+
+The EDB\*Wrap program translates a plaintext file that contains SPL or PL/pgSQL source code into a file that contains the same code in a form that's nearly impossible to read. Once you have the obfuscated form of the code, you can send that code to the PostgreSQL server, and the server stores those programs in obfuscated form. While EDB\*Wrap does obscure code, table definitions are still exposed.
+
+Everything you wrap is stored in obfuscated form. If you wrap an entire package, the package body source, as well as the prototypes contained in the package header and the functions and procedures contained in the package body, are stored in obfuscated form.
+
+If you wrap a `CREATE PACKAGE` statement, you hide the package API from other developers. You might want to wrap the package body but not the package header so users can see the package prototypes and other public variables that are defined in the package header. To allow users to see the prototypes the package contains, use EDB\*Wrap to obfuscate only the `CREATE PACKAGE BODY` statement in the `edbwrap` input file, omitting the `CREATE PACKAGE` statement. The package header source is stored as plaintext, while the package body source and package functions and procedures are obfuscated.
+
+![image](../../images/epas_tools_utility_edb_wrap.png)
+
+You can't unwrap or debug wrapped source code and programs. Reverse engineering is possible but very difficult.
+
+The entire source file is wrapped into one unit. Any `psql` meta-commands included in the wrapped file aren't recognized when the file is executed. Executing an obfuscated file that contains a `psql` meta-command causes a syntax error. `edbwrap` doesn't validate SQL source code. If the plaintext form contains a syntax error, `edbwrap` doesn't report it. Instead, the server reports an error and aborts the entire file when you try to execute the obfuscated form.
+
diff --git a/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/index.mdx b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/index.mdx
new file mode 100644
index 00000000000..a8586191575
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/index.mdx
@@ -0,0 +1,21 @@
+---
+title: "Protecting proprietary source code"
+description: "Describes how to use the EDB*Wrap utility to obfuscate proprietary source code and programs"
+indexCards: simple
+navigation:
+  - edb_wrap_key_concepts
+  - obfuscating_source_code
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.17.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/9.6/DB_Compat_for_Oracle_Dev_Tools_Guide.1.18.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.316.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.317.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.077.html"
+---
+
+
+
+The EDB\*Wrap utility protects proprietary source code and programs like functions, stored procedures, triggers, and packages from unauthorized scrutiny.
+
diff --git a/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/obfuscating_source_code.mdx b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/obfuscating_source_code.mdx
new file mode 100644
index 00000000000..1986ccb4a16
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/03_edb_wrap/obfuscating_source_code.mdx
@@ -0,0 +1,211 @@
+---
+title: "Obfuscating source code"
+description: "Describes how to use the EDB*Wrap utility to protect proprietary source code and programs"
+redirects:
+  - /epas/latest/epas_compat_tools_guide/03_edb_wrap/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+EDB\*Wrap is a command line utility that accepts a single input source file, obfuscates the contents, and returns a single output file. When you invoke the `edbwrap` utility, you must provide the name of the file that contains the source code to obfuscate. You can also specify the name of the file where `edbwrap` writes the obfuscated form of the code.
+
+## Overview of the command-line styles
+
+`edbwrap` offers three different command-line styles. The first style is compatible with Oracle's `wrap` utility:
+
+```shell
+edbwrap iname=input_file [oname=output_file]
+```
+
+The `iname=input_file` argument specifies the name of the input file. If `input_file` doesn't contain an extension, `edbwrap` searches for a file named `input_file.sql`.
+
+The optional `oname=output_file` argument specifies the name of the output file. If `output_file` doesn't contain an extension, `edbwrap` appends `.plb` to the name.
+
+If you don't specify an output file name, `edbwrap` writes to a file whose name is derived from the input file name. `edbwrap` strips the suffix (typically `.sql`) from the input file name and adds `.plb`.
+
+`edbwrap` offers two other command-line styles:
+
+```shell
+edbwrap --iname input_file [--oname output_file]
+edbwrap -i input_file [-o output_file]
+```
+
+You can mix command-line styles. The rules for deriving input and output file names are the same regardless of the style you use.
+
+Once `edbwrap` produces a file that contains obfuscated code, you typically feed that file into the PostgreSQL server using a client application such as `edb-psql`. The server executes the obfuscated code line by line and stores the source code for SPL and PL/pgSQL programs in wrapped form.
+
+In summary, to obfuscate code with EDB\*Wrap, you:
+
+1. Create the source code file.
+2. Invoke EDB\*Wrap to obfuscate the code.
+3. Import the file as if it were in plaintext form, as sketched below.
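+
+The following is a minimal sketch of the full cycle, assuming the `listemp.sql` example file and the `edb` database used in the walkthrough that follows:
+
+```shell
+# Step 1: listemp.sql contains the plaintext SPL source (created below).
+# Step 2: obfuscate it; with no oname, the output defaults to listemp.plb.
+edbwrap -i listemp.sql
+# Step 3: load the wrapped file exactly as you would plaintext SQL.
+edb-psql -d edb -f listemp.plb
+```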
+
+## Creating the source code file
+
+To use the EDB\*Wrap utility, create the source code for the `list_emp` procedure in plaintext form:
+
+```sql
+[bash] cat listemp.sql
+CREATE OR REPLACE PROCEDURE list_emp
+IS
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    CURSOR emp_cur IS
+        SELECT empno, ename FROM emp ORDER BY empno;
+BEGIN
+    OPEN emp_cur;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_cur INTO v_empno, v_ename;
+        EXIT WHEN emp_cur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    CLOSE emp_cur;
+END;
+/
+```
+
+Import the `list_emp` procedure with a client application such as `edb-psql`:
+
+```sql
+[bash] edb-psql edb
+Welcome to edb-psql 8.4.3.2, the EnterpriseDB interactive terminal.
+Type:  \copyright for distribution terms
+       \h for help with SQL commands
+       \? for help with edb-psql commands
+       \g or terminate with semicolon to execute query
+       \q to quit
+
+edb=# \i listemp.sql
+CREATE PROCEDURE
+```
+
+View the plaintext source code stored in the server by examining the `pg_proc` system table:
+
+```sql
+edb=# SELECT prosrc FROM pg_proc WHERE proname = 'list_emp';
+__OUTPUT__
+                            prosrc
+--------------------------------------------------------------
+
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    CURSOR emp_cur IS
+        SELECT empno, ename FROM emp ORDER BY empno;
+ BEGIN
+    OPEN emp_cur;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    ENAME');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_cur INTO v_empno, v_ename;
+        EXIT WHEN emp_cur%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(v_empno || '     ' || v_ename);
+    END LOOP;
+    CLOSE emp_cur;
+ END
+(1 row)
+
+edb=# quit
+```
+
+## Invoking EDB\*Wrap
+
+Obfuscate the plaintext file with EDB\*Wrap:
+
+```shell
+[bash] edbwrap -i listemp.sql
+EDB*Wrap Utility: Release 8.4.3.2
+
+Copyright (c) 2004-2021 EnterpriseDB Corporation.  All Rights Reserved.
+
+Using encoding UTF8 for input
+Processing listemp.sql to listemp.plb
+```
+
+Examining the contents of the output file (listemp.plb) reveals that the code is obfuscated:
+
+```shell
+[bash] cat listemp.plb
+$__EDBwrapped__$
+UTF8
+d+6DL30RVaGjYMIzkuoSzAQgtBw7MhYFuAFkBsfYfhdJ0rjwBv+bHr1FCyH6j9SgH
+movU+bYI+jR+hR2jbzq3sovHKEyZIp9y3/GckbQgualRhIlGpyWfE0dltDUpkYRLN
+/OUXmk0/P4H6EI98sAHevGDhOWI+58DjJ44qhZ+l5NNEVxbWDztpb/s5sdx4660qQ
+Ozx3/gh8VkqS2JbcxYMpjmrwVr6fAXfb68Ml9mW2Hl7fNtxcb5kjSzXvfWR2XYzJf
+KFNrEhbL1DTVlSEC5wE6lGlwhYvXOf22m1R2IFns0MtF9fwcnBWAs1YqjR00j6+fc
+er/f/efAFh4=
+$__EDBwrapped__$
+```
+
+The second line of the wrapped file contains an encoding name. In this case, the encoding is UTF8. When you obfuscate a file, `edbwrap` infers the encoding of the input file by examining the locale. For example, if you're running `edbwrap` while your locale is set to `en_US.utf8`, `edbwrap` assumes that the input file is encoded in UTF8. Be sure to examine the output file after running `edbwrap`. If the encoding recorded in the wrapped file doesn't match the encoding of the input file, change your locale and rewrap the input file.
+
+## Importing the obfuscated code to the PostgreSQL server
+
+You can import the obfuscated code to the PostgreSQL server using the same tools that work with plaintext code:
+
+```sql
+[bash] edb-psql edb
+Welcome to edb-psql 8.4.3.2, the EnterpriseDB interactive terminal.
+Type:  \copyright for distribution terms
+       \h for help with SQL commands
+       \? for help with edb-psql commands
+       \g or terminate with semicolon to execute query
+       \q to quit
+
+edb=# \i listemp.plb
+CREATE PROCEDURE
+```
+
+The pg_proc system table contains the obfuscated code:
+
+```text
+edb=# SELECT prosrc FROM pg_proc WHERE proname = 'list_emp';
+__OUTPUT__
+                                     prosrc
+----------------------------------------------------------------
+  $__EDBwrapped__$
+  UTF8
+  dw4B9Tz69J3WOsy0GgYJQa+G2sLZ3IOyxS8pDyuOTFuiYe/EXiEatwwG3h3tdJk
+  ea+AIp35dS/4idbN8wpegM3s994dQ3R97NgNHfvTQnO2vtd4wQtsQ/Zc4v4Lhfj
+  nlV+A4UpHI5oQEnXeAch2LcRD87hkU0uo1ESeQV8IrXaj9BsZr+ueROnwhGs/Ec
+  pva/tRV4m9RusFn0wyr38u4Z8w4dfnPW184Y3o6It4b3aH07WxTkWrMLmOZW1jJ
+  Nu6u4o+ezO64G9QKPazgehslv4JB9NQnuocActfDSPMY7R7anmgw
+  $__EDBwrapped__$
+(1 row)
+```
+
+Invoke the obfuscated code in the same way that you invoke the plaintext form:
+
+```sql
+edb=# exec list_emp;
+__OUTPUT__
+EMPNO    ENAME
+-----    -------
+7369     SMITH
+7499     ALLEN
+7521     WARD
+7566     JONES
+7654     MARTIN
+7698     BLAKE
+7782     CLARK
+7788     SCOTT
+7839     KING
+7844     TURNER
+7876     ADAMS
+7900     JAMES
+7902     FORD
+7934     MILLER
+
+EDB-SPL Procedure successfully completed
+edb=# quit
+```
+
+When you use `pg_dump` to back up a database, wrapped programs remain obfuscated in the archive file.
+
+Be aware that audit logs produced by the Postgres server show wrapped programs in plaintext form. Source code is also displayed in plaintext in SQL error messages generated when the program executes.
+
+!!! Note
+    The bodies of the objects created by the following statements aren't stored in obfuscated form:
+
+    ```sql
+    CREATE [OR REPLACE] TYPE type_name AS OBJECT
+    CREATE [OR REPLACE] TYPE type_name UNDER type_name
+    CREATE [OR REPLACE] TYPE BODY type_name
+    ```
diff --git a/product_docs/docs/epas/17/epas_security_guide/03_virtual_private_database.mdx b/product_docs/docs/epas/17/epas_security_guide/03_virtual_private_database.mdx
new file mode 100644
index 00000000000..54dbe92c8df
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/03_virtual_private_database.mdx
@@ -0,0 +1,33 @@
+---
+navTitle: Controlling data access
+title: "Controlling data access (Virtual Private Database)"
+description: "Virtual Private Database offers fine-grained access control to data down to specific rows, as defined by the security policy"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.6/EDB_Postgres_Advanced_Server_Guide.1.32.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.078.html"
+---
+
+
+
+Virtual Private Database is a type of *fine-grained access control* using security policies. Fine-grained access control means that you can control access to data down to specific rows as defined by the security policy.
+
+The rules that encode a *security policy* are defined in a *policy function*. A policy function is an SPL function with certain input parameters and a return value. The security policy is the named association of the policy function to a particular database object, typically a table.
+
+In EDB Postgres Advanced Server, you can write the policy function in any language it supports, such as SQL and PL/pgSQL, in addition to SPL.
+
+!!! Note
+    The database objects currently supported by EDB Postgres Advanced Server Virtual Private Database are tables. You can't apply policies to views or synonyms.
+
+The following are advantages of using Virtual Private Database:
+
+- It provides a fine-grained level of security. Database-object-level privileges given by the `GRANT` command determine access privileges to the entire instance of a database object. Virtual Private Database provides access control for the individual rows of a database object instance.
+- You can apply a different security policy depending on the type of SQL command (`INSERT`, `UPDATE`, `DELETE`, or `SELECT`).
+- The security policy can vary dynamically for each applicable SQL command affecting the database object. Factors such as the session user of the application accessing the database object affect the security policy.
+- Invoking the security policy is transparent to all applications that access the database object. You don't have to modify individual applications to apply the security policy.
+- After you enable a security policy, no application (including new applications) can circumvent the security policy except by the system privilege described in the note that follows. Even superusers can't circumvent the security policy except by the noted system privilege.
+
+!!! Note
+    The only way you can circumvent security policies is if the user is granted the `EXEMPT ACCESS POLICY` system privilege. Use extreme care when granting the `EXEMPT ACCESS POLICY` privilege. A user with this privilege is exempted from all policies in the database.
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/01_creating_a_password_function.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/01_creating_a_password_function.mdx
new file mode 100644
index 00000000000..77eb426db17
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/01_creating_a_password_function.mdx
@@ -0,0 +1,135 @@
+---
+title: "Creating a password function"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/01_creating_a_new_profile/01_creating_a_password_function/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+When specifying `PASSWORD_VERIFY_FUNCTION`, you can provide a customized function that specifies the security rules to apply when your users change their password. For example, you can specify rules that stipulate that the new password must be at least *n* characters long and can't contain a specific value.
+
+The password function has the following signature:
+
+```sql
+<function_name> (user_name VARCHAR2,
+                 new_password VARCHAR2,
+                 old_password VARCHAR2) RETURN boolean
+```
+
+Where:
+
+- `user_name` is the name of the user.
+
+- `new_password` is the new password.
+
+- `old_password` is the user's previous password. If you reference this parameter in your function:
+
+    - When a database superuser changes their password, the third parameter is always `NULL`.
+    - When a user with the `CREATEROLE` attribute changes their password, the parameter passes the previous password if the statement includes the `REPLACE` clause. The `REPLACE` clause is optional syntax for a user with the `CREATEROLE` privilege.
+    - When a user that isn't a database superuser and doesn't have the `CREATEROLE` attribute changes their password, the third parameter contains the previous password for the role.
+
+The function returns a Boolean value. If the function returns `true` and doesn't raise an exception, the password is accepted. If the function returns `false` or raises an exception, the password is rejected. If the function raises an exception, the specified error message is displayed to the user. If the function doesn't raise an exception but returns `false`, the following error message is displayed:
+
+`ERROR: password verification for the specified password failed`
+
+The function must be owned by a database superuser and reside in the `sys` schema.
+
+## Example
+
+This example creates a profile and a custom function. Then, the function is associated with the profile.
+
+This `CREATE PROFILE` command creates a profile named `acctg_pwd_profile`:
+
+```sql
+CREATE PROFILE acctg_pwd_profile;
+```
+
+The following commands create a schema-qualified function named `verify_password`:
+
+```sql
+CREATE OR REPLACE FUNCTION sys.verify_password(user_name varchar2,
+new_password varchar2, old_password varchar2)
+RETURN boolean IMMUTABLE
+IS
+BEGIN
+  IF (length(new_password) < 5)
+  THEN
+    raise_application_error(-20001, 'too short');
+  END IF;
+
+  IF substring(new_password FROM old_password) IS NOT NULL
+  THEN
+    raise_application_error(-20002, 'includes old password');
+  END IF;
+
+  RETURN true;
+END;
+```
+
+The function first ensures that the password is at least five characters long and then compares the new password to the old password.
If the new password contains fewer than five characters or contains the old password, the function raises an error. + +The following statement sets the ownership of the `verify_password` function to the `enterprisedb` database superuser: + +```sql +ALTER FUNCTION verify_password(varchar2, varchar2, varchar2) OWNER TO +enterprisedb; +``` + +Then, the `verify_password` function is associated with the profile: + +```sql +ALTER PROFILE acctg_pwd_profile LIMIT PASSWORD_VERIFY_FUNCTION +verify_password; +``` + +The following statements confirm that the function is working by first creating a test user (`alice`), and then attempting to associate invalid and valid passwords with her role: + +```sql +CREATE ROLE alice WITH LOGIN PASSWORD 'temp_password' PROFILE +acctg_pwd_profile; +``` + +Then, when `alice` connects to the database and attempts to change her password, she must adhere to the rules established by the profile function. A non-superuser without `CREATEROLE` must include the `REPLACE` clause when changing a password: + +```sql +edb=> ALTER ROLE alice PASSWORD 'hey'; +ERROR: missing REPLACE clause +``` + +The new password must be at least five characters long: + +```sql +edb=> ALTER USER alice PASSWORD 'hey' REPLACE 'temp_password'; +ERROR: EDB-20001: too short +CONTEXT: edb-spl function verify_password(character varying,character +varying,character varying) line 5 at procedure/function invocation statement +``` + +If the new password is acceptable, the command completes without error: + +```sql +edb=> ALTER USER alice PASSWORD 'hello' REPLACE 'temp_password'; +ALTER ROLE +``` + +If `alice` decides to change her password, the new password must not contain the old password: + +```sql +edb=> ALTER USER alice PASSWORD 'helloworld' REPLACE 'hello'; +ERROR: EDB-20002: includes old password +CONTEXT: edb-spl function verify_password(character varying,character +varying,character varying) line 10 at procedure/function invocation statement +``` + +To remove the verify function, set `password_verify_function` to `NULL`: + +```sql +ALTER PROFILE acctg_pwd_profile LIMIT password_verify_function NULL; +``` + +Then, all password constraints are lifted: + +```sql +edb=# ALTER ROLE alice PASSWORD 'hey'; +ALTER ROLE +``` diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/index.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/index.mdx new file mode 100644 index 00000000000..109fdfd0b74 --- /dev/null +++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/01_creating_a_new_profile/index.mdx @@ -0,0 +1,124 @@ +--- +title: "Creating a new profile" +description: "Describes how to create a new profile" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.030.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.027.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.030.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/01_creating_a_new_profile/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Use the `CREATE PROFILE` command to create a new profile. The syntax is:
+
+```sql
+CREATE PROFILE <profile_name>
+  [LIMIT {<parameter> <value>} ... ];
+```
+
+Include the `LIMIT` clause and one or more space-delimited parameter/value pairs to specify the rules enforced by EDB Postgres Advanced Server.
+
+## Parameters
+
+- `profile_name` specifies the name of the profile.
+
+- `parameter` specifies the attribute limited by the profile.
+
+- `value` specifies the parameter limit.
+
+EDB Postgres Advanced Server supports the following values for each `parameter`:
+
+`FAILED_LOGIN_ATTEMPTS` specifies the number of failed login attempts that a user can make before the server locks them out of their account for the length of time specified by `PASSWORD_LOCK_TIME`. Supported values are:
+
+- An `INTEGER` value greater than `0`.
+- `DEFAULT` — The value of `FAILED_LOGIN_ATTEMPTS` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The connecting user can make an unlimited number of failed login attempts.
+
+`PASSWORD_LOCK_TIME` specifies the length of time that must pass before the server unlocks an account that was locked because of `FAILED_LOGIN_ATTEMPTS`. Supported values are:
+
+- A `NUMERIC` value greater than or equal to 0. To specify a fractional portion of a day, specify a decimal value. For example, use the value `4.5` to specify 4 days, 12 hours.
+- `DEFAULT` — The value of `PASSWORD_LOCK_TIME` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The account is locked until a database superuser manually unlocks it.
+
+`PASSWORD_LIFE_TIME` specifies the number of days that the current password can be used before the user is prompted to provide a new password. Include the `PASSWORD_GRACE_TIME` clause when using the `PASSWORD_LIFE_TIME` clause to specify the number of days that pass after the password expires before connections by the role are rejected. If you don't specify `PASSWORD_GRACE_TIME`, the password expires on the day specified by the default value of `PASSWORD_GRACE_TIME`, and the user can't execute any command until they provide a new password. Supported values are:
+
+- A `NUMERIC` value greater than or equal to 0. To specify a fractional portion of a day, specify a decimal value. For example, use the value `4.5` to specify 4 days, 12 hours.
+- `DEFAULT` — The value of `PASSWORD_LIFE_TIME` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The password doesn't have an expiration date.
+
+`PASSWORD_GRACE_TIME` specifies the length of the grace period after a password expires until the user is forced to change their password. When the grace period expires, a user can connect but can't execute any command until they update their expired password. Supported values are:
+
+- A `NUMERIC` value greater than or equal to 0. To specify a fractional portion of a day, specify a decimal value. For example, use the value `4.5` to specify 4 days, 12 hours.
+- `DEFAULT` — The value of `PASSWORD_GRACE_TIME` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The grace period is infinite.
+
+`PASSWORD_REUSE_TIME` specifies the number of days a user must wait before reusing a password. Use the `PASSWORD_REUSE_TIME` and `PASSWORD_REUSE_MAX` parameters together. If you specify a finite value for one of these parameters while the other is `UNLIMITED`, old passwords can never be reused. If both parameters are set to `UNLIMITED`, there are no restrictions on password reuse. Supported values are:
+
+- A `NUMERIC` value greater than or equal to 0. To specify a fractional portion of a day, specify a decimal value. For example, use the value `4.5` to specify 4 days, 12 hours.
+- `DEFAULT` — The value of `PASSWORD_REUSE_TIME` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The password can be reused without restrictions.
+
+`PASSWORD_REUSE_MAX` specifies the number of password changes that must occur before a password can be reused. Use the `PASSWORD_REUSE_TIME` and `PASSWORD_REUSE_MAX` parameters together. If you specify a finite value for one of these parameters while the other is `UNLIMITED`, old passwords can never be reused. If both parameters are set to `UNLIMITED`, there are no restrictions on password reuse. Supported values are:
+
+- An `INTEGER` value greater than or equal to 0.
+- `DEFAULT` — The value of `PASSWORD_REUSE_MAX` specified in the `DEFAULT` profile.
+- `UNLIMITED` — The password can be reused without restrictions.
+
+`PASSWORD_VERIFY_FUNCTION` specifies a function that verifies password complexity. Supported values are:
+
+- The name of a PL/SQL function.
+- `DEFAULT` — The value of `PASSWORD_VERIFY_FUNCTION` specified in the `DEFAULT` profile.
+- `NULL`
+
+`PASSWORD_ALLOW_HASHED` specifies whether an encrypted password is allowed. If you specify `TRUE`, the system allows a user to change the password by specifying a hash-computed encrypted password on the client side. If you specify `FALSE`, the password must be specified in plain-text form so the server can validate it. In that case, the server throws an error if it receives an encrypted password. Supported values are:
+
+- A Boolean value `TRUE/ON/YES/1` or `FALSE/OFF/NO/0`.
+- `DEFAULT` — The value of `PASSWORD_ALLOW_HASHED` specified in the `DEFAULT` profile.
+
+!!! Note
+    - The `PASSWORD_ALLOW_HASHED` parameter isn't Oracle compatible.
+    - Use the `DROP PROFILE` command to remove a profile.
+
+## Examples
+
+The following command creates a profile named `acctg`. The profile specifies that if a user doesn't authenticate with the correct password in five attempts, the account is locked for one day:
+
+```sql
+CREATE PROFILE acctg LIMIT
+   FAILED_LOGIN_ATTEMPTS 5
+   PASSWORD_LOCK_TIME 1;
+```
+
+The following command creates a profile named `sales`. The profile specifies that a user must change their password every 90 days:
+
+```sql
+CREATE PROFILE sales LIMIT
+   PASSWORD_LIFE_TIME 90
+   PASSWORD_GRACE_TIME 3;
+```
+
+If the user doesn't change their password before the 90 days specified in the profile have passed, a warning appears at login. After the three-day grace period, their account can't invoke any commands until they change their password.
+
+The following command creates a profile named `accts`. The profile specifies that a user can't reuse a password within 180 days of the last use of the password and must change their password at least five times before reusing the password:
+
+```sql
+CREATE PROFILE accts LIMIT
+   PASSWORD_REUSE_TIME 180
+   PASSWORD_REUSE_MAX 5;
+```
+
+The following command creates a profile named `resources`. The profile calls a user-defined function named `password_rules` that verifies that the password provided meets your standards for complexity:
+
+```sql
+CREATE PROFILE resources LIMIT
+   PASSWORD_VERIFY_FUNCTION password_rules;
+```
+
+
+creating_a_password_function
+
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/02_altering_a_profile.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/02_altering_a_profile.mdx
new file mode 100644
index 00000000000..50c9dc338f4
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/02_altering_a_profile.mdx
@@ -0,0 +1,50 @@
+---
+title: "Altering a profile"
+description: "Describes how to use the ALTER PROFILE command to modify a user-defined profile"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.031.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.031.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.028.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/02_altering_a_profile/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Use the `ALTER PROFILE` command to modify a user-defined profile. EDB Postgres Advanced Server supports two forms of the command:
+
+```sql
+ALTER PROFILE <profile_name> RENAME TO <new_name>;
+
+ALTER PROFILE <profile_name>
+  LIMIT {<parameter> <value>}[...];
+```
+
+Include the `LIMIT` clause and one or more space-delimited parameter/value pairs to specify the rules enforced by EDB Postgres Advanced Server. Or use `ALTER PROFILE...RENAME TO` to change the name of a profile.
+
+## Parameters
+
+- `profile_name` specifies the name of the profile.
+- `new_name` specifies the new name of the profile.
+- `parameter` specifies the attribute limited by the profile.
+- `value` specifies the parameter limit.
+
+See the table in [Creating a new profile](01_creating_a_new_profile/#creating_a_new_profile) for a complete list of accepted parameter/value pairs.
+
+## Examples
+
+The following example modifies a profile named `acctg_profile`:
+
+```sql
+ALTER PROFILE acctg_profile
+   LIMIT FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1;
+```
+
+`acctg_profile` counts failed connection attempts when a login role attempts to connect to the server. The profile specifies that if a user doesn't authenticate with the correct password in three attempts, the account is locked for one day.
+
+The following example changes the name of `acctg_profile` to `payables_profile`:
+
+```sql
+ALTER PROFILE acctg_profile RENAME TO payables_profile;
+```
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/03_dropping_a_profile.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/03_dropping_a_profile.mdx
new file mode 100644
index 00000000000..01d269adc97
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/03_dropping_a_profile.mdx
@@ -0,0 +1,47 @@
+---
+title: "Dropping a profile"
+description: "Describes how to use the DROP PROFILE command to drop a profile"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.032.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.029.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.032.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/03_dropping_a_profile/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+Use the `DROP PROFILE` command to drop a profile. The syntax is:
+
+```sql
+DROP PROFILE [IF EXISTS] <profile_name> [CASCADE|RESTRICT];
+```
+
+Include the `IF EXISTS` clause to instruct the server not to throw an error if the specified profile doesn't exist. The server issues a notice if the profile doesn't exist.
+
+Include the optional `CASCADE` clause to reassign any users that are currently associated with the profile to the `default` profile and then drop the profile. Include the optional `RESTRICT` clause to instruct the server not to drop any profile that's associated with a role. This is the default behavior.
+
+## Parameters
+
+`profile_name`
+
+The name of the profile being dropped.
+
+## Examples
+
+This example drops a profile named `acctg_profile`:
+
+```sql
+DROP PROFILE acctg_profile CASCADE;
+```
+
+The command first reassociates any roles associated with the `acctg_profile` profile with the `default` profile and then drops the `acctg_profile` profile.
+
+The following example drops a profile named `acctg_profile`:
+
+```sql
+DROP PROFILE acctg_profile RESTRICT;
+```
+
+The `RESTRICT` clause in the command instructs the server not to drop `acctg_profile` if any roles are associated with the profile.
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/04_associating_a_profile_with_an_existing_role.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/04_associating_a_profile_with_an_existing_role.mdx
new file mode 100644
index 00000000000..6ff02ca4085
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/04_associating_a_profile_with_an_existing_role.mdx
@@ -0,0 +1,85 @@
+---
+title: "Associating a profile with an existing role"
+description: "Describes how to use commands to associate the profile with a role"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.033.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.033.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.030.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/04_associating_a_profile_with_an_existing_role/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+After creating a profile, you can use the `ALTER USER... PROFILE` or `ALTER ROLE... PROFILE` command to associate the profile with a role.
+The command syntax related to profile management functionality is:
+
+```sql
+ALTER USER|ROLE <name> [[WITH] option [...]]
+```
+
+where `option` can be the following compatible clauses:
+
+```sql
+  PROFILE <profile_name>
+| ACCOUNT {LOCK|UNLOCK}
+| PASSWORD EXPIRE [AT '<timestamp>']
+```
+
+Or, `option` can be the following noncompatible clauses:
+
+```sql
+| PASSWORD SET AT '<timestamp>'
+| LOCK TIME '<timestamp>'
+| STORE PRIOR PASSWORD {'<password>' '<timestamp>'} [, ...]
+```
+
+For information about the administrative clauses of the `ALTER USER` or `ALTER ROLE` command that are supported by EDB Postgres Advanced Server, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-commands.html).
+
+Only a database superuser can use the `ALTER USER|ROLE` clauses that enforce profile management. The clauses enforce the following behaviors:
+
+- Include the `PROFILE` clause and a `profile_name` to associate a predefined profile with a role or to change the predefined profile associated with a user.
+
+- Include the `ACCOUNT` clause and the `LOCK` or `UNLOCK` keyword to place the user account in a locked or unlocked state.
+
+- Include the `LOCK TIME 'timestamp'` clause and a date/time value to lock the role at the specified time and unlock the role at the time indicated by the `PASSWORD_LOCK_TIME` parameter of the profile assigned to this role. If `LOCK TIME` is used with the `ACCOUNT LOCK` clause, only a database superuser can unlock the role with the `ACCOUNT UNLOCK` clause.
+
+- Include the `PASSWORD EXPIRE` clause with the `AT 'timestamp'` keywords to specify a date/time when the password associated with the role expires. If you omit the `AT 'timestamp'` keywords, the password expires immediately.
+
+- Include the `PASSWORD SET AT 'timestamp'` keywords to set the password modification date to the time specified.
+
+- Include the `STORE PRIOR PASSWORD {'password' 'timestamp'} [, ...]` clause to modify the password history, adding the new password and the time the password was set.
+
+Each login role can have only one profile. To discover the profile that's currently associated with a login role, query the `profile` column of the `DBA_USERS` view.
+
+## Parameters
+
+`name`
+
+ The name of the role with which to associate the specified profile.
+
+`password`
+
+ The password associated with the role.
+
+`profile_name`
+
+ The name of the profile to associate with the role.
+
+`timestamp`
+
+ The date and time at which to enforce the clause. When specifying a value for `timestamp`, enclose the value in single quotes.
+
+## Examples
+
+This command uses the `ALTER USER... PROFILE` command to associate a profile named `acctg_profile` with a user named `john`:
+
+```sql
+ALTER USER john PROFILE acctg_profile;
+```
+
+The following command uses the `ALTER ROLE... PROFILE` command to associate a profile named `acctg_profile` with a user named `john`:
+
+```sql
+ALTER ROLE john PROFILE acctg_profile;
+```
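+
+For example, a minimal sketch (reusing the role name `john` from the examples above) that forces a user to choose a new password at their next login by expiring the current password immediately:
+
+```sql
+ALTER ROLE john PASSWORD EXPIRE;
+```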
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/05_unlocking_a_locked_account.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/05_unlocking_a_locked_account.mdx
new file mode 100644
index 00000000000..4ff718019b9
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/05_unlocking_a_locked_account.mdx
@@ -0,0 +1,72 @@
+---
+title: "Unlocking a locked account"
+description: "Describes how to use specific clauses in the ALTER USER|ROLE command to lock or unlock a user role"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.034.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.034.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.031.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/05_unlocking_a_locked_account/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+A database superuser can use clauses of the `ALTER USER|ROLE...` command to lock or unlock a role. The syntax is:
+
+```sql
+ALTER USER|ROLE <name>
+  ACCOUNT {LOCK|UNLOCK}
+  LOCK TIME '<timestamp>'
+```
+
+Include the `ACCOUNT LOCK` clause to lock a role immediately. When locked, a role's `LOGIN` functionality is disabled. When you specify the `ACCOUNT LOCK` clause without the `LOCK TIME` clause, the state of the role doesn't change until a superuser uses the `ACCOUNT UNLOCK` clause to unlock the role.
+
+Use the `ACCOUNT UNLOCK` clause to unlock a role.
+
+Use the `LOCK TIME 'timestamp'` clause to lock the account at the time specified by the given timestamp for the length of time specified by the `PASSWORD_LOCK_TIME` parameter of the profile associated with this role.
+
+Combine the `LOCK TIME 'timestamp'` clause and the `ACCOUNT LOCK` clause to lock an account at a specified time until the account is unlocked by a superuser invoking the `ACCOUNT UNLOCK` clause.
+
+## Parameters
+
+`name`
+
+ The name of the role that's being locked or unlocked.
+
+`timestamp`
+
+ The date and time when the role is locked. When specifying a value for `timestamp`, enclose the value in single quotes.
+
+!!! Note
+    This command (available only in EDB Postgres Advanced Server) is implemented to support Oracle-styled profile management.
+
+## Examples
+
+This example uses the `ACCOUNT LOCK` clause to lock the role named `john`. The account remains locked until the account is unlocked with the `ACCOUNT UNLOCK` clause.
+
+```sql
+ALTER ROLE john ACCOUNT LOCK;
+```
+
+This example uses the `ACCOUNT UNLOCK` clause to unlock the role named `john`:
+
+```sql
+ALTER USER john ACCOUNT UNLOCK;
+```
+
+This example uses the `LOCK TIME 'timestamp'` clause to lock the role named `john` on September 4, 2015:
+
+```sql
+ALTER ROLE john LOCK TIME 'September 4 12:00:00 2015';
+```
+
+The role remains locked for the length of time specified by the `PASSWORD_LOCK_TIME` parameter.
+
+This example combines the `LOCK TIME 'timestamp'` clause and the `ACCOUNT LOCK` clause to lock the role named `john` on September 4, 2015:
+
+```sql
+ALTER ROLE john LOCK TIME 'September 4 12:00:00 2015' ACCOUNT LOCK;
+```
+
+The role remains locked until a database superuser uses the `ACCOUNT UNLOCK` command to unlock the role.
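+
+To confirm a role's current state before unlocking it, you can query the Oracle-compatible `DBA_USERS` view mentioned earlier. The following is a sketch; it assumes the `username`, `account_status`, and `lock_date` columns of that view, and reuses the role name from the examples above:
+
+```sql
+-- Check whether the role is open or locked, and when it was locked.
+SELECT username, account_status, lock_date
+FROM dba_users
+WHERE username = 'john';
+```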
diff --git a/product_docs/docs/epas/17/epas_security_guide/04_profile_management/06_creating_a_new_role_associated_with_a_profile.mdx b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/06_creating_a_new_role_associated_with_a_profile.mdx
new file mode 100644
index 00000000000..aaf695f3d6e
--- /dev/null
+++ b/product_docs/docs/epas/17/epas_security_guide/04_profile_management/06_creating_a_new_role_associated_with_a_profile.mdx
@@ -0,0 +1,83 @@
+---
+title: "Creating a new role associated with a profile"
+description: "Describes how to assign a profile to a new role"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.035.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.035.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.032.html"
+redirects:
+  - /epas/latest/epas_compat_ora_dev_guide/04_profile_management/06_creating_a_new_role_associated_with_a_profile/ #generated for docs/epas/reorg-role-use-case-mode
+---
+
+
+
+A database superuser can use clauses of the `CREATE USER|ROLE` command to assign a named profile to a role when creating the role or to specify profile management details for a role. The command syntax related to profile management functionality is:
+
+```sql
+CREATE USER|ROLE <name> [[WITH]
[[AS] <alias>]
+  [USING <using_list>]
+  [WHERE <condition> | WHERE CURRENT OF <cursor_name>]
+  [{RETURNING|RETURN} * | <output_expression> [[AS] <output_name>]
+[, ...] INTO <host_variable_list>]
+```
+
+- Include the `FOR exec_count` clause to specify the number of times the statement executes. This clause is valid only if the `VALUES` clause references an array or a pointer to an array.
+- `table` is the name (optionally schema qualified) of an existing table. Include the `ONLY` clause to limit processing to the specified table. If you don't include the `ONLY` clause, any tables inheriting from the named table are also processed.
+- `alias` is a substitute name for the target table.
+- `using_list` is a list of table expressions, allowing columns from other tables to appear in the `WHERE` condition.
+- Include the `WHERE` clause to specify the rows to delete. If you don't include a `WHERE` clause in the statement, `DELETE` deletes all rows from the table, leaving the table definition intact.
+- `condition` is an expression, host variable, or parameter marker that returns a value of type `BOOLEAN`. Those rows for which `condition` returns true are deleted.
+- `cursor_name` is the name of the cursor to use in the `WHERE CURRENT OF` clause. The row to be deleted is the one most recently fetched from this cursor. The cursor must be a nongrouping query on the `DELETE` statement's target table. You can't specify `WHERE CURRENT OF` in a `DELETE` statement that includes a Boolean condition.
+
+The `RETURN/RETURNING` clause specifies an `output_expression` or `host_variable_list` that's returned by the `DELETE` command after each row is deleted:
+
+  - `output_expression` is an expression to be computed and returned by the `DELETE` command after each row is deleted. `output_name` is the name of the returned column. Include \* to return all columns.
+  - `host_variable_list` is a comma-separated list of host variables and optional indicator variables. Each host variable receives a corresponding value from the `RETURNING` clause.
+
+For example, the following statement deletes all rows from the `emp` table where the `sal` column contains a value greater than the value specified in the host variable `:max_sal`:
+
+```sql
+DELETE FROM emp WHERE sal > :max_sal;
+```
+
+For more information about using the `DELETE` statement, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-delete.html).
+
+## DESCRIBE
+
+Use the `DESCRIBE` statement to find the number of input values required by a prepared statement or the number of output values returned by a prepared statement. The `DESCRIBE` statement is used to analyze a SQL statement whose shape is unknown at the time you write your application.
+
+The `DESCRIBE` statement populates an `SQLDA` descriptor. To populate a SQL descriptor, use the `ALLOCATE DESCRIPTOR` and `DESCRIBE...DESCRIPTOR` statements.
+
+```sql
+EXEC SQL DESCRIBE BIND VARIABLES FOR <statement_name> INTO <descriptor>;
+```
+
+
+```sql
+EXEC SQL DESCRIBE SELECT LIST FOR <statement_name> INTO <descriptor>;
+```
+
+Where:
+
+- `statement_name` is the identifier associated with a prepared SQL statement or PL/SQL block.
+- `descriptor` is the name of a C variable of type `SQLDA*`. You must allocate the space for the descriptor by calling `sqlald()` and initialize the descriptor before executing the `DESCRIBE` statement.
+
+When you execute the first form of the `DESCRIBE` statement, ECPG populates the given descriptor with a description of each input variable *required* by the statement.
+For example, given two descriptors:
+
+```c
+SQLDA *query_values_in;
+SQLDA *query_values_out;
+```
+
+You might prepare a query that returns information from the `emp` table:
+
+```sql
+EXEC SQL PREPARE get_emp FROM
+  "SELECT ename, empno, sal FROM emp WHERE empno = ?";
+```
+
+The command requires one input variable for the parameter marker (?).
+
+```sql
+EXEC SQL DESCRIBE BIND VARIABLES
+  FOR get_emp INTO query_values_in;
+```
+
+After describing the bind variables for this statement, you can examine the descriptor to find the number of variables required and the type of each variable.
+
+When you execute the second form, ECPG populates the given descriptor with a description of each value returned by the statement. For example, the following statement returns three values:
+
+```sql
+EXEC SQL DESCRIBE SELECT LIST
+  FOR get_emp INTO query_values_out;
+```
+
+After describing the select list for this statement, you can examine the descriptor to find the number of returned values and the name and type of each value.
+
+Before executing the statement, you must bind a variable for each input value and a variable for each output value. The variables that you bind for the input values specify the actual values used by the statement. The variables that you bind for the output values tell ECPGPlus where to put the values when you execute the statement.
+
+The `DESCRIBE` statement is an alternative, Pro\*C-compatible syntax for the `DESCRIBE DESCRIPTOR` statement.
+
+## DESCRIBE DESCRIPTOR
+
+Use the `DESCRIBE DESCRIPTOR` statement to retrieve information about a SQL statement and store that information in a SQL descriptor. Before using `DESCRIBE DESCRIPTOR`, you must allocate the descriptor with the `ALLOCATE DESCRIPTOR` statement. The syntax is:
+
+```sql
+EXEC SQL DESCRIBE [INPUT | OUTPUT] <statement_name>
+  USING [SQL] DESCRIPTOR <descriptor_name>;
+```
+
+Where:
+
+- `statement_name` is the name of a prepared SQL statement.
+- `descriptor_name` is the name of the descriptor. `descriptor_name` can be a quoted string value or a host variable that contains the name of the descriptor.
+
+If you include the `INPUT` clause, ECPGPlus populates the given descriptor with a description of each input variable required by the statement.
+
+For example, given two descriptors:
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR query_values_in;
+EXEC SQL ALLOCATE DESCRIPTOR query_values_out;
+```
+
+You might prepare a query that returns information from the `emp` table:
+
+```sql
+EXEC SQL PREPARE get_emp FROM
+  "SELECT ename, empno, sal FROM emp WHERE empno = ?";
+```
+
+The command requires one input variable for the parameter marker (?).
+
+```sql
+EXEC SQL DESCRIBE INPUT get_emp USING 'query_values_in';
+```
+
+After describing the bind variables for this statement, you can examine the descriptor to find the number of variables required and the type of each variable.
+
+If you don't specify the `INPUT` clause, `DESCRIBE DESCRIPTOR` populates the specified descriptor with a description of the values returned by the statement.
+
+If you include the `OUTPUT` clause, ECPGPlus populates the given descriptor with a description of each value returned by the statement.
+
+For example, the following statement returns three values:
+
+```sql
+EXEC SQL DESCRIBE OUTPUT get_emp USING 'query_values_out';
+```
+
+After describing the select list for this statement, you can examine the descriptor to find the number of returned values and the name and type of each value.
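+
+For example, a minimal sketch (assuming an integer host variable `col_count` declared in the client) that reads the number of described values back from the descriptor by using the `GET DESCRIPTOR` statement covered later in this reference:
+
+```sql
+EXEC SQL GET DESCRIPTOR 'query_values_out' :col_count = COUNT;
+```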
+## DISCONNECT
+
+Use the `DISCONNECT` statement to close the connection to the server. The syntax is:
+
+```sql
+EXEC SQL DISCONNECT [<connection_name>][CURRENT][DEFAULT][ALL];
+```
+
+Where `connection_name` is the connection name specified in the `CONNECT` statement used to establish the connection. If you don't specify a connection name, the current connection is closed.
+
+Include the `CURRENT` keyword to instruct ECPGPlus to close the connection used most recently.
+
+Include the `DEFAULT` keyword to instruct ECPGPlus to close the connection named `DEFAULT`. If you don't specify a name when opening a connection, ECPGPlus assigns the name `DEFAULT` to the connection.
+
+Include the `ALL` keyword to close all active connections.
+
+The following example creates a connection named `hr_connection` that connects to the `hr` database and then disconnects from the connection:
+
+```c
+/* client.pgc */
+int main()
+{
+    EXEC SQL CONNECT TO hr AS hr_connection;
+    EXEC SQL DISCONNECT hr_connection;
+    return(0);
+}
+```
+
+## EXECUTE
+
+Use the `EXECUTE` statement to execute a statement previously prepared using an `EXEC SQL PREPARE` statement. The syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] EXECUTE <statement_name>
+  [USING {DESCRIPTOR <SQLDA_descriptor>
+  |:<host_variable> [[INDICATOR] :<indicator_variable>]}];
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to process. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `statement_name` specifies the name assigned to the statement when the statement was created using the `EXEC SQL PREPARE` statement.
+
+Include the `USING` clause to supply values for parameters in the prepared statement:
+
+- Include the `DESCRIPTOR` `SQLDA_descriptor` clause to provide an SQLDA descriptor value for a parameter.
+- Use a `host_variable` (and an optional `indicator_variable`) to provide a user-specified value for a parameter.
+
+The following example creates a prepared statement that inserts a record into the `emp` table:
+
+```sql
+EXEC SQL PREPARE add_emp (numeric, text, text, numeric) AS
+  INSERT INTO emp VALUES($1, $2, $3, $4);
+```
+
+Each time you invoke the prepared statement, provide fresh parameter values for the statement:
+
+```sql
+EXEC SQL EXECUTE add_emp USING 8000, 'DAWSON', 'CLERK', 7788;
+EXEC SQL EXECUTE add_emp USING 8001, 'EDWARDS', 'ANALYST', 7698;
+```
+
+## EXECUTE DESCRIPTOR
+
+Use the `EXECUTE` statement to execute a statement previously prepared by an `EXEC SQL PREPARE` statement, using an SQL descriptor. The syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] EXECUTE <statement_identifier>
+  [USING [SQL] DESCRIPTOR <descriptor_name>]
+  [INTO [SQL] DESCRIPTOR <descriptor_name>];
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to process. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `statement_identifier` specifies the identifier assigned to the statement with the `EXEC SQL PREPARE` statement.
+- `descriptor_name` specifies the name of a descriptor (as a single-quoted string literal), or a host variable that contains the name of a descriptor.
+
+Include the `USING` clause to specify values for any input parameters required by the prepared statement.
+
+Include the `INTO` clause to specify a descriptor into which the `EXECUTE` statement writes the results returned by the prepared statement.
+
+The following example executes the prepared statement, `give_raise`, using the values contained in the descriptor `stmtText`:
+
+```sql
+EXEC SQL PREPARE give_raise FROM :stmtText;
+EXEC SQL EXECUTE give_raise USING DESCRIPTOR :stmtText;
+```
+
+## EXECUTE...END EXEC
+
+Use the `EXECUTE…END-EXEC` statement to embed an anonymous block into a client application. The syntax is:
+
+```sql
+EXEC SQL [AT <database_name>] EXECUTE <anonymous_block> END-EXEC;
+```
+
+Where:
+
+- `database_name` is the database identifier or a host variable that contains the database identifier. If you omit the `AT` clause, the statement executes on the current default database.
+- `anonymous_block` is an inline sequence of PL/pgSQL or SPL statements and declarations. You can include host variables and optional indicator variables in the block. Each such variable is treated as an `IN/OUT` value.
+
+The following example executes an anonymous block:
+
+```sql
+EXEC SQL EXECUTE
+  BEGIN
+    IF (current_user = :admin_user_name) THEN
+      DBMS_OUTPUT.PUT_LINE('You are an administrator');
+    END IF;
+  END;
+END-EXEC;
+```
+
+!!! Note
+    The `EXECUTE…END EXEC` statement is supported only by EDB Postgres Advanced Server.
+
+## EXECUTE IMMEDIATE
+
+Use the `EXECUTE IMMEDIATE` statement to execute a string that contains a SQL command. The syntax is:
+
+```sql
+EXEC SQL [AT <database_name>] EXECUTE IMMEDIATE <command_text>;
+```
+
+Where:
+
+- `database_name` is the database identifier or a host variable that contains the database identifier. If you omit the `AT` clause, the statement executes on the current default database.
+- `command_text` is the command executed by the `EXECUTE IMMEDIATE` statement.
+
+This dynamic SQL statement is useful when you don't know the text of an SQL statement when writing a client application. For example, a client application might prompt a trusted user for a statement to execute. After the user provides the text of the statement as a string value, the statement is then executed with an `EXECUTE IMMEDIATE` command.
+
+The statement text can't contain references to host variables. If the statement might contain parameter markers or returns one or more values, use the `PREPARE` and `DESCRIBE` statements.
+
+The following example executes the command contained in the `:command_text` host variable:
+
+```sql
+EXEC SQL EXECUTE IMMEDIATE :command_text;
+```
+
+## FETCH
+
+Use the `FETCH` statement to return rows from a cursor into an SQLDA descriptor or a target list of host variables. Before using a `FETCH` statement to retrieve information from a cursor, you must prepare the cursor using `DECLARE` and `OPEN` statements. The statement syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] FETCH <cursor>
+  { USING DESCRIPTOR <SQLDA_descriptor> }|{ INTO <target_list> };
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `cursor` is the name of the cursor from which rows are being fetched or a host variable that contains the name of the cursor.
+
+If you include a `USING` clause, the `FETCH` statement populates the specified SQLDA descriptor with the values returned by the server.
+
+If you include an `INTO` clause, the `FETCH` statement populates the host variables (and optional indicator variables) specified in the `target_list`.
+
+The following code fragment declares a cursor named `employees` that retrieves the `employee number`, `name`, and `salary` from the `emp` table:
+
+```sql
+EXEC SQL DECLARE employees CURSOR FOR
+  SELECT empno, ename, sal FROM emp;
+EXEC SQL OPEN employees;
+EXEC SQL FETCH employees INTO :emp_no, :emp_name, :emp_sal;
+```
+
+## FETCH DESCRIPTOR
+
+Use the `FETCH DESCRIPTOR` statement to retrieve rows from a cursor into an SQL descriptor. The syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] FETCH <cursor>
+  INTO [SQL] DESCRIPTOR <descriptor_name>;
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `cursor` is the name of the cursor from which rows are fetched or a host variable that contains the name of the cursor. The client must `DECLARE` and `OPEN` the cursor before calling the `FETCH DESCRIPTOR` statement.
+- `descriptor_name` specifies the name of a descriptor (as a single-quoted string literal) or a host variable that contains the name of a descriptor. Prior to use, the descriptor must be allocated using an `ALLOCATE DESCRIPTOR` statement.
+
+Include the `INTO` clause to specify a SQL descriptor into which the `FETCH` statement writes the results returned by the prepared statement.
+
+The following example allocates a descriptor named `row_desc` that holds the description and the values of a specific row in the result set. It then declares and opens a cursor for a prepared statement (`my_cursor`) before looping through the rows in the result set, using a `FETCH` to retrieve the next row from the cursor into the descriptor:
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR 'row_desc';
+EXEC SQL DECLARE my_cursor CURSOR FOR query;
+EXEC SQL OPEN my_cursor;
+
+for( row = 0; ; row++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int col;
+    EXEC SQL END DECLARE SECTION;
+    EXEC SQL FETCH my_cursor INTO SQL DESCRIPTOR 'row_desc';
+```
+
+## GET DESCRIPTOR
+
+Use the `GET DESCRIPTOR` statement to retrieve information from a descriptor. The `GET DESCRIPTOR` statement comes in two forms. The first form returns the number of values (or columns) in the descriptor.
+
+```sql
+EXEC SQL GET DESCRIPTOR <descriptor_name>
+  :<host_variable> = COUNT;
+```
+
+The second form returns information about a specific value (specified by the `VALUE column_number` clause):
+
+```sql
+EXEC SQL [FOR <array_size>] GET DESCRIPTOR <descriptor_name>
+  VALUE <column_number> {:<host_variable> = <descriptor_item> {,…}};
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to process. If you specify an `array_size`, the `host_variable` must be an array of that size. For example, if `array_size` is `10`, `:host_variable` must be a 10-member array of `host_variables`. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `descriptor_name` specifies the name of a descriptor as a single-quoted string literal or a host variable that contains the name of a descriptor.
+
+Include the `VALUE` clause to specify the information retrieved from the descriptor.
+
+- `column_number` identifies the position of the variable in the descriptor.
+- `host_variable` specifies the name of the host variable that receives the value of the item.
+- `descriptor_item` specifies the type of the retrieved descriptor item.
+
+ECPGPlus implements the following `descriptor_item` types:
+
+- `TYPE`
+- `LENGTH`
+- `OCTET_LENGTH`
+- `RETURNED_LENGTH`
+- `RETURNED_OCTET_LENGTH`
+- `PRECISION`
+- `SCALE`
+- `NULLABLE`
+- `INDICATOR`
+- `DATA`
+- `NAME`
+
+The following code fragment shows using a `GET DESCRIPTOR` statement to obtain the number of columns in the result set of a query provided by the user:
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
+EXEC SQL PREPARE query FROM :stmt;
+EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
+EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT;
+```
+
+The example allocates an SQL descriptor named `parse_desc` before using a `PREPARE` statement to check the syntax of the string provided by the user (`:stmt`). A `DESCRIBE` statement populates the descriptor with a description of the query. The call to `EXEC SQL GET DESCRIPTOR` interrogates the descriptor to discover the number of columns (`:col_count`) in the result set.
+
+## INSERT
+
+Use the `INSERT` statement to add one or more rows to a table. The syntax for the ECPGPlus `INSERT` statement is the same as the syntax for the SQL statement, but you can use parameter markers and host variables any place that a value is allowed. The syntax is:
+
+```sql
+[FOR <exec_count>] INSERT INTO <table>
+  [(<column> [, ...])]
+  {DEFAULT VALUES |
+   VALUES ({<expression> | DEFAULT} [, ...])[, ...] | <query>}
+  [RETURNING * | <output_expression> [[ AS ] <output_name>] [, ...]]
+```
+
+Include the `FOR exec_count` clause to specify the number of times the statement executes. This clause is valid only if the `VALUES` clause references an array or a pointer to an array.
+
+- `table` specifies the (optionally schema-qualified) name of an existing table.
+- `column` is the name of a column in the table. The column name can be qualified with a subfield name or array subscript. Specify the `DEFAULT VALUES` clause to use default values for all columns.
+- `expression` is the expression, value, host variable, or parameter marker that's assigned to the corresponding column. Specify `DEFAULT` to fill the corresponding column with its default value.
+- `query` specifies a `SELECT` statement that supplies the rows to insert.
+- `output_expression` is an expression that's computed and returned by the `INSERT` command after each row is inserted. The expression can refer to any column in the table. Specify \* to return all columns of the inserted rows.
+- `output_name` specifies a name to use for a returned column.
+
+The following example adds a row to the `emp` table:
+
+```sql
+INSERT INTO emp (empno, ename, job, hiredate)
+  VALUES ('8400', :ename, 'CLERK', '2011-10-31');
+```
+
+!!! Note
+    The `INSERT` statement uses a host variable `:ename` to specify the value of the `ename` column.
+
+For more information about using the `INSERT` statement, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-insert.html).
+
+## OPEN
+
+Use the `OPEN` statement to open a cursor. The syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] OPEN <cursor> [USING <parameters>];
+```
+
+`parameters` is one of the following:
+
+```sql
+DESCRIPTOR <SQLDA_descriptor>
+```
+
+or
+
+```sql
+:<host_variable> [ [ INDICATOR ] :<indicator_variable>, … ]
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `cursor` is the name of the cursor being opened.
+- `parameters` is either `DESCRIPTOR SQLDA_descriptor` or a comma-separated list of `host variables` and optional `indicator variables` that initialize the cursor. If specifying an `SQLDA_descriptor`, the descriptor must be initialized with a `DESCRIBE` statement.
+
+The `OPEN` statement initializes a cursor using the values provided in `parameters`. Once initialized, the cursor result set remains unchanged unless the cursor is closed and reopened. A cursor is automatically closed when an application terminates.
+
+The following example declares a cursor named `employees` that queries the `emp` table. It returns the `employee number`, `name`, `salary`, and `commission` of an employee whose name matches a user-supplied value stored in the host variable `:emp_name`.
+
+```sql
+EXEC SQL DECLARE employees CURSOR FOR
+  SELECT
+    empno, ename, sal, comm
+  FROM
+    emp
+  WHERE ename = :emp_name;
+EXEC SQL OPEN employees;
+...
+```
+
+After declaring the cursor, the example uses an `OPEN` statement to make the contents of the cursor available to a client application.
+
+## OPEN DESCRIPTOR
+
+Use the `OPEN DESCRIPTOR` statement to open a cursor with a SQL descriptor. The syntax is:
+
+```sql
+EXEC SQL [FOR <array_size>] OPEN <cursor>
+  [USING [SQL] DESCRIPTOR <descriptor_name>]
+  [INTO [SQL] DESCRIPTOR <descriptor_name>];
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+- `cursor` is the name of the cursor being opened.
+- `descriptor_name` specifies the name of an SQL descriptor in the form of a single-quoted string literal or a host variable that contains the name of an SQL descriptor that contains the query that initializes the cursor.
+
+For example, the following statement opens a cursor named `emp_cursor` using the host variable `:employees`:
+
+```sql
+EXEC SQL OPEN emp_cursor USING DESCRIPTOR :employees;
+```
+
+## PREPARE
+
+Prepared statements are useful when a client application must perform a task multiple times. The statement is parsed, written, and planned only once rather than each time the statement is executed. This approach saves repetitive processing time.
+
+Use the `PREPARE` statement to prepare a SQL statement or PL/pgSQL block for execution. The statement is available in two forms. The first form is:
+
+```sql
+EXEC SQL [AT <database_name>] PREPARE <statement_name>
+  FROM <sql_statement>;
+```
+
+The second form is:
+
+```sql
+EXEC SQL [AT <database_name>] PREPARE <statement_name>
+  AS <sql_statement>;
+```
+
+Where:
+
+- `database_name` is the database identifier or a host variable that contains the database identifier against which the statement executes. If you omit the `AT` clause, the statement executes against the current default database.
+- `statement_name` is the identifier associated with a prepared SQL statement or PL/SQL block.
+- `sql_statement` can take the form of a `SELECT` statement, a single-quoted string literal, or a host variable that contains the text of an SQL statement.
+
+To include variables in a prepared statement, substitute placeholders (`$1, $2, $3`, and so on) for statement values that might change when you `PREPARE` the statement. When you `EXECUTE` the statement, provide a value for each parameter. Provide the values in the order in which they replace placeholders.
+
+The following example creates a prepared statement named `add_emp` that inserts a record into the `emp` table:
+
+```sql
+EXEC SQL PREPARE add_emp (int, text, text, numeric) AS
+  INSERT INTO emp VALUES($1, $2, $3, $4);
+```
+
+Each time you invoke the statement, provide fresh parameter values for the statement:
+
+```sql
+EXEC SQL EXECUTE add_emp(8003, 'Davis', 'CLERK', 2000.00);
+EXEC SQL EXECUTE add_emp(8004, 'Myer', 'CLERK', 2000.00);
+```
+
+!!! Note
+    A client application must issue a `PREPARE` statement in each session in which a statement executes. Prepared statements persist only for the duration of the current session.
+
+## ROLLBACK
+
+Use the `ROLLBACK` statement to abort the current transaction and discard any updates made by the transaction. The syntax is:
+
+```sql
+EXEC SQL [AT <database_name>] ROLLBACK [WORK]
+  [ { TO [SAVEPOINT] <savepoint> } | RELEASE ]
+```
+
+Where `database_name` is the database identifier or a host variable that contains the database identifier against which the statement executes. If you omit the `AT` clause, the statement executes against the current default database.
+
+Include the `TO` clause to abort any commands that executed after the specified `savepoint`. Use the `SAVEPOINT` statement to define the `savepoint`. If you omit the `TO` clause, the `ROLLBACK` statement aborts the transaction, discarding all updates.
+
+Include the `RELEASE` clause to roll back the transaction and then close the connection.
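+
+For example, per the syntax above, a minimal sketch that rolls back the current transaction and then closes the connection in a single statement:
+
+```sql
+EXEC SQL ROLLBACK RELEASE;
+```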
+
+Use the following statement to roll back a complete transaction:
+
+```sql
+EXEC SQL ROLLBACK;
+```
+
+Invoking this statement aborts the transaction, undoing all changes, erasing any savepoints, and releasing all transaction locks. Suppose you include a savepoint (`my_savepoint` in the following example):
+
+```sql
+EXEC SQL ROLLBACK TO SAVEPOINT my_savepoint;
+```
+
+Only the portion of the transaction that occurred after `my_savepoint` is rolled back. `my_savepoint` is retained, but any savepoints created after `my_savepoint` are erased.
+
+Rolling back to a specified savepoint releases all locks acquired after the savepoint.
+
+## SAVEPOINT
+
+Use the `SAVEPOINT` statement to define a *savepoint*. A savepoint is a marker in a transaction. You can use a `ROLLBACK` statement to abort the current transaction, returning the state of the server to its condition prior to the specified savepoint. The syntax of a `SAVEPOINT` statement is:
+
+```sql
+EXEC SQL [AT <database_name>] SAVEPOINT <savepoint_name>
+```
+
+Where:
+
+- `database_name` is the database identifier or a host variable that contains the database identifier against which the savepoint resides. If you omit the `AT` clause, the statement executes against the current default database.
+- `savepoint_name` is the name of the savepoint. If you reuse a `savepoint_name`, the original savepoint is discarded.
+
+You can establish savepoints only in a transaction block. A transaction block can contain multiple savepoints.
+
+To create a savepoint named `my_savepoint`, include the statement:
+
+```sql
+EXEC SQL SAVEPOINT my_savepoint;
+```
+
+## SELECT
+
+ECPGPlus extends support of the SQL `SELECT` statement by providing the `INTO host_variables` clause. The clause allows you to select specified information from an EDB Postgres Advanced Server database into a host variable. The syntax for the `SELECT` statement is:
+
+```sql
+EXEC SQL [AT <database_name>]
+SELECT
+  [ <optimizer_hint> ]
+  [ ALL | DISTINCT [ ON( <expression>, ...) ]]
+  select_list INTO <host_variables>
+  [ FROM from_item [, from_item ]...]
+  [ WHERE condition ]
+  [ hierarchical_query_clause ]
+  [ GROUP BY expression [, ...]]
+  [ HAVING condition ]
+  [ { UNION [ ALL ] | INTERSECT | MINUS } (subquery) ]
+  [ ORDER BY expression [order_by_options]]
+  [ LIMIT { count | ALL }]
+  [ OFFSET start [ ROW | ROWS ] ]
+  [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+  [ FOR { UPDATE | SHARE } [OF table_name [, ...]][NOWAIT ][...]]
+```
+
+Where:
+
+- `database_name` is the name of the database or host variable that contains the name of the database in which the table resides. This value can take the form of an unquoted string literal or of a host variable.
+- `host_variables` is a list of host variables populated by the `SELECT` statement. If the `SELECT` statement returns more than a single row, `host_variables` must be an array.
+
+ECPGPlus provides support for the additional clauses of the SQL `SELECT` statement as documented in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-select.html).
+
+To use the `INTO host_variables` clause, include the names of defined host variables when specifying the `SELECT` statement. For example, the following `SELECT` statement populates the `:emp_name` and `:emp_sal` host variables with an employee name and salary:
+
+```sql
+EXEC SQL SELECT ename, sal
+  INTO :emp_name, :emp_sal
+  FROM emp
+  WHERE empno = 7988;
+```
+
+The enhanced `SELECT` statement also allows you to include parameter markers (question marks) in any clause where a value is allowed.
For example, the following query contains a parameter marker in the `WHERE` clause:
+
+```sql
+SELECT * FROM emp WHERE dept_no = ?;
+```
+
+This `SELECT` statement allows you to provide a value at runtime for the `dept_no` parameter marker.
+
+## SET CONNECTION
+
+There are at least three reasons you might need more than one connection in a given client application:
+
+- You might want different privileges for different statements.
+- You might need to interact with multiple databases in the same client.
+- Multiple threads of execution in a client application can't share a connection concurrently.
+
+The syntax for the `SET CONNECTION` statement is:
+
+```sql
+EXEC SQL SET CONNECTION <connection_name>;
+```
+
+Where `connection_name` is the name of the connection to the database.
+
+To use the `SET CONNECTION` statement, open the connection to the database using the second form of the `CONNECT` statement. Include the `AS` clause to specify a `connection_name`.
+
+By default, the current thread uses the current connection. Use the `SET CONNECTION` statement to specify a default connection for the current thread to use. The default connection is used only when you execute an `EXEC SQL` statement that doesn't explicitly specify a connection name. For example, the following statement uses the default connection because it doesn't include an `AT connection_name` clause:
+
+```sql
+EXEC SQL DELETE FROM emp;
+```
+
+This statement doesn't use the default connection because it specifies a connection name using the `AT connection_name` clause:
+
+```sql
+EXEC SQL AT acctg_conn DELETE FROM emp;
+```
+
+For example, suppose a client application creates and maintains multiple connections using the following `CONNECT` statements:
+
+```sql
+EXEC SQL CONNECT TO edb AS acctg_conn
+    USER 'alice' IDENTIFIED BY 'acctpwd';
+```
+
+```sql
+EXEC SQL CONNECT TO edb AS hr_conn
+    USER 'bob' IDENTIFIED BY 'hrpwd';
+```
+
+The application can then switch between the connections with the `SET CONNECTION` statement:
+
+```sql
+SET CONNECTION acctg_conn;
+```
+
+or
+
+```sql
+SET CONNECTION hr_conn;
+```
+
+The server uses the privileges associated with the connection when determining the privileges available to the connecting client. When using the `acctg_conn` connection, the client has the privileges associated with the role `alice`. When connected using `hr_conn`, the client has the privileges associated with the role `bob`.
+
+## SET DESCRIPTOR
+
+Use the `SET DESCRIPTOR` statement to assign a value to a descriptor area using information provided by the client application in the form of a host variable or an integer value. The statement comes in two forms. The first form is:
+
+```sql
+EXEC SQL [FOR <array_size>] SET DESCRIPTOR <descriptor_name>
+  VALUE <column_number> <descriptor_item> = <host_variable>;
+```
+
+The second form is:
+
+```sql
+EXEC SQL [FOR <array_size>] SET DESCRIPTOR <descriptor_name>
+  COUNT = integer;
+```
+
+Where:
+
+- `array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement executes once for each member of the array.
+- `descriptor_name` specifies the name of a descriptor as a single-quoted string literal or a host variable that contains the name of a descriptor.
+
+Include the `VALUE` clause to describe the information stored in the descriptor.
+
+- `column_number` identifies the position of the variable within the descriptor.
+- `descriptor_item` specifies the type of the descriptor item.
+- `host_variable` specifies the name of the host variable that contains the value of the item.
+
+ECPGPlus implements the following `descriptor_item` types:
+
+- `TYPE`
+- `LENGTH`
+- `[REF] INDICATOR`
+- `[REF] DATA`
+- `[REF] RETURNED LENGTH`
+
+For example, a client application might prompt a user for a dynamically created query:
+
+```c
+query_text = promptUser("Enter a query");
+```
+
+To execute a dynamically created query, you must first prepare the query (parsing and validating the syntax of the query) and then describe the input parameters found in the query using the `EXEC SQL DESCRIBE INPUT` statement.
+
+```sql
+EXEC SQL ALLOCATE DESCRIPTOR query_params;
+EXEC SQL PREPARE emp_query FROM :query_text;
+
+EXEC SQL DESCRIBE INPUT emp_query
+    USING SQL DESCRIPTOR 'query_params';
+```
+
+After describing the query, the `query_params` descriptor contains information about each parameter required by the query.
+
+For this example, assume that the user entered:
+
+```sql
+SELECT ename FROM emp WHERE sal > ? AND job = ?;
+```
+
+In this case, the descriptor describes two parameters, one for `sal > ?` and one for `job = ?`.
+
+To discover the number of parameter markers (question marks) in the query and therefore the number of values you must provide before executing the query, use:
+
+```sql
+EXEC SQL GET DESCRIPTOR … :host_variable = COUNT;
+```
+
+Then, you can use `EXEC SQL GET DESCRIPTOR` to retrieve the name of each parameter. You can also use `EXEC SQL GET DESCRIPTOR` to retrieve the type of each parameter from the descriptor, along with the number of parameters. Or you can supply each `value` in the form of a character string and ECPG converts that string into the required data type.
+
+In this example, the data type of the first parameter is `numeric`. The type of the second parameter is `varchar`. The name of the first parameter is `sal`. The name of the second parameter is `job`.
+
+Next, loop through each parameter, prompting the user for a value, and store those values in host variables. You can use `GET DESCRIPTOR … COUNT` to find the number of parameters in the query.
+
+```sql
+EXEC SQL GET DESCRIPTOR 'query_params'
+    :param_count = COUNT;
+
+for(param_number = 1;
+    param_number <= param_count;
+    param_number++)
+{
+```
+
+Use `GET DESCRIPTOR` to copy the name of the parameter into the `param_name` host variable:
+
+```sql
+EXEC SQL GET DESCRIPTOR 'query_params'
+    VALUE :param_number :param_name = NAME;
+
+reply = promptUser(param_name);
+if (reply == NULL)
+    reply_ind = 1; /* NULL */
+else
+    reply_ind = 0; /* NOT NULL */
+```
+
+To associate a `value` with each parameter, you use the `EXEC SQL SET DESCRIPTOR` statement. For example:
+
+```sql
+EXEC SQL SET DESCRIPTOR 'query_params'
+    VALUE :param_number DATA = :reply;
+EXEC SQL SET DESCRIPTOR 'query_params'
+    VALUE :param_number INDICATOR = :reply_ind;
+}
+```
+
+Now, you can use the `EXEC SQL EXECUTE` statement with the `USING SQL DESCRIPTOR` clause to execute the prepared statement on the server.
+
+## UPDATE
+
+Use an `UPDATE` statement to modify the data stored in a table. The syntax is:
+
+```sql
+EXEC SQL [AT <database_name>][FOR <exec_count>]
+  UPDATE [ ONLY ] table [ [ AS ] alias ]
+    SET {column = { expression | DEFAULT } |
+        (column [, ...]) = ({ expression|DEFAULT } [, ...])} [, ...]
+    [ FROM from_list ]
+    [ WHERE condition | WHERE CURRENT OF cursor_name ]
+    [ RETURNING * | output_expression [[ AS ] output_name] [, ...] ]
+```
+
+Where `database_name` is the name of the database or host variable that contains the name of the database in which the table resides. This value can take the form of an unquoted string literal or of a host variable.
+
+Include the `FOR exec_count` clause to specify the number of times the statement executes. This clause is valid only if the `SET` or `WHERE` clause contains an array.
+
+ECPGPlus provides support for the additional clauses of the SQL `UPDATE` statement as documented in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-update.html).
+
+You can use a host variable in any clause that specifies a value. To use a host variable, substitute a defined variable for any value associated with any of the documented `UPDATE` clauses.
+
+The following `UPDATE` statement changes the job description of an employee (identified by the `:ename` host variable) to the value contained in the `:new_job` host variable. It increases the employee's salary by multiplying the current salary by the value in the `:increase` host variable:
+
+```sql
+EXEC SQL UPDATE emp
+    SET job = :new_job, sal = sal * :increase
+    WHERE ename = :ename;
+```
+
+The enhanced `UPDATE` statement also allows you to include parameter markers (question marks) in any clause where an input value is permitted. For example, you can write the same update statement with parameter markers in the `SET` clause:
+
+```sql
+EXEC SQL UPDATE emp
+    SET job = ?, sal = sal * ?
+    WHERE ename = :ename;
+```
+
+This `UPDATE` statement allows you to prompt the user for a new value for the `job` column and provide the amount by which the `sal` column is incremented for the employee specified by `:ename`.
+
+## WHENEVER
+
+Use the `WHENEVER` statement to specify the action taken by a client application when it encounters an SQL error or warning. The syntax is:
+
+```sql
+EXEC SQL WHENEVER <condition> <action>;
+```
+
+The following table describes the different conditions that might trigger an `action`.
+
+| Condition | Description |
+| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `NOT FOUND` | The server returns a `NOT FOUND` condition when it encounters a `SELECT` that returns no rows or when a `FETCH` reaches the end of a result set. |
+| `SQLERROR` | The server returns an `SQLERROR` condition when it encounters a serious error returned by an SQL statement. |
+| `SQLWARNING` | The server returns an `SQLWARNING` condition when it encounters a nonfatal warning returned by an SQL statement. |
+
+The following table describes the actions that result from a client encountering a `condition`.
+
+| Action | Description |
+| ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `CALL function [([args])]` | Call the named `function`. |
+| `CONTINUE` | Proceed to the next statement. |
+| `DO BREAK` | Emit a C `break` statement. A `break` statement can appear in a loop or a `switch` statement. If executed, the `break` statement terminates the loop or the `switch` statement. |
+| `DO CONTINUE` | Emit a C `continue` statement. A `continue` statement can exist only in a loop. If executed, it causes the flow of control to return to the top of the loop. |
+| `DO function ([args])` | Call the named `function`. |
+| `GOTO label` or `GO TO label` | Proceed to the statement that contains the label. |
+| `SQLPRINT` | Print a message to standard error. |
+| `STOP` | Stop executing. |
+
+The following code fragment prints a message if the client application encounters a warning and aborts the application if it encounters an error:
+
+```sql
+EXEC SQL WHENEVER SQLWARNING SQLPRINT;
+EXEC SQL WHENEVER SQLERROR STOP;
+```
+
+Include the following code to specify that the client continue processing after warning the user of a problem:
+
+```sql
+EXEC SQL WHENEVER SQLWARNING SQLPRINT;
+```
+
+Include the following code to call a function if a query returns no rows or when a cursor reaches the end of a result set:
+
+```sql
+EXEC SQL WHENEVER NOT FOUND CALL error_handler(__LINE__);
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/index.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/index.mdx
new file mode 100644
index 00000000000..c855e0c154e
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/index.mdx
@@ -0,0 +1,75 @@
+---
+title: "Language element reference"
+description: "Description of the ECPGPlus language elements"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.55.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.56.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.57.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.23.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.49.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.51.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.28.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.24.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.27.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.48.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.29.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.25.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.30.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.47.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.31.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.46.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.32.html"
+  - 
"/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.45.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.33.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.44.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.34.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.26.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.43.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.35.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.52.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.50.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.42.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.53.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.54.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.36.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.59.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.58.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.41.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.37.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.40.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.38.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/EDB_Postgres_Advanced_Server_ecpgPlus_Guide.1.39.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.093.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.058.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.031.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.033.html" + - 
"/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.085.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.087.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.082.html" + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-reference-guide/9.6/Database_Compatibility_for_Oracle_Developers_Reference_Guide.1.088.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.349.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.351.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.352.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.350.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.348.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.114.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.119.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.113.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.111.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.108.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.086.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.063.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.061.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.103.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.104.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.100.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.102.html" + - 
"/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.101.html" +--- + + + +ECPGPlus has these language elements. + diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/sqlda_structure.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/sqlda_structure.mdx new file mode 100644 index 00000000000..9e4a92a10a9 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/sqlda_structure.mdx @@ -0,0 +1,120 @@ +--- +title: "The SQLDA structure" +--- + +Oracle Dynamic SQL method 4 uses the SQLDA data structure to hold the data and metadata for a dynamic SQL statement. A SQLDA structure can describe a set of input parameters corresponding to the parameter markers found in the text of a dynamic statement or the result set of a dynamic statement. + +## Layout + +The layout of the SQLDA structure is: + +```c +struct SQLDA +{ + int N; /* Number of entries */ + char **V; /* Variables */ + int *L; /* Variable lengths */ + short *T; /* Variable types */ + short **I; /* Indicators */ + int F; /* Count of variables discovered by DESCRIBE */ + char **S; /* Variable names */ + short *M; /* Variable name maximum lengths */ + short *C; /* Variable name actual lengths */ + char **X; /* Indicator names */ + short *Y; /* Indicator name maximum lengths */ + short *Z; /* Indicator name actual lengths */ +}; +``` + +## Parameters + +`N - maximum number of entries` + + The `N` structure member contains the maximum number of entries that the SQLDA can describe. This member is populated by the `sqlald()` function when you allocate the SQLDA structure. Before using a descriptor in an `OPEN` or `FETCH` statement, you must set `N` to the actual number of values described. + +`V - data values` + + The `V` structure member is a pointer to an array of data values. + +- For a `SELECT`-list descriptor, `V` points to an array of values returned by a `FETCH` statement. Each member in the array corresponds to a column in the result set. +- For a bind descriptor, `V` points to an array of parameter values. You must populate the values in this array before opening a cursor that uses the descriptor. + +Your application must allocate the space required to hold each value. See [displayResultSet](/epas/latest/application_programming/ecpgplus_guide/05_building_executing_dynamic_sql_statements/#executing_query_with_unknown_number_of_variables) for an example of how to allocate space for `SELECT`-list values. + +`L - length of each data value` + + The `L` structure member is a pointer to an array of lengths. Each member of this array must indicate the amount of memory available in the corresponding member of the `V` array. For example, if `V[5]` points to a buffer large enough to hold a 20-byte NULL-terminated string, `L[5]` must contain the value 21 (20 bytes for the characters in the string plus 1 byte for the NULL-terminator). Your application must set each member of the `L` array. + +`T - data types` + + The `T` structure member points to an array of data types, one for each column (or parameter) described by the descriptor. + +- For a bind descriptor, you must set each member of the `T` array to tell ECPGPlus the data type of each parameter. +- For a `SELECT`-list descriptor, the `DESCRIBE SELECT LIST` statement sets each member of the `T` array to reflect the type of data found in the corresponding column. 
+
+You can change any member of the `T` array before executing a `FETCH` statement to force ECPGPlus to convert the corresponding value to a specific data type. For example, if the `DESCRIBE SELECT LIST` statement indicates that a given column is of type `DATE`, you can change the corresponding `T` member to request that the next `FETCH` statement return that value in the form of a NULL-terminated string.
+
+Each member of the `T` array is a numeric type code (see [Type Codes](type_codes.mdx) for a list of type codes). The type codes returned by a `DESCRIBE SELECT LIST` statement differ from those expected by a `FETCH` statement. After executing a `DESCRIBE SELECT LIST` statement, each member of `T` encodes a data type and a flag indicating whether the corresponding column is nullable. You can use the `sqlnul()` function to extract the type code and nullable flag from a member of the `T` array. The signature of the `sqlnul()` function is as follows:
+
+```c
+void sqlnul(unsigned short *valType,
+            unsigned short *typeCode,
+            int *isNull)
+```
+
+For example, to find the type code and nullable flag for the third column of a descriptor named `results`, invoke `sqlnul()` as follows:
+
+```c
+sqlnul(&results->T[2], &typeCode, &isNull);
+```
+
+`I - indicator variables`
+
+ The `I` structure member points to an array of indicator variables. This array is allocated for you when your application calls the `sqlald()` function to allocate the descriptor.
+
+- For a `SELECT`-list descriptor, each member of the `I` array indicates whether the corresponding column contains a NULL (non-zero) or non-NULL (zero) value.
+- For a bind parameter, your application must set each member of the `I` array to indicate whether the corresponding parameter value is NULL.
+
+`F - number of entries`
+
+ The `F` structure member indicates how many values are described by the descriptor. The `N` structure member indicates the maximum number of values that the descriptor can describe; `F` indicates the actual number of values. The value of the `F` member is set by ECPGPlus when you execute a `DESCRIBE` statement. `F` can be positive, negative, or zero.
+
+- For a `SELECT`-list descriptor, `F` contains a positive value if the number of columns in the result set is equal to or less than the maximum number of values permitted by the descriptor (as determined by the `N` structure member). It contains 0 if the statement isn't a `SELECT` statement. It contains a negative value if the query returns more columns than allowed by the `N` structure member.
+- For a bind descriptor, `F` contains a positive number if the number of parameters found in the statement is less than or equal to the maximum number of values permitted by the descriptor (as determined by the `N` structure member). It contains 0 if the statement contains no parameter markers. It contains a negative value if the statement contains more parameter markers than allowed by the `N` structure member.
+
+If `F` contains a positive number (after executing a `DESCRIBE` statement), that number reflects the count of columns in the result set (for a `SELECT`-list descriptor) or the number of parameter markers found in the statement (for a bind descriptor). If `F` contains a negative value, you can compute the absolute value of `F` to discover how many values or parameter markers are required. For example, if `F` contains `-24` after describing a `SELECT` list, you know that the query returns 24 columns.
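+
+The following hypothetical fragment (the statement and descriptor names are assumptions) sketches this check: it describes a prepared query, uses the sign of `F` to detect overflow, and otherwise copies `F` into `N` before fetching:
+
+```c
+EXEC SQL DESCRIBE SELECT LIST FOR emp_query INTO results;
+
+if (results->F < 0)
+{
+    /* The query returns more columns than this descriptor allows;   */
+    /* the absolute value of F is the number of columns required.    */
+    int columns_needed = -(results->F);
+    /* Allocate a larger descriptor with sqlald() and describe again. */
+}
+else
+{
+    /* Record the actual number of values described before fetching. */
+    results->N = results->F;
+}
+```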
+ +`S - column/parameter names` + + The `S` structure member points to an array of NULL-terminated strings. + +- For a `SELECT`-list descriptor, the `DESCRIBE SELECT LIST` statement sets each member of this array to the name of the corresponding column in the result set. +- For a bind descriptor, the `DESCRIBE BIND VARIABLES` statement sets each member of this array to the name of the corresponding bind variable. + +In this release, the name of each bind variable is determined by the left-to-right order of the parameter marker within the query. For example, the name of the first parameter is always `?0`, the name of the second parameter is always `?1`, and so on. + +`M - maximum column/parameter name length` + + The `M` structure member points to an array of lengths. Each member in this array specifies the maximum length of the corresponding member of the `S` array (that is, `M[0]` specifies the maximum length of the column/parameter name found at `S[0]`). This array is populated by the `sqlald()` function. + +`C - actual column/parameter name length` + + The `C` structure member points to an array of lengths. Each member in this array specifies the actual length of the corresponding member of the `S` array (that is, `C[0]` specifies the actual length of the column/parameter name found at `S[0]`). + + This array is populated by the `DESCRIBE` statement. + +`X - indicator variable names` + + The `X` structure member points to an array of NULL-terminated strings. Each string represents the name of a NULL indicator for the corresponding value. + + This array isn't used by ECPGPlus but is provided for compatibility with Pro\*C applications. + +`Y - maximum indicator name length` + + The `Y` structure member points to an array of lengths. Each member in this array specifies the maximum length of the corresponding member of the `X` array (that is, `Y[0]` specifies the maximum length of the indicator name found at `X[0]`). + + This array isn't used by ECPGPlus but is provided for compatibility with Pro\*C applications. + +`Z - actual indicator name length` + + The `Z` structure member points to an array of lengths. Each member in this array specifies the actual length of the corresponding member of the `X` array (that is, `Z[0]` specifies the actual length of the indicator name found at `X[0]`). + + This array isn't used by ECPGPlus but is provided for compatibility with Pro\*C applications. diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/supported_c_data_types.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/supported_c_data_types.mdx new file mode 100644 index 00000000000..3a9fd43129e --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/supported_c_data_types.mdx @@ -0,0 +1,36 @@ +--- +title: "Supported C data types" +--- + + + +An ECPGPlus application must deal with two sets of data types: SQL data types (such as `SMALLINT`, `DOUBLE PRECISION`, and `CHARACTER VARYING`) and C data types (like `short`, `double`, and `varchar[n]`). When an application fetches data from the server, ECPGPlus maps each SQL data type to the type of the C variable into which the data is returned. + +In general, ECPGPlus can convert most SQL server types into similar C types, but not all combinations are valid. 
For example, ECPGPlus tries to convert a SQL character value into a C integer value, but the conversion might fail at execution time if the SQL character value contains non-numeric characters. + +The reverse is also true. When an application sends a value to the server, ECPGPlus tries to convert the C data type into the required SQL type. Again, the conversion might fail at execution time if the C value can't be converted into the required SQL type. + +ECPGPlus can convert any SQL type into C character values (`char[n]` or `varchar[n]`). Although it's safe to convert any SQL type to or from `char[n]` or `varchar[n]`, it's often convenient to use more natural C types such as `int`, `double`, or `float`. + +The supported C data types are: + +- `short` +- `int` +- `unsigned int` +- `long long int` +- `float` +- `double` +- `char[n+1]` +- `varchar[n+1]` +- `bool` +- Any equivalent created by a `typedef` + +In addition to the numeric and character types supported by C, the `pgtypeslib` runtime library offers custom data types, as well as functions to operate on those types, for dealing with date/time and exact numeric values: + +- `timestamp` +- `interval` +- `date` +- `decimal` +- `numeric` + +To use a data type supplied by `pgtypeslib`, you must `#include` the proper header file. \ No newline at end of file diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/type_codes.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/type_codes.mdx new file mode 100644 index 00000000000..f1680125d20 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/07_reference/type_codes.mdx @@ -0,0 +1,40 @@ +--- +title: "Type codes" +--- + +## Type codes for external data types + +The following table contains the type codes for *external* data types. An external data type is used to indicate the type of a C host variable. When an application binds a value to a parameter or binds a buffer to a `SELECT`-list item, set the type code in the corresponding SQLDA descriptor `(descriptor->T[column])` to one of the following values: + +| Type code | Host variable type (C data type) | +| ------------------------------------------------- | ----------------------------------------- | +| `1, 2, 8, 11, 12, 15, 23, 24, 91, 94, 95, 96, 97` | `char[]` | +| `3` | `int` | +| `4, 7, 21` | `float` | +| `5, 6` | `null-terminated string (char[length+1])` | +| `9` | `varchar` | +| `22` | `double` | +| `68` | `unsigned int` | + +## Type codes for internal data types + +The following table contains the type codes for *internal* data types. An internal type code is used to indicate the type of a value as it resides in the database. The `DESCRIBE SELECT LIST` statement populates the data type array `(descriptor->T[column])` using the following values. 
+
+| Internal type code | Server type |
+| ---------------------- | ------------------------ |
+| `1` | `VARCHAR2` |
+| `2` | `NUMBER` |
+| `8` | `LONG` |
+| `11` | `ROWID` |
+| `12` | `DATE` |
+| `23` | `RAW` |
+| `24` | `LONG RAW` |
+| `96` | `CHAR` |
+| `100` | `BINARY FLOAT` |
+| `101` | `BINARY DOUBLE` |
+| `104` | `UROWID` |
+| `187` | `TIMESTAMP` |
+| `188` | `TIMESTAMP W/TIMEZONE` |
+| `189` | `INTERVAL YEAR TO MONTH` |
+| `190` | `INTERVAL DAY TO SECOND` |
+| `232` | `TIMESTAMP LOCAL_TZ` |
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/index.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/index.mdx
new file mode 100644
index 00000000000..6cc9e065670
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/index.mdx
@@ -0,0 +1,7 @@
+---
+title: "Application programmer reference"
+indexCards: simple
+description: "Contains reference information applicable to application programmer tasks, such as the layout of system catalog tables"
+---
+
+This reference information applies to application programmers.
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/02_case_sensitivity.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/02_case_sensitivity.mdx
new file mode 100644
index 00000000000..29207131d8b
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/02_case_sensitivity.mdx
@@ -0,0 +1,26 @@
+---
+title: "Case sensitivity"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/02_case_sensitivity/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/02_case_sensitivity/
+---
+
+
+
+Keywords and user-defined identifiers that are used in an SPL program are case insensitive. For example, the statement `DBMS_OUTPUT.PUT_LINE('Hello World');` is interpreted the same as `dbms_output.put_line('Hello World');` or `Dbms_Output.Put_Line('Hello World');` or `DBMS_output.Put_line('Hello World');`.
+
+Character and string constants, however, are case sensitive. Data retrieved from the EDB Postgres Advanced Server database or from other external sources is also case sensitive.
+
+The statement `DBMS_OUTPUT.PUT_LINE('Hello World!');` produces the following output:
+
+```sql
+__OUTPUT__
+Hello World!
+```
+
+The statement `DBMS_OUTPUT.PUT_LINE('HELLO WORLD!');` produces this output:
+
+```sql
+__OUTPUT__
+HELLO WORLD!
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/03_identifiers.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/03_identifiers.mdx
new file mode 100644
index 00000000000..7ebd112c630
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/03_identifiers.mdx
@@ -0,0 +1,21 @@
+---
+title: "Identifiers"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/03_identifiers/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/03_identifiers/
+---
+
+
+
+*Identifiers* are user-defined names that identify elements of an SPL program including variables, cursors, labels, programs, and parameters. The syntax rules for valid identifiers are the same as for identifiers in the SQL language.
+
+An identifier can't be the same as an SPL keyword or a keyword of the SQL language. The following are some examples of valid identifiers:
+
+```text
+x
+last___name
+a_$_Sign
+Many$$$$$$$$signs_____
+THIS_IS_AN_EXTREMELY_LONG_NAME
+A1
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/04_qualifiers.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/04_qualifiers.mdx
new file mode 100644
index 00000000000..209302b11f8
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/04_qualifiers.mdx
@@ -0,0 +1,45 @@
+---
+title: "Qualifiers"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/04_qualifiers/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/04_qualifiers/
+---
+
+
+
+A *qualifier* is a name that specifies the owner or context of an entity that's the object of the qualifier. Specify a qualified object using these elements, in order:
+
+1. The qualifier name
+2. A dot with no intervening white space
+3. The name of the object being qualified with no intervening white space
+
+This syntax is called *dot notation*.
+
+The following is the syntax of a qualified object.
+
+```text
+<qualifier>. [ <qualifier>. ]... <object>
+```
+
+`qualifier` is the name of the owner of the object. `object` is the name of the entity belonging to `qualifier`. You can have a chain of qualifications in which the preceding qualifier owns the entity identified by the subsequent qualifiers and object.
+
+You can qualify almost any identifier. How you qualify an identifier depends on what the identifier represents and its context.
+
+Some examples of qualification follow:
+
+- Procedure and function names qualified by the schema to which they belong, e.g., `schema_name.procedure_name(...)`
+- Trigger names qualified by the schema to which they belong, e.g., `schema_name.trigger_name`
+- Column names qualified by the table to which they belong, e.g., `emp.empno`
+- Table names qualified by the schema to which they belong, e.g., `public.emp`
+- Column names qualified by table and schema, e.g., `public.emp.empno`
+
+As a general rule, where a name appears in the syntax of an SPL statement you can use its qualified name as well.
Typically, a qualified name is used only if ambiguity is associated with the name. Examples of ambiguity include:
+
+- Two procedures with the same name belonging to two different schemas are invoked from a program.
+- The same name is used for a table column and SPL variable in the same program.
+
+Avoid using qualified names if possible. We use the following conventions to avoid naming conflicts:
+
+- All variables declared in the declaration section of an SPL program are prefixed by `v_`, e.g., `v_empno`.
+- All formal parameters declared in a procedure or function definition are prefixed by `p_`, e.g., `p_empno`.
+- Column names and table names don't have any special prefix conventions, e.g., column `empno` in table `emp`.
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/05_constants.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/05_constants.mdx
new file mode 100644
index 00000000000..563e959f122
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/05_constants.mdx
@@ -0,0 +1,17 @@
+---
+title: "Constants"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.041.html"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/05_constants/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/05_constants/
+---
+
+
+
+*Constants* or *literals* are fixed values that you can use in SPL programs to represent values of various types such as numbers, strings, and dates. Constants come in the following types:
+
+- Numeric (integer and real)
+- Character and string
+- Date/time
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/06_user_defined_pl_sql_subtypes.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/06_user_defined_pl_sql_subtypes.mdx
new file mode 100644
index 00000000000..c8e67ed7ab6
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/06_user_defined_pl_sql_subtypes.mdx
@@ -0,0 +1,118 @@
+---
+title: "User-defined PL/SQL subtypes"
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.048.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.142.html" +redirects: + - /epas/latest/epas_compat_spl/01_basic_spl_elements/06_user_defined_pl_sql_subtypes/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/06_user_defined_pl_sql_subtypes/ +--- + + + +EDB Postgres Advanced Server supports user-defined PL/SQL subtypes and subtype aliases. + +## About subtypes + +A subtype is a data type with an optional set of constraints that restrict the values that can be stored in a column of that type. The rules that apply to the type on which the subtype is based are still enforced, but you can use additional constraints to place limits on the precision or scale of values stored in the type. + +You can define a subtype in the declaration of a PL function, procedure, anonymous block, or package. The syntax is: + +```sql +SUBTYPE IS [()] [NOT NULL] +``` + +Where `constraint` is: + +```text +{ [, ]} | +``` + +`subtype_name` + + `subtype_name` specifies the name of the subtype. + +`type_name` + + `type_name` specifies the name of the original type on which the subtype is based. `type_name` can be: + +- The name of any of the types supported by EDB Postgres Advanced Server. +- The name of any composite type. +- A column anchored by a `%TYPE` operator. +- The name of another subtype. + +Include the `constraint` clause to define restrictions for types that support precision or scale. + +`precision` + + `precision` specifies the total number of digits permitted in a value of the subtype. + +`scale` + + `scale` specifies the number of fractional digits permitted in a value of the subtype. + +`length` + + `length` specifies the total length permitted in a value of `CHARACTER`, `VARCHAR`, or `TEXT` base types. + +Include the `NOT NULL` clause to specify that you can't store `NULL` values in a column of the specified subtype. + +A subtype that is based on a column inherits the column size constraints, but the subtype doesn't inherit `NOT NULL` or `CHECK` constraints. + +## Unconstrained subtypes + +To create an unconstrained subtype, use the `SUBTYPE` command to specify the new subtype name and the name of the type on which the subtype is based. For example, the following command creates a subtype named `address` that has all of the attributes of the type `CHAR`: + +```sql +SUBTYPE address IS CHAR; +``` + +You can also create a subtype (constrained or unconstrained) that's a subtype of another subtype: + +```sql +SUBTYPE cust_address IS address NOT NULL; +``` + +This command creates a subtype named `cust_address` that shares all of the attributes of the `address` subtype. Include the `NOT NULL` clause to specify that a value of the `cust_address` can't be `NULL`. + +## Constrained subtypes + +Include a `length` value when creating a subtype that's based on a character type to define the maximum length of the subtype. For example: + +```sql +SUBTYPE acct_name IS VARCHAR (15); +``` + +This example creates a subtype named `acct_name` that's based on a `VARCHAR` data type but is limited to 15 characters. + +Include values for `precision` to specify the maximum number of digits in a value of the subtype. 
Optionally, include values for `scale` to specify the number of digits to the right of the decimal point when constraining a numeric base type. For example:
+
+```sql
+SUBTYPE acct_balance IS NUMBER (5, 2);
+```
+
+This example creates a subtype named `acct_balance` that shares all of the attributes of a `NUMBER` type but that can't exceed three digits to the left of the decimal point and two digits to the right of the decimal.
+
+An argument declaration (in a function or procedure header) is a *formal argument*. The value passed to a function or procedure is an *actual argument*. When invoking a function or procedure, the caller provides zero or more actual arguments. Each actual argument is assigned to a formal argument that holds the value in the body of the function or procedure.
+
+If a formal argument is declared as a constrained subtype, EDB Postgres Advanced Server:
+
+- Enforces subtype constraints when assigning an actual argument to a formal argument when invoking a procedure.
+- Doesn't enforce subtype constraints when assigning an actual argument to a formal argument when invoking a function.
+
+## Using the %TYPE operator
+
+You can use `%TYPE` notation to declare a subtype anchored to a column. For example:
+
+```sql
+SUBTYPE emp_type IS emp.empno%TYPE;
+```
+
+This command creates a subtype named `emp_type` whose base type matches the type of the `empno` column in the `emp` table. A subtype that's based on a column shares the column size constraints. `NOT NULL` and `CHECK` constraints aren't inherited.
+
+## Subtype conversion
+
+Unconstrained subtypes are aliases for the type on which they're based. Any variable of an unconstrained subtype is interchangeable with a variable of the base type without conversion, and vice versa.
+
+You can interchange a variable of a constrained subtype with a variable of the base type without conversion. However, you can interchange a variable of the base type with a constrained subtype only if it complies with the constraints of the subtype. You can implicitly convert a variable of a constrained subtype to another subtype if it's based on the same base type and the constraint values are within the values of the subtype to which it's being converted.
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/07_character.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/07_character.mdx
new file mode 100644
index 00000000000..31843d1f9c4
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/07_character.mdx
@@ -0,0 +1,19 @@
+---
+title: "Character set"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/07_character/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/07_character/
+---
+
+
+
+Write identifiers, expressions, statements, control structures, and so on that comprise the SPL language using the following characters:
+
+- Uppercase letters A–Z and lowercase letters a–z
+- Digits 0–9
+- Symbols ( ) + - * / < > = ! ~ ^ ; : . ' @ % , " # $ & _ | { } ? [ ]
+- Whitespace characters tab, space, and carriage return
+
+
+!!! Note
+    The data that can be manipulated by an SPL program is determined by the character set supported by the database encoding.
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/index.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/index.mdx
new file mode 100644
index 00000000000..764ec9c59d2
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/01_basic_spl_elements/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Basic SPL elements"
+indexCards: simple
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.047.html"
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.141.html"
+redirects:
+  - /epas/latest/epas_compat_spl/01_basic_spl_elements/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/01_basic_spl_elements/
+---
+
+
+
+The basic programming elements of an SPL program include aspects such as case sensitivity and the available character set.
\ No newline at end of file
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/01_assignment.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/01_assignment.mdx
new file mode 100644
index 00000000000..236e0ef1dd1
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/01_assignment.mdx
@@ -0,0 +1,43 @@
+---
+title: "Assignment"
+redirects:
+  - /epas/latest/epas_compat_spl/04_basic_statements/01_assignment/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/01_assignment/
+---
+
+
+
+The assignment statement sets a variable or a formal parameter of mode `OUT` or `IN OUT` specified on the left side of the assignment `:=` to the evaluated expression specified on the right side of the assignment.
+
+```text
+<variable> := <expression>;
+```
+
+`variable` is an identifier for a previously declared variable, `OUT` formal parameter, or `IN OUT` formal parameter.
+
+`expression` is an expression that produces a single value. The value produced by the expression must have a data type compatible with that of `variable`.
+ +This example shows the typical use of assignment statements in the executable section of the procedure: + +```sql +CREATE OR REPLACE PROCEDURE dept_salary_rpt ( + p_deptno NUMBER +) +IS + todays_date DATE; + rpt_title VARCHAR2(60); + base_sal INTEGER; + base_comm_rate NUMBER; + base_annual NUMBER; +BEGIN + todays_date := SYSDATE; + rpt_title := 'Report For Department # ' || p_deptno || ' on ' + || todays_date; + base_sal := 35525; + base_comm_rate := 1.33333; + base_annual := ROUND(base_sal * base_comm_rate, 2); + + DBMS_OUTPUT.PUT_LINE(rpt_title); + DBMS_OUTPUT.PUT_LINE('Base Annual Salary: ' || base_annual); +END; +``` diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/02_delete.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/02_delete.mdx new file mode 100644 index 00000000000..aaad0506585 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/02_delete.mdx @@ -0,0 +1,47 @@ +--- +title: "DELETE" +redirects: + - /epas/latest/epas_compat_spl/04_basic_statements/02_delete/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/02_delete/ +--- + + + +You can use the `DELETE` command available in the SQL language in SPL programs. + +You can use an expression in the SPL language wherever an expression is allowed in the SQL `DELETE` command. Thus, you can use SPL variables and parameters to supply values to the delete operation. + +```sql +CREATE OR REPLACE PROCEDURE emp_delete ( + p_empno IN emp.empno%TYPE +) +IS +BEGIN + DELETE FROM emp WHERE empno = p_empno; + + IF SQL%FOUND THEN + DBMS_OUTPUT.PUT_LINE('Deleted Employee # : ' || p_empno); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee # ' || p_empno || ' not found'); + END IF; +END; +``` + +The `SQL%FOUND` conditional expression returns `TRUE` if a row is deleted, `FALSE` otherwise. See [Obtaining the result status](08_obtaining_the_result_status/#obtaining_the_result_status) for a discussion of `SQL%FOUND` and other similar expressions. + +This example deletes an employee using this procedure: + +```sql +EXEC emp_delete(9503); + +Deleted Employee # : 9503 + +SELECT * FROM emp WHERE empno = 9503; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+-------+-----+-----+----------+-----+------+-------- +(0 rows) +``` + +!!! Note + You can include the `DELETE` command in a `FORALL` statement. A `FORALL` statement allows a single `DELETE` command to delete multiple rows from values supplied in one or more collections. See [Using the FORALL statement](/epas/latest/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement/#using_the_forall_statement) for more information. 
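+
+For illustration, the following sketch (the key values are hypothetical) shows the pattern the note describes, deleting several employees with a single `DELETE` command in a `FORALL` statement:
+
+```sql
+DECLARE
+    TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER;
+    t_empno         empno_tbl;
+BEGIN
+    -- Collect the keys of the rows to delete
+    t_empno(1) := 9001;
+    t_empno(2) := 9002;
+    t_empno(3) := 9003;
+
+    -- One DELETE command, executed once for each collection element
+    FORALL i IN t_empno.FIRST .. t_empno.LAST
+        DELETE FROM emp WHERE empno = t_empno(i);
+END;
+```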
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/03_insert.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/03_insert.mdx new file mode 100644 index 00000000000..d85fcb25033 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/03_insert.mdx @@ -0,0 +1,84 @@ +--- +title: "INSERT" +redirects: + - /epas/latest/epas_compat_spl/04_basic_statements/03_insert/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/03_insert/ +--- + + + +You can use the `INSERT` command available in the SQL language in SPL programs. + +You can use an expression in the SPL language wherever an expression is allowed in the SQL `INSERT` command. Thus, you can use SPL variables and parameters to supply values to the insert operation. + +This example is a procedure that inserts a new employee using data passed from a calling program: + +```sql +CREATE OR REPLACE PROCEDURE emp_insert ( + p_empno IN emp.empno%TYPE, + p_ename IN emp.ename%TYPE, + p_job IN emp.job%TYPE, + p_mgr IN emp.mgr%TYPE, + p_hiredate IN emp.hiredate%TYPE, + p_sal IN emp.sal%TYPE, + p_comm IN emp.comm%TYPE, + p_deptno IN emp.deptno%TYPE +) +IS +BEGIN + INSERT INTO emp VALUES ( + p_empno, + p_ename, + p_job, + p_mgr, + p_hiredate, + p_sal, + p_comm, + p_deptno); + + DBMS_OUTPUT.PUT_LINE('Added employee...'); + DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || p_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || p_job); + DBMS_OUTPUT.PUT_LINE('Manager : ' || p_mgr); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || p_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || p_sal); + DBMS_OUTPUT.PUT_LINE('Commission : ' || p_comm); + DBMS_OUTPUT.PUT_LINE('Dept # : ' || p_deptno); + DBMS_OUTPUT.PUT_LINE('----------------------'); +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('OTHERS exception on INSERT of employee # ' + || p_empno); + DBMS_OUTPUT.PUT_LINE('SQLCODE : ' || SQLCODE); + DBMS_OUTPUT.PUT_LINE('SQLERRM : ' || SQLERRM); +END; +``` + +If an exception occurs, all database changes made in the procedure are rolled back. In this example, the `EXCEPTION` section with the `WHEN OTHERS` clause catches all exceptions. Two variables are displayed. `SQLCODE` is a number that identifies the specific exception that occurred. `SQLERRM` is a text message explaining the error. See [Exception handling](../05_control_structures/07_exception_handling/#exception_handling) for more information. + +The following shows the output when this procedure is executed: + +```sql +EXEC emp_insert(9503,'PETERSON','ANALYST',7902,'31-MAR-05',5000,NULL,40); + +Added employee... +Employee # : 9503 +Name : PETERSON +Job : ANALYST +Manager : 7902 +Hire Date : 31-MAR-05 00:00:00 +Salary : 5000 +Dept # : 40 +---------------------- + +SELECT * FROM emp WHERE empno = 9503; +__OUTPUT__ + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+--------+--------+------+-------------------+---------+------+------- + 9503 |PETERSON| ANALYST| 7902 | 31-MAR-05 00:00:00| 5000.00 | | 40 +(1 row) +``` + +!!! Note + You can include the `INSERT` command in a `FORALL` statement. A `FORALL` statement allows a single `INSERT` command to insert multiple rows from values supplied in one or more collections. 
See [Using the FORALL statement](/epas/latest/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement/#using_the_forall_statement) for more information.
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/04_null.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/04_null.mdx
new file mode 100644
index 00000000000..68d330ab738
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/04_null.mdx
@@ -0,0 +1,40 @@
+---
+title: "NULL"
+redirects:
+  - /epas/latest/epas_compat_spl/04_basic_statements/04_null/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/04_null/
+---
+
+The simplest statement is the `NULL` statement. This statement is an executable statement that does nothing.
+
+```sql
+NULL;
+```
+
+The following is the simplest possible valid SPL program:
+
+```sql
+BEGIN
+    NULL;
+END;
+```
+
+The `NULL` statement can act as a placeholder where an executable statement is required, such as in a branch of an `IF-THEN-ELSE` statement. For example:
+
+```sql
+CREATE OR REPLACE PROCEDURE divide_it (
+    p_numerator     IN  NUMBER,
+    p_denominator   IN  NUMBER,
+    p_result        OUT NUMBER
+)
+IS
+BEGIN
+    IF p_denominator = 0 THEN
+        NULL;
+    ELSE
+        p_result := p_numerator / p_denominator;
+    END IF;
+END;
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/05_using_the_returning_into_clause.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/05_using_the_returning_into_clause.mdx
new file mode 100644
index 00000000000..b7d4467c968
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/05_using_the_returning_into_clause.mdx
@@ -0,0 +1,138 @@
+---
+title: "RETURNING INTO"
+redirects:
+  - /epas/latest/epas_compat_spl/04_basic_statements/05_using_the_returning_into_clause/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/05_using_the_returning_into_clause/
+---
+
+You can append the optional `RETURNING INTO` clause to the `INSERT`, `UPDATE`, and `DELETE` commands. This clause allows the SPL program to capture the newly added, modified, or deleted values from the results of an `INSERT`, `UPDATE`, or `DELETE` command, respectively.
+
+## Syntax
+
+```sql
+{ <insert> | <update> | <delete> }
+  RETURNING { * | <expr_1> [, <expr_2> ] ...}
+    INTO { <record> | <field_1> [, <field_2> ] ...};
+```
+
+- `insert` is a valid `INSERT` command.
+- `update` is a valid `UPDATE` command.
+- `delete` is a valid `DELETE` command.
+- If you specify `*`, then the values from the row affected by the `INSERT`, `UPDATE`, or `DELETE` command are made available for assignment to the record or fields to the right of the `INTO` keyword. (The use of `*` is an EDB Postgres Advanced Server extension and isn't compatible with Oracle databases.)
+- `expr_1, expr_2...` are expressions evaluated upon the row affected by the `INSERT`, `UPDATE`, or `DELETE` command. The evaluated results are assigned to the record or fields to the right of the `INTO` keyword.
+- `record` is the identifier of a record that must contain fields that match in number and order and are data-type compatible with the values in the `RETURNING` clause. +- `field_1, field_2,...` are variables that must match in number and order and are data-type compatible with the set of values in the `RETURNING` clause. + +If the `INSERT`, `UPDATE`, or `DELETE` command returns a result set with more than one row, then an exception is thrown with `SQLCODE 01422, query returned more than one row`. If no rows are in the result set, then the variables following the `INTO` keyword are set to null. + +!!! Note + A variation of `RETURNING INTO` using the `BULK COLLECT` clause allows a result set of more than one row that's returned into a collection. See [Using the BULK COLLECT clause](/epas/latest/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/#using_the_bulk_collect_clause) for more information. + +## Adding the RETURNING INTO clause + +This example modifies the `emp_comp_update` procedure introduced in [UPDATE](07_update/#update). It adds the `RETURNING INTO` clause: + +```sql +CREATE OR REPLACE PROCEDURE emp_comp_update ( + p_empno IN emp.empno%TYPE, + p_sal IN emp.sal%TYPE, + p_comm IN emp.comm%TYPE +) +IS + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; + v_job emp.job%TYPE; + v_sal emp.sal%TYPE; + v_comm emp.comm%TYPE; + v_deptno emp.deptno%TYPE; +BEGIN + UPDATE emp SET sal = p_sal, comm = p_comm WHERE empno = p_empno + RETURNING + empno, + ename, + job, + sal, + comm, + deptno + INTO + v_empno, + v_ename, + v_job, + v_sal, + v_comm, + v_deptno; + + IF SQL%FOUND THEN + DBMS_OUTPUT.PUT_LINE('Updated Employee # : ' || v_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); + DBMS_OUTPUT.PUT_LINE('Department : ' || v_deptno); + DBMS_OUTPUT.PUT_LINE('New Salary : ' || v_sal); + DBMS_OUTPUT.PUT_LINE('New Commission : ' || v_comm); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee # ' || p_empno || ' not found'); + END IF; +END; +``` + +The following is the output from this procedure, assuming employee `9503` created by the `emp_insert` procedure still exists in the table: + +```sql +EXEC emp_comp_update(9503, 6540, 1200); +__OUTPUT__ +Updated Employee # : 9503 +Name : PETERSON +Job : ANALYST +Department : 40 +New Salary : 6540.00 +New Commission : 1200.00 +``` + +## Adding the RETURNING INTO clause using record types + +This example modifies the `emp_delete` procedure, adding the `RETURNING INTO` clause using record types: + +```sql +CREATE OR REPLACE PROCEDURE emp_delete ( + p_empno IN emp.empno%TYPE +) +IS + r_emp emp%ROWTYPE; +BEGIN + DELETE FROM emp WHERE empno = p_empno + RETURNING + * + INTO + r_emp; + + IF SQL%FOUND THEN + DBMS_OUTPUT.PUT_LINE('Deleted Employee # : ' || r_emp.empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || r_emp.ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || r_emp.job); + DBMS_OUTPUT.PUT_LINE('Manager : ' || r_emp.mgr); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || r_emp.hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || r_emp.sal); + DBMS_OUTPUT.PUT_LINE('Commission : ' || r_emp.comm); + DBMS_OUTPUT.PUT_LINE('Department : ' || r_emp.deptno); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee # ' || p_empno || ' not found'); + END IF; +END; +``` + +The following is the output from this procedure: + +```sql +EXEC emp_delete(9503); +__OUTPUT__ +Deleted Employee # : 9503 +Name : PETERSON +Job : ANALYST +Manager : 7902 +Hire Date : 31-MAR-05 00:00:00 +Salary : 6540.00 +Commission : 1200.00 +Department : 40 
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/06_select_into.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/06_select_into.mdx
new file mode 100644
index 00000000000..6c52566060c
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/06_select_into.mdx
@@ -0,0 +1,107 @@
+---
+title: "SELECT INTO"
+redirects:
+  - /epas/latest/epas_compat_spl/04_basic_statements/06_select_into/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/06_select_into/
+---
+
+The `SELECT INTO` statement is an SPL variation of the SQL `SELECT` command. The differences are:
+
+- `SELECT INTO` assigns the results to variables or records where they can then be used in SPL program statements.
+- The accessible result set of `SELECT INTO` is at most one row.
+
+Other than these differences, all of the clauses of the `SELECT` command, such as `WHERE`, `ORDER BY`, `GROUP BY`, and `HAVING`, are valid for `SELECT INTO`.
+
+## Syntax
+
+The following shows the two variations of `SELECT INTO`:
+
+```sql
+SELECT <select_expressions> INTO <target> FROM ...;
+```
+
+`target` is a comma-separated list of simple variables. `select_expressions` and the remainder of the statement are the same as for the `SELECT` command. The selected values must exactly match in data type, number, and order the structure of the target, or a runtime error occurs.
+
+```sql
+SELECT * INTO <record> FROM <table> ...;
+```
+
+`record` is a record variable that was previously declared.
+
+If the query returns zero rows, null values are assigned to the targets. If the query returns multiple rows, the first row is assigned to the targets and the rest are discarded. ("The first row" isn't well-defined unless you used `ORDER BY`.)
+
+!!! Note
+    - In either case, where no row is returned or more than one row is returned, SPL throws an exception.
+
+    - There is a variation of `SELECT INTO` using the `BULK COLLECT` clause that allows a result set of more than one row that's returned into a collection. See [SELECT BULK COLLECT](/epas/latest/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/01_select_bulk_collect/#select_bulk_collect) for more information.
+
+## Including the WHEN NO_DATA_FOUND clause
+
+You can use the `WHEN NO_DATA_FOUND` clause in an `EXCEPTION` block to determine whether the assignment was successful, that is, at least one row was returned by the query.
+
+This version of the `emp_sal_query` procedure uses the variation of `SELECT INTO` that returns the result set into a record. It also uses the `EXCEPTION` block containing the `WHEN NO_DATA_FOUND` conditional expression.
+
+```sql
+CREATE OR REPLACE PROCEDURE emp_sal_query (
+    p_empno         IN emp.empno%TYPE
+)
+IS
+    r_emp           emp%ROWTYPE;
+    v_avgsal        emp.sal%TYPE;
+BEGIN
+    SELECT * INTO r_emp
+        FROM emp WHERE empno = p_empno;
+    DBMS_OUTPUT.PUT_LINE('Employee # : ' || p_empno);
+    DBMS_OUTPUT.PUT_LINE('Name       : ' || r_emp.ename);
+    DBMS_OUTPUT.PUT_LINE('Job        : ' || r_emp.job);
+    DBMS_OUTPUT.PUT_LINE('Hire Date  : ' || r_emp.hiredate);
+    DBMS_OUTPUT.PUT_LINE('Salary     : ' || r_emp.sal);
+    DBMS_OUTPUT.PUT_LINE('Dept #     : ' || r_emp.deptno);
+
+    SELECT AVG(sal) INTO v_avgsal
+        FROM emp WHERE deptno = r_emp.deptno;
+    IF r_emp.sal > v_avgsal THEN
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary is more than the '
+            || 'department average of ' || v_avgsal);
+    ELSE
+        DBMS_OUTPUT.PUT_LINE('Employee''s salary does not exceed the '
+            || 'department average of ' || v_avgsal);
+    END IF;
+EXCEPTION
+    WHEN NO_DATA_FOUND THEN
+        DBMS_OUTPUT.PUT_LINE('Employee # ' || p_empno || ' not found');
+END;
+```
+
+If the query is executed with a nonexistent employee number, the results appear as follows:
+
+```sql
+EXEC emp_sal_query(0);
+
+Employee # 0 not found
+```
+
+## Including a TOO_MANY_ROWS exception
+
+Another conditional clause useful in the `EXCEPTION` section with `SELECT INTO` is the `TOO_MANY_ROWS` exception. If more than one row is selected by the `SELECT INTO` statement, SPL throws this exception.
+
+When the following block is executed, the `TOO_MANY_ROWS` exception is thrown since there are many employees in the specified department:
+
+```sql
+DECLARE
+    v_ename         emp.ename%TYPE;
+BEGIN
+    SELECT ename INTO v_ename FROM emp WHERE deptno = 20 ORDER BY ename;
+EXCEPTION
+    WHEN TOO_MANY_ROWS THEN
+        DBMS_OUTPUT.PUT_LINE('More than one employee found');
+        DBMS_OUTPUT.PUT_LINE('First employee returned is ' || v_ename);
+END;
+
+More than one employee found
+First employee returned is ADAMS
+```
+
+See [Exception handling](../05_control_structures/07_exception_handling/#exception_handling) for information on exception handling.
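+
+To round out the first form, a minimal sketch using a comma-separated target list together with `WHEN NO_DATA_FOUND` handling (the variable names and the employee number are illustrative):
+
+```sql
+DECLARE
+    v_ename         emp.ename%TYPE;
+    v_sal           emp.sal%TYPE;
+BEGIN
+    -- The two selected columns must match the two target variables
+    -- in number, order, and data type compatibility
+    SELECT ename, sal INTO v_ename, v_sal
+        FROM emp WHERE empno = 7900;
+    DBMS_OUTPUT.PUT_LINE(v_ename || ' earns ' || v_sal);
+EXCEPTION
+    WHEN NO_DATA_FOUND THEN
+        DBMS_OUTPUT.PUT_LINE('Employee # 7900 not found');
+END;
+```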
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/07_update.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/07_update.mdx new file mode 100644 index 00000000000..6bd8e34c57f --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/07_update.mdx @@ -0,0 +1,54 @@ +--- +title: "UPDATE" +redirects: + - /epas/latest/epas_compat_spl/04_basic_statements/07_update/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/07_update/ +--- + + + +You can use the `UPDATE` command available in the SQL language in SPL programs. + +You can use an expression in the SPL language wherever an expression is allowed in the SQL `UPDATE` command. Thus, you can use SPL variables and parameters to supply values to the update operation. + +```sql +CREATE OR REPLACE PROCEDURE emp_comp_update ( + p_empno IN emp.empno%TYPE, + p_sal IN emp.sal%TYPE, + p_comm IN emp.comm%TYPE +) +IS +BEGIN + UPDATE emp SET sal = p_sal, comm = p_comm WHERE empno = p_empno; + + IF SQL%FOUND THEN + DBMS_OUTPUT.PUT_LINE('Updated Employee # : ' || p_empno); + DBMS_OUTPUT.PUT_LINE('New Salary : ' || p_sal); + DBMS_OUTPUT.PUT_LINE('New Commission : ' || p_comm); + ELSE + DBMS_OUTPUT.PUT_LINE('Employee # ' || p_empno || ' not found'); + END IF; +END; +``` + +The `SQL%FOUND` conditional expression returns `TRUE` if a row is updated, `FALSE` otherwise. See [Obtaining the result status](08_obtaining_the_result_status/#obtaining_the_result_status) for a discussion of `SQL%FOUND` and other similar expressions. + +This example shows the update on the employee: + +```sql +EXEC emp_comp_update(9503, 6540, 1200); + +Updated Employee # : 9503 +New Salary : 6540 +New Commission : 1200 + +SELECT * FROM emp WHERE empno = 9503; +__OUTPUT__ +empno | ename | job | mgr | hiredate | sal | comm | deptno +------+--------+--------+------+-------------------+--------+-------+------- + 9503 |PETERSON| ANALYST| 7902 | 31-MAR-05 00:00:00|6540.00 |1200.00| 40 +(1 row) +``` + +!!! Note + You can include the `UPDATE` command in a `FORALL` statement. A `FORALL` statement allows a single `UPDATE` command to update multiple rows from values supplied in one or more collections. See [Using the FORALL statement](/epas/latest/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement/#using_the_forall_statement) for more information. 
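+
+A similar sketch of a bulk update, assuming the same `emp` table (the collection types, names, and values are illustrative):
+
+```sql
+DECLARE
+    TYPE empno_tbl IS TABLE OF emp.empno%TYPE INDEX BY BINARY_INTEGER;
+    TYPE sal_tbl   IS TABLE OF emp.sal%TYPE   INDEX BY BINARY_INTEGER;
+    t_empno         empno_tbl;
+    t_sal           sal_tbl;
+BEGIN
+    t_empno(1) := 9001;   t_sal(1) := 1100.00;
+    t_empno(2) := 9002;   t_sal(2) := 1250.00;
+    -- Each iteration updates the row whose empno matches the element
+    FORALL i IN t_empno.FIRST .. t_empno.LAST
+        UPDATE emp SET sal = t_sal(i) WHERE empno = t_empno(i);
+END;
+```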
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/08_obtaining_the_result_status.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/08_obtaining_the_result_status.mdx new file mode 100644 index 00000000000..c890897d3b6 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/08_obtaining_the_result_status.mdx @@ -0,0 +1,48 @@ +--- +title: "Obtaining the result status" +redirects: + - /epas/latest/epas_compat_spl/04_basic_statements/08_obtaining_the_result_status/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/08_obtaining_the_result_status/ +--- + + + +You can use several attributes to determine the effect of a command. `SQL%FOUND` is a Boolean that returns `TRUE` if at least one row was affected by an `INSERT`, `UPDATE` or `DELETE` command or a `SELECT INTO` command retrieved one or more rows. + +This anonymous block inserts a row and then displays the fact that the row was inserted: + +```sql +BEGIN + INSERT INTO emp (empno,ename,job,sal,deptno) VALUES ( + 9001, 'JONES', 'CLERK', 850.00, 40); + IF SQL%FOUND THEN + DBMS_OUTPUT.PUT_LINE('Row has been inserted'); + END IF; +END; + +Row has been inserted +``` + +`SQL%ROWCOUNT` provides the number of rows affected by an `INSERT`, `UPDATE`, `DELETE`, or `SELECT INTO` command. The `SQL%ROWCOUNT` value is returned as a `BIGINT` data type. The following example updates the row that was just inserted and displays `SQL%ROWCOUNT`: + +```sql +BEGIN + UPDATE emp SET hiredate = '03-JUN-07' WHERE empno = 9001; + DBMS_OUTPUT.PUT_LINE('# rows updated: ' || SQL%ROWCOUNT); +END; + +# rows updated: 1 +``` + +`SQL%NOTFOUND` is the opposite of `SQL%FOUND`. `SQL%NOTFOUND` returns `TRUE` if no rows were affected by an `INSERT`, `UPDATE` or `DELETE` command or a `SELECT INTO` command retrieved no rows. + +```sql +BEGIN + UPDATE emp SET hiredate = '03-JUN-07' WHERE empno = 9000; + IF SQL%NOTFOUND THEN + DBMS_OUTPUT.PUT_LINE('No rows were updated'); + END IF; +END; + +No rows were updated +``` diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/index.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/index.mdx new file mode 100644 index 00000000000..4488f2a3dc4 --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/04_basic_statements/index.mdx @@ -0,0 +1,21 @@ +--- +title: "Types of programming statements" +indexCards: simple +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.061.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.155.html" +redirects: + - /epas/latest/epas_compat_spl/04_basic_statements/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/04_basic_statements/ +--- + + + +You can use several programming statements in an SPL program. + +
+
+- Assignment
+- DELETE
+- INSERT
+- NULL
+- RETURNING INTO
+- SELECT INTO
+- UPDATE
+- Obtaining the result status
+
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/01_if_then.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/01_if_then.mdx
new file mode 100644
index 00000000000..4063d33e5d6
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/01_if_then.mdx
@@ -0,0 +1,57 @@
+---
+title: "IF-THEN"
+redirects:
+  - /epas/latest/epas_compat_spl/05_control_structures/01_if_statement/01_if_then/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/05_control_structures/01_if_statement/01_if_then/
+---
+
+## Syntax
+
+```sql
+IF boolean-expression THEN
+  <statements>
+END IF;
+```
+
+`IF-THEN` statements are the simplest form of `IF`. The statements between `THEN` and `END IF` are executed if the condition is `TRUE`. Otherwise, they are skipped.
+
+## Example
+
+This example uses an `IF-THEN` statement to test and display employees who have a commission:
+
+```sql
+DECLARE
+    v_empno         emp.empno%TYPE;
+    v_comm          emp.comm%TYPE;
+    CURSOR emp_cursor IS SELECT empno, comm FROM emp;
+BEGIN
+    OPEN emp_cursor;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    COMM');
+    DBMS_OUTPUT.PUT_LINE('-----    -------');
+    LOOP
+        FETCH emp_cursor INTO v_empno, v_comm;
+        EXIT WHEN emp_cursor%NOTFOUND;
+--
+--  Test whether or not the employee gets a commission
+--
+        IF v_comm IS NOT NULL AND v_comm > 0 THEN
+            DBMS_OUTPUT.PUT_LINE(v_empno || '  ' ||
+                TO_CHAR(v_comm,'$99999.99'));
+        END IF;
+    END LOOP;
+    CLOSE emp_cursor;
+END;
+```
+
+The following is the output from this program:
+
+```sql
+__OUTPUT__
+EMPNO    COMM
+-----    -------
+7499     $300.00
+7521     $500.00
+7654    $1400.00
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/02_if_then_else.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/02_if_then_else.mdx
new file mode 100644
index 00000000000..43036e577cb
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/02_if_then_else.mdx
@@ -0,0 +1,72 @@
+---
+title: "IF-THEN-ELSE"
+redirects:
+  - /epas/latest/epas_compat_spl/05_control_structures/01_if_statement/02_if_then_else/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/05_control_structures/01_if_statement/02_if_then_else/
+---
+
+## Syntax
+
+```sql
+IF boolean-expression THEN
+  <statements>
+ELSE
+  <statements>
+END IF;
+```
+
+`IF-THEN-ELSE` statements add to `IF-THEN` by letting you specify an alternative set of statements to execute if the condition evaluates to false.
+ +## Example + +This example shows an `IF-THEN-ELSE` statement being used to display the text `Non-commission` if an employee doesn't get a commission: + +```sql +DECLARE + v_empno emp.empno%TYPE; + v_comm emp.comm%TYPE; + CURSOR emp_cursor IS SELECT empno, comm FROM emp; +BEGIN + OPEN emp_cursor; + DBMS_OUTPUT.PUT_LINE('EMPNO COMM'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH emp_cursor INTO v_empno, v_comm; + EXIT WHEN emp_cursor%NOTFOUND; +-- +-- Test whether or not the employee gets a commission +-- + IF v_comm IS NOT NULL AND v_comm > 0 THEN + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || + TO_CHAR(v_comm,'$99999.99')); + ELSE + DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || 'Non-commission'); + END IF; + END LOOP; + CLOSE emp_cursor; +END; +``` + +The following is the output from this program: + +```sql +__OUTPUT__ +EMPNO COMM +----- ------- +7369 Non-commission +7499 $ 300.00 +7521 $ 500.00 +7566 Non-commission +7654 $ 1400.00 +7698 Non-commission +7782 Non-commission +7788 Non-commission +7839 Non-commission +7844 Non-commission +7876 Non-commission +7900 Non-commission +7902 Non-commission +7934 Non-commission +``` diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/03_if_then_else_if.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/03_if_then_else_if.mdx new file mode 100644 index 00000000000..42ee162582d --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/03_if_then_else_if.mdx @@ -0,0 +1,92 @@ +--- +title: "IF-THEN-ELSE IF" +redirects: + - /epas/latest/epas_compat_spl/05_control_structures/01_if_statement/03_if_then_else_if/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/05_control_structures/01_if_statement/03_if_then_else_if/ +--- + + + +You can nest `IF` statements. This allows you to invoke alternative `IF` statements once it's determined whether the conditional of an outer `IF` statement is `TRUE` or `FALSE`. + +In this example, the outer `IF-THEN-ELSE` statement tests whether an employee has a commission. The inner `IF-THEN-ELSE` statements then test whether the employee’s total compensation exceeds or is less than the company average. + +!!! Note + The logic in this program can be simplified by calculating the employee’s yearly compensation using the `NVL` function in the `SELECT` command of the cursor declaration. However, the purpose of this example is to show the use of `IF` statements. 
```sql
+DECLARE
+    v_empno         emp.empno%TYPE;
+    v_sal           emp.sal%TYPE;
+    v_comm          emp.comm%TYPE;
+    v_avg           NUMBER(7,2);
+    CURSOR emp_cursor IS SELECT empno, sal, comm FROM emp;
+BEGIN
+--
+--  Calculate the average yearly compensation in the company
+--
+    SELECT AVG((sal + NVL(comm,0)) * 24) INTO v_avg FROM emp;
+    DBMS_OUTPUT.PUT_LINE('Average Yearly Compensation: ' ||
+        TO_CHAR(v_avg,'$999,999.99'));
+    OPEN emp_cursor;
+    DBMS_OUTPUT.PUT_LINE('EMPNO    YEARLY COMP');
+    DBMS_OUTPUT.PUT_LINE('-----    -----------');
+    LOOP
+        FETCH emp_cursor INTO v_empno, v_sal, v_comm;
+        EXIT WHEN emp_cursor%NOTFOUND;
+--
+--  Test whether or not the employee gets a commission
+--
+        IF v_comm IS NOT NULL AND v_comm > 0 THEN
+--
+--  Test if the employee's compensation with commission exceeds the average
+--
+            IF (v_sal + v_comm) * 24 > v_avg THEN
+                DBMS_OUTPUT.PUT_LINE(v_empno || '  ' ||
+                    TO_CHAR((v_sal + v_comm) * 24,'$999,999.99') || ' Exceeds Average');
+            ELSE
+                DBMS_OUTPUT.PUT_LINE(v_empno || '  ' ||
+                    TO_CHAR((v_sal + v_comm) * 24,'$999,999.99') || ' Below Average');
+            END IF;
+        ELSE
+--
+--  Test if the employee's compensation without commission exceeds the average
+--
+            IF v_sal * 24 > v_avg THEN
+                DBMS_OUTPUT.PUT_LINE(v_empno || '  ' ||
+                    TO_CHAR(v_sal * 24,'$999,999.99') || ' Exceeds Average');
+            ELSE
+                DBMS_OUTPUT.PUT_LINE(v_empno || '  ' ||
+                    TO_CHAR(v_sal * 24,'$999,999.99') || ' Below Average');
+            END IF;
+        END IF;
+    END LOOP;
+    CLOSE emp_cursor;
+END;
+```
+
+The following is the output from this program:
+
+```sql
+__OUTPUT__
+Average Yearly Compensation:  $ 53,528.57
+EMPNO    YEARLY COMP
+-----    -----------
+7369     $ 19,200.00 Below Average
+7499     $ 45,600.00 Below Average
+7521     $ 42,000.00 Below Average
+7566     $ 71,400.00 Exceeds Average
+7654     $ 63,600.00 Exceeds Average
+7698     $ 68,400.00 Exceeds Average
+7782     $ 58,800.00 Exceeds Average
+7788     $ 72,000.00 Exceeds Average
+7839     $ 120,000.00 Exceeds Average
+7844     $ 36,000.00 Below Average
+7876     $ 26,400.00 Below Average
+7900     $ 22,800.00 Below Average
+7902     $ 72,000.00 Exceeds Average
+7934     $ 31,200.00 Below Average
+```
+
+When you use this form, you're actually nesting an `IF` statement inside the `ELSE` part of an outer `IF` statement. Thus you need one `END IF` statement for each nested `IF` and one for the parent `IF-ELSE`.
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/04_if_then_elseif_else.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/04_if_then_elseif_else.mdx
new file mode 100644
index 00000000000..5be6ab73913
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/04_if_then_elseif_else.mdx
@@ -0,0 +1,77 @@
+---
+title: "IF-THEN-ELSIF-ELSE"
+redirects:
+  - /epas/latest/epas_compat_spl/05_control_structures/01_if_statement/04_if_then_elseif_else/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/05_control_structures/01_if_statement/04_if_then_elseif_else/
+---
+
+## Syntax
+
+```sql
+IF boolean-expression THEN
+  <statements>
+[ ELSIF boolean-expression THEN
+  <statements>
+[ ELSIF boolean-expression THEN
+  <statements> ] ...]
+[ ELSE
+  <statements> ]
+END IF;
+```
+
+`IF-THEN-ELSIF-ELSE` provides a method of checking many alternatives in one statement.
Formally, it's equivalent to nested `IF-THEN-ELSE-IF-THEN` commands, but only one `END IF` is needed.
+
+## Example
+
+The following example uses an `IF-THEN-ELSIF-ELSE` statement to count the number of employees by compensation ranges of $25,000:
+
+```sql
+DECLARE
+    v_empno         emp.empno%TYPE;
+    v_comp          NUMBER(8,2);
+    v_lt_25K        SMALLINT := 0;
+    v_25K_50K       SMALLINT := 0;
+    v_50K_75K       SMALLINT := 0;
+    v_75K_100K      SMALLINT := 0;
+    v_ge_100K       SMALLINT := 0;
+    CURSOR emp_cursor IS SELECT empno, (sal + NVL(comm,0)) * 24 FROM emp;
+BEGIN
+    OPEN emp_cursor;
+    LOOP
+        FETCH emp_cursor INTO v_empno, v_comp;
+        EXIT WHEN emp_cursor%NOTFOUND;
+        IF v_comp < 25000 THEN
+            v_lt_25K := v_lt_25K + 1;
+        ELSIF v_comp < 50000 THEN
+            v_25K_50K := v_25K_50K + 1;
+        ELSIF v_comp < 75000 THEN
+            v_50K_75K := v_50K_75K + 1;
+        ELSIF v_comp < 100000 THEN
+            v_75K_100K := v_75K_100K + 1;
+        ELSE
+            v_ge_100K := v_ge_100K + 1;
+        END IF;
+    END LOOP;
+    CLOSE emp_cursor;
+    DBMS_OUTPUT.PUT_LINE('Number of employees by yearly compensation');
+    DBMS_OUTPUT.PUT_LINE('Less than 25,000 : ' || v_lt_25K);
+    DBMS_OUTPUT.PUT_LINE('25,000 - 49,999  : ' || v_25K_50K);
+    DBMS_OUTPUT.PUT_LINE('50,000 - 74,999  : ' || v_50K_75K);
+    DBMS_OUTPUT.PUT_LINE('75,000 - 99,999  : ' || v_75K_100K);
+    DBMS_OUTPUT.PUT_LINE('100,000 and over : ' || v_ge_100K);
+END;
+```
+
+The following is the output from this program:
+
+```sql
+__OUTPUT__
+Number of employees by yearly compensation
+Less than 25,000 : 2
+25,000 - 49,999  : 5
+50,000 - 74,999  : 6
+75,000 - 99,999  : 0
+100,000 and over : 1
+```
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/index.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/index.mdx
new file mode 100644
index 00000000000..58abf3ff2d7
--- /dev/null
+++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/01_if_statement/index.mdx
@@ -0,0 +1,22 @@
+---
+title: "IF statement"
+indexCards: simple
+redirects:
+  - /epas/latest/epas_compat_spl/05_control_structures/01_if_statement/ #generated for docs/epas/reorg-role-use-case-mode
+  - /epas/latest/application_programming/epas_compat_spl/05_control_structures/01_if_statement/
+---
+
+`IF` statements let you execute commands based on certain conditions. SPL has four forms of `IF`:
+
+- `IF ... THEN`
+- `IF ... THEN ... ELSE`
+- `IF ... THEN ... ELSE IF`
+- `IF ... THEN ... ELSIF ... THEN ... ELSE`
+
diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/02_return_statement.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/02_return_statement.mdx new file mode 100644 index 00000000000..2cefb6d86dd --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/02_return_statement.mdx @@ -0,0 +1,45 @@ +--- +title: "RETURN statement" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.063.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.157.html" +redirects: + - /epas/latest/epas_compat_spl/05_control_structures/02_return_statement/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/05_control_structures/02_return_statement/ +--- + + + +The `RETURN` statement terminates the current function, procedure, or anonymous block and returns control to the caller. + +## Syntax + +The `RETURN` statement has two forms. The first form of the `RETURN` statement terminates a procedure or function that returns `void`. The syntax of this form is: + +```sql +RETURN; +``` + +The second form of `RETURN` returns a value to the caller. The syntax of this form is: + +```sql +RETURN ; +``` + +`expression` must evaluate to the same data type as the return type of the function. + +## Example + +This example uses the `RETURN` statement and returns a value to the caller: + +```sql +CREATE OR REPLACE FUNCTION emp_comp ( + p_sal NUMBER, + p_comm NUMBER +) RETURN NUMBER +IS +BEGIN + RETURN (p_sal + NVL(p_comm, 0)) * 24; +END emp_comp; +``` diff --git a/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/03_goto_statement.mdx b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/03_goto_statement.mdx new file mode 100644 index 00000000000..95b9fab493d --- /dev/null +++ b/product_docs/docs/epas/17/reference/application_programmer_reference/stored_procedural_language_reference/05_control_structures/03_goto_statement.mdx @@ -0,0 +1,101 @@ +--- +title: "GOTO statement" +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.064.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.5/Database_Compatibility_for_Oracle_Developers_Guide.1.158.html" +redirects: + - /epas/latest/epas_compat_spl/05_control_structures/03_goto_statement/ #generated for docs/epas/reorg-role-use-case-mode + - /epas/latest/application_programming/epas_compat_spl/05_control_structures/03_goto_statement/ +--- + + + +The `GOTO` statement causes the point of execution to jump to the statement with the specified label. 
+
+## Syntax
+
+The syntax of a `GOTO` statement is:
+
+```sql
+GOTO <label>;
+```
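+
+As a minimal sketch of how a label and `GOTO` work together (the label name and loop logic are illustrative; `<<label>>` is the double angle bracket form SPL uses for labels):
+
+```sql
+DECLARE
+    v_counter       NUMBER := 1;
+BEGIN
+    <<increment_counter>>
+    DBMS_OUTPUT.PUT_LINE('Iteration # ' || v_counter);
+    v_counter := v_counter + 1;
+    -- Jump back to the labeled statement until the counter passes 3
+    IF v_counter <= 3 THEN
+        GOTO increment_counter;
+    END IF;
+END;
+```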
+
-| Component | Version | Release Note | Addresses |
+| Component | Version | Description | Addresses |
 |-----------|---------|-------------|-----------|
 | BDR | 5.6.0 | **Decoding Worker supports Streaming Transactions**<br/><br/>One of the main advantages of streaming is that the WAL sender sends the partial transaction before it commits, which reduces replication lag. Now, with streaming support, the WAL decoder does the same thing, but it streams to the LCRs segments. Eventually, the WAL sender will read the LCRs and mimic the same behavior of streaming large transactions before they commit. This provides the benefits of decoding worker, such as reduced CPU and disk space, as well as the benefits of streaming, such as reduced lag and disk space, since ".spill" files are not generated. The WAL decoder always streams the transaction to LCRs, but based on downstream requests, the WAL sender either streams the transaction or just mimics the normal BEGIN..COMMIT scenario. In addition to the normal LCRs segment files, we create streaming files with the starting names TR_TXN_<file-name-format> and CAS_TXN_<file-name-format> for each streamed transaction. | |
@@ -120,14 +117,14 @@
 any node in a subgroup and does not need to be redefined for every
 subgroup anymore. This is particularly useful when combined with
 ORIGIN\_GROUP keyword to reduce the complexity of commit scope setup.
-| CLI | 5.6.0 | **Use bdr.bdr_file_settings view in verify-settings**<br/><br/>Use bdr.bdr_file_settings view to get the current settings for the proxy. | |
+| PGD CLI | 5.6.0 | **Use bdr.bdr_file_settings view in verify-settings**<br/><br/>Use bdr.bdr_file_settings view to get the current settings for the proxy. | |
 
 ## Bug Fixes
 
-| Component | Version | Release Note | Addresses |
+| Component | Version | Description | Addresses |
 |-----------|---------|-------------|-----------|
 | BDR | 5.6.0 | **Fixed buffer overrun in the writer**<br/><br/>Include an extra zero byte at the end of a column value allocation in shared memory queue insert/update/delete messages. | 98966 |
 | BDR | 5.6.0 | Fixes for some race conditions to prevent node sync from entering a hung state with the main subscription disabled. | |
diff --git a/product_docs/docs/pgd/5.6/rel_notes/pgd_5.6.1_rel_notes.mdx b/product_docs/docs/pgd/5.6/rel_notes/pgd_5.6.1_rel_notes.mdx
new file mode 100644
index 00000000000..6f3d065a545
--- /dev/null
+++ b/product_docs/docs/pgd/5.6/rel_notes/pgd_5.6.1_rel_notes.mdx
@@ -0,0 +1,90 @@
+---
+title: EDB Postgres Distributed 5.6.1 release notes
+navTitle: Version 5.6.1
+---
+
+Released: 25 November 2024
+
+EDB Postgres Distributed 5.6.1 includes a number of enhancements and bug fixes.
+
+## Highlights
+
+- Postgres 17 support
+- ARM64 processor support
+
+## Features
+
+| Component | Version | Description | Addresses |
+|-----------|---------|-------------|-----------|
+| BDR | 5.6.1 | **Added Postgres 17 support**<br/><br/>Support for Postgres 17 has been added for all flavors (PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced Server) starting with version 17.2. | |
+| BDR | 5.6.1 | **Added ARM64 processor support**<br/><br/>Support ARM architecture for EDB Postgres Distributed on Debian 12 and RHEL 9. | |
+
+## Enhancements
+
+| Component | Version | Description | Addresses |
+|-----------|---------|-------------|-----------|
+| BDR | 5.6.1 | **Added bdr.wait_node_confirm_lsn()**<br/><br/>The function bdr.wait_node_confirm_lsn() has been introduced to wait until a specific node reaches a designated Log Sequence Number (LSN). It first checks the confirmed_flush_lsn of the replication slot for the specified node. If that information is not available, the function connects to the node and queries pg_replication_origin_progress(), using the invoking node as the origin. If the nodename parameter is NULL, the function waits for all nodes to reach the specified LSN. If the target LSN is NULL, it waits for the current wal_flush_lsn. | |
+| BDR | 5.6.1 | **Improvements made in SO Node Management and Progress Tracking**<br/><br/>An update addresses the movement of group slots in SO nodes, ensuring they don't appear as peers in progress updates. Improvements include enhanced watermark management for SO leaders in the Optimized Topology configuration, where write leaders now include watermarks in their updates. Watermarks are broadcast to simplify progress tracking on idle clusters. The peer progress mapping for SO nodes has been corrected, and the tap test for group slot movement has been revised. Additionally, the bdr_get_all_origins function now considers SO node origins. | |
+| BDR | 5.6.1 | **LSN Progress in Optimized Topology Configurations is now communicated**<br/><br/>While there are no connections from non-leader data nodes to subscriber-only nodes in an optimized topology configuration, the LSN progress of all data nodes is periodically communicated to these subscriber-only nodes through logical replication. | |
+| BDR | 5.6.1 | **Some DDL commands are now allowed by bdr.permit_unsafe_commands when set**<br/><br/>The bdr.permit_unsafe_commands parameter now allows some DDL commands that were previously disallowed. Specifically, ALTER COLUMN...TYPE...USING can now be permitted if the user knows the operation is safe. | |
+
+## Bug Fixes
+
+| Component | Version | Description | Addresses |
+|-----------|---------|-------------|-----------|
+| BDR | 5.6.1 | **Addressed walsender crash that happened during configuration reload**<br/><br/>Ensure that pglogical GUCs are overridden only when operating within the pglogical worker. If this is not the case, MyPGLogicalWorker will be NULL, resulting in a segmentation fault when the walsender attempts a configuration reload from the pgl_wait_for_standby_confirmation() function. | 42100 |
+| BDR | 5.6.1 | **Fixed unintended eager connection related to consensus connections among Subscriber Only group members**<br/><br/>The msgbroker module used to establish consensus connections lazily, meaning that connections were created only when the first message was sent to a specific destination. This method negatively affected the latency of Raft leader elections. The behavior was modified to create connections to consensus peers eagerly. However, this change resulted in an unintended consequence: a fully meshed consensus network among subscriber-only nodes, which may conflict with customer network designs. This patch keeps the eager connection setup but limits it to voting nodes only, reverting to a lazy connection setup for non-voting nodes. | 42041 |
+| BDR | 5.6.1 | **Fixed autopartition task scheduling**<br/><br/>To improve reliability, shuffle the scheduling of autopartition tasks. This way, tasks that are prone to failure won't consistently impact the success of other tasks. | 41998 |
+| BDR | 5.6.1 | **Fixed parting subscription with standbys**<br/><br/>The parting subscription used to hang, failing to wait for standbys when the bdr.standby_slot_names parameter was defined. | 41821 |
+| BDR | 5.6.1 | **Fixed parting SO node with multiple origins**<br/><br/>All relevant origins must be removed when parting an SO node. With Optimized Topology, parting an SO node should result in removing all origins it has, not just the one related to its SO group leader. When parting a data node, even though there is no subscription to it from an SO node, the origin should be removed. Do not make an SO node the target of a part catchup subscription when Optimized Topology is enabled. | |
+| BDR | 5.6.1 | **Stopped creation of slots for subscriber only nodes on witness nodes**<br/><br/>Subscriber only nodes should not have slots on witness nodes. | |
+| BDR | 5.6.1 | **Ensure no waiting for DEGRADE timeout when in an already degraded state**<br/><br/>When using a commit scope with the DEGRADE clause, if the system detects that it's in a degraded state, transactions start in DEGRADE mode. This ensures that the timeout is not applied on every commit. | |
+| PGD Proxy | 5.6.1 | **Fixed routing strategy for read nodes**<br/><br/>Corrected routing strategy for read nodes after a network partition. | |
+
diff --git a/product_docs/docs/pgd/5.6/rel_notes/src/meta.yml b/product_docs/docs/pgd/5.6/rel_notes/src/meta.yml
new file mode 100644
index 00000000000..bfda8f205ad
--- /dev/null
+++ b/product_docs/docs/pgd/5.6/rel_notes/src/meta.yml
@@ -0,0 +1,24 @@
+product: EDB Postgres Distributed
+shortname: pgd
+title: EDB Postgres Distributed 5.6+ release notes
+description: Release notes for EDB Postgres Distributed 5.6 and later
+intro: |
+  The EDB Postgres Distributed documentation describes the latest version of EDB Postgres Distributed 5, including minor releases and patches. The release notes provide information on what was new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature.
+columns:
+- 0:
+    label: Release Date
+    key: shortdate
+- 1:
+    label: "EDB Postgres Distributed"
+    key: version-link
+- 2:
+    label: "BDR extension"
+    key: $bdrextension
+- 3:
+    label: "PGD CLI"
+    key: $pgdcli
+- 4:
+    label: "PGD Proxy"
+    key: $pgdproxy
+components: [ "BDR", "PGD CLI", "PGD Proxy", "Utilities" ]
+
diff --git a/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.0.yml b/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.0.yml
index 6ddb9ac5004..14347451483 100644
--- a/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.0.yml
+++ b/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.0.yml
@@ -1,6 +1,10 @@
 product: EDB Postgres Distributed
 version: 5.6.0
 date: 15 October 2024
+meta:
+  bdrextension: 5.6.0
+  pgdcli: 5.6.0
+  pgdproxy: 5.6.0
 intro: |
   EDB Postgres Distributed 5.6.0 includes a number of enhancements and bug fixes.
 highlights: |
@@ -415,7 +419,7 @@ relnotes:
   severity: High
   impact: High
 - relnote: Use bdr.bdr_file_settings view in verify-settings
-  component: CLI
+  component: PGD CLI
   component_version: 5.6.0
   details: |
     Use bdr.bdr_file_settings view to get the current settings for the proxy.
diff --git a/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.1.yml b/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.1.yml
new file mode 100644
index 00000000000..9091df27928
--- /dev/null
+++ b/product_docs/docs/pgd/5.6/rel_notes/src/relnote_5.6.1.yml
@@ -0,0 +1,183 @@
+product: EDB Postgres Distributed
+version: 5.6.1
+date: 25 November 2024
+meta:
+  bdrextension: 5.6.1
+  pgdcli: 5.6.1
+  pgdproxy: 5.6.1
+intro: |
+  EDB Postgres Distributed 5.6.1 includes a number of enhancements and bug fixes.
+highlights: |
+  - Postgres 17 support
+  - ARM64 processor support
+relnotes:
+- relnote: Added Postgres 17 support
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    Support for Postgres 17 has been added for all flavors (PostgreSQL, EDB Postgres Extended,
+    and EDB Postgres Advanced Server) starting with version 17.2.
+  jira: BDR-5410
+  addresses: ""
+  type: Feature
+  severity: High
+  impact: High
+- relnote: Added ARM64 processor support
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    Support ARM architecture for EDB Postgres Distributed on Debian 12 and RHEL 9.
+  jira: BDR-5410
+  addresses: ""
+  type: Feature
+  severity: High
+  impact: High
+- relnote: Addressed walsender crash that happened during configuration reload.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    Ensure that pglogical GUCs are overridden only when operating within the pglogical worker.
+    If this is not the case, MyPGLogicalWorker will be NULL, resulting in a segmentation fault
+    when the walsender attempts a configuration reload from the
+    pgl_wait_for_standby_confirmation() function.
+  jira: BDR-5661
+  addresses: "42100"
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Fixed unintended eager connection related to consensus connections among Subscriber Only group members
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    The msgbroker module used to establish consensus connections lazily, meaning that connections
+    were created only when the first message was sent to a specific destination. This method
+    negatively affected the latency of Raft leader elections. The behavior was modified to create
+    connections to consensus peers eagerly. However, this change resulted in an unintended
+    consequence: a fully meshed consensus network among subscriber-only nodes, which may conflict
+    with customer network designs. This patch keeps the eager connection setup but limits it to
+    voting nodes only, reverting to a lazy connection setup for non-voting nodes.
+  jira: BDR-5666
+  addresses: "42041"
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Fixed autopartition task scheduling.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    To improve reliability, shuffle the scheduling of autopartition tasks. This way, tasks
+    that are prone to failure won't consistently impact the success of other tasks.
+  jira: BDR-5638
+  addresses: "41998"
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Fixed parting subscription with standbys.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    The parting subscription used to hang, failing to wait for standbys when the
+    bdr.standby_slot_names parameter was defined.
+  jira: BDR-5658
+  addresses: "41821"
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Added `bdr.wait_node_confirm_lsn()`.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    The function `bdr.wait_node_confirm_lsn()` has been introduced to wait until a specific node
+    reaches a designated Log Sequence Number (LSN). It first checks the `confirmed_flush_lsn` of
+    the replication slot for the specified node. If that information is not available, the function
+    connects to the node and queries `pg_replication_origin_progress()`, using the invoking node as
+    the origin.
+    If the `nodename` parameter is NULL, the function will wait for all nodes to reach the specified
+    LSN. If the `target` LSN is NULL, it will wait for the current `wal_flush_lsn`.
+  jira: BDR-5200
+  addresses: ""
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Improvements made in SO Node Management and Progress Tracking.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    An update addresses the movement of group slots in SO nodes, ensuring they don't appear as peers in
+    progress updates. Improvements include enhanced watermark management for SO leaders in the Optimized Topology
+    configuration, where write leaders now include watermarks in their updates. Watermarks are broadcast
+    to simplify progress tracking on idle clusters. The peer progress mapping for SO nodes has been corrected,
+    and the tap test for group slot movement has been revised.
+    Additionally, the `bdr_get_all_origins` function now considers SO node origins.
+  jira: BDR-5549
+  addresses: ""
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: LSN Progress in Optimized Topology Configurations is now communicated.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    While there are no connections from non-leader data nodes to subscriber-only nodes in an optimized
+    topology configuration, the LSN progress of all data nodes is periodically communicated to these
+    subscriber-only nodes through logical replication.
+  jira: BDR-5549
+  addresses: ""
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Fixed parting SO node with multiple origins.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    All relevant origins must be removed when parting an SO node.
+    With Optimized Topology, parting an SO node should result in removing all origins it
+    has, not just the one related to its SO group leader.
+    When parting a data node, even though there is no subscription to it
+    from an SO node, the origin should be removed.
+    Do not make an SO node the target of a part catchup subscription when Optimized Topology is enabled.
+  jira: BDR-5552
+  addresses: ""
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Stopped creation of slots for subscriber only nodes on witness nodes.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    Subscriber only nodes should not have slots on witness nodes.
+  jira: BDR-5618
+  addresses: ""
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Some DDL commands are now allowed by `bdr.permit_unsafe_commands` when set.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    The `bdr.permit_unsafe_commands` parameter now allows some DDL commands that were previously disallowed. Specifically, `ALTER COLUMN...TYPE...USING` can now be permitted if the user knows the operation is safe.
+  jira: ""
+  addresses: ""
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Ensure no waiting for DEGRADE timeout when in an already degraded state.
+  component: BDR
+  component_version: 5.6.1
+  details: |
+    When using a commit scope with the DEGRADE clause, if the system detects that it's in a degraded state, transactions start in DEGRADE mode. This ensures that the timeout is not applied on every commit.
+  jira: BDR-5651
+  addresses: ""
+  type: Bug-fix
+  severity: High
+  impact: High
+- relnote: Fixed routing strategy for read nodes.
+  component: PGD Proxy
+  component_version: 5.6.1
+  details: |
+    Corrected routing strategy for read nodes after a network partition.
+  jira: BDR-5216
+  addresses: ""
+  type: Bug-fix
+  severity: Medium
+  impact: Medium
diff --git a/product_docs/docs/pgd/5.6/scaling.mdx b/product_docs/docs/pgd/5.6/scaling.mdx
index 2f064cee977..49bb625b580 100644
--- a/product_docs/docs/pgd/5.6/scaling.mdx
+++ b/product_docs/docs/pgd/5.6/scaling.mdx
@@ -1,6 +1,6 @@
 ---
 title: PGD AutoPartition
-description: How to use autopartioning in PGD to split tables into several partitions.
+description: How to use autopartitioning in PGD to split tables into several partitions.
 redirects:
   - ../bdr/scaling
 ---
@@ -184,3 +184,21 @@ to enable autopartitioning on the given table. If autopartitioning is already
 enabled, then no action occurs. Similarly, use
 [`bdr.autopartition_disable()`](/pgd/latest/reference/autopartition#bdrautopartition_disable)
 to disable autopartitioning on the given table.
+
+## Restrictions on EDB Postgres Advanced Server-native automatic partitioning
+
+EDB Postgres Advanced Server-native automatic partitioning is not supported in PGD.
+
+If the PGD extension is active on an EDB Postgres Advanced Server database, DDL commands to configure
+EDB Postgres Advanced Server automatic partitioning (`ALTER TABLE ...
SET AUTOMATIC` and `ALTER TABLE ... SET INTERVAL`) +are rejected. + +While it's possible to enable the PGD extension on an EDB Postgres Advanced Server database +containing tables configured to use EDB Postgres Advanced Server-native automatic partitioning, it +isn't possible to join more nodes using this node as a source node. + +You can disable EDB Postgres Advanced Server-native automatic partitioning with one of the following +commands: + +- `ALTER TABLE ... SET MANUAL` (for list partitioned tables) +- `ALTER TABLE ... SET INTERVAL ()` (for interval partitioned tables) diff --git a/product_docs/docs/pgd/5.6/security/pgd-predefined-roles.mdx b/product_docs/docs/pgd/5.6/security/pgd-predefined-roles.mdx index 5e735aa2107..97e74fae2a0 100644 --- a/product_docs/docs/pgd/5.6/security/pgd-predefined-roles.mdx +++ b/product_docs/docs/pgd/5.6/security/pgd-predefined-roles.mdx @@ -16,6 +16,7 @@ This is a role for an admin user that can manage anything PGD related. It allows - ALL PRIVILEGES ON ALL TABLES IN SCHEMA BDR - ALL PRIVILEGES ON ALL ROUTINES IN SCHEMA BDR + ### bdr_read_all_stats This role provides read access to most of the tables, views, and functions that users or applications may need to observe the statistics and state of the PGD cluster. @@ -150,6 +151,7 @@ This role is designed for applications that require access to PGD features, obje - [`bdr.trigger_get_xid`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid) - [`bdr.wait_for_camo_partner_queue`](/pgd/latest/reference/functions#bdrwait_for_camo_partner_queue) - [`bdr.wait_slot_confirm_lsn`](/pgd/latest/reference/functions#bdrwait_slot_confirm_lsn) +- [`bdr.wait_node_confirm_lsn`](/pgd/latest/reference/functions#bdrwait_node_confirm_lsn) Many of these functions require additional privileges before you can use them. For example, you must be the table owner to successfully execute diff --git a/product_docs/docs/pgd/5.6/upgrades/compatibility.mdx b/product_docs/docs/pgd/5.6/upgrades/compatibility.mdx index d1ef52660d1..ac30086652a 100644 --- a/product_docs/docs/pgd/5.6/upgrades/compatibility.mdx +++ b/product_docs/docs/pgd/5.6/upgrades/compatibility.mdx @@ -29,7 +29,7 @@ The `global` scope no longer exists. To create scope with the same behavior, use [Group Commit](../commit-scopes/group-commit). ```sql -SELECT bdr.add_commit_scope( +SELECT bdr.create_commit_scope( commit_scope_name := 'eager_scope', origin_node_group := 'top_group', rule := 'ALL (top_group) GROUP COMMIT (conflict_resolution = eager, commit_decision = raft) ABORT ON (timeout = 60s)', diff --git a/product_docs/docs/pgd/5/durability/group-commit.mdx b/product_docs/docs/pgd/5/durability/group-commit.mdx index 4ad0ce83d87..c13c42aeb75 100644 --- a/product_docs/docs/pgd/5/durability/group-commit.mdx +++ b/product_docs/docs/pgd/5/durability/group-commit.mdx @@ -15,6 +15,12 @@ node to successfully confirm a transaction at COMMIT time. Confirmation can be s at a number of points in the transaction processing but defaults to "visible" when the transaction has been flushed to disk and is visible to all other transactions. +!!! Warning + Group commit is currently offered as an experimental feature intended for preview and + evaluation purposes. While it provides valuable capabilities, it has known limitations + and challenges that make it unsuitable for production environments. We recommend that + customers avoid using this feature in production scenarios until these limitations + are addressed in future releases. 
## Example diff --git a/product_docs/docs/pgd/5/overview/index.mdx b/product_docs/docs/pgd/5/overview/index.mdx index 190e12f9b55..d4bb1924ac1 100644 --- a/product_docs/docs/pgd/5/overview/index.mdx +++ b/product_docs/docs/pgd/5/overview/index.mdx @@ -100,7 +100,7 @@ In the future, one node will be elected as the main replicator to other groups, ### Supported Postgres database servers -PGD is compatible with [PostgreSQL](https://www.postgresql.org/), [EDB Postgres Extended Server](https://techsupport.enterprisedb.com/customer_portal/sw/2ndqpostgres/), and [EDB Postgres Advanced Server](/epas/latest) and is deployed as a standard Postgres extension named BDR. See [Compatibility](../#compatibility) for details about supported version combinations. +PGD is compatible with [PostgreSQL](https://www.postgresql.org/), [EDB Postgres Extended Server](/pge/latest), and [EDB Postgres Advanced Server](/epas/latest) and is deployed as a standard Postgres extension named BDR. See [Compatibility](../#compatibility) for details about supported version combinations. Some key PGD features depend on certain core capabilities being available in the target Postgres database server. Therefore, PGD users must also adopt the Postgres database server distribution that's best suited to their business needs. For example, if having the PGD feature Commit At Most Once (CAMO) is mission critical to your use case, don't adopt the community PostgreSQL distribution. It doesn't have the core capability required to handle CAMO. See the full feature matrix compatibility in [Choosing a Postgres distribution](../planning/choosing_server/). diff --git a/product_docs/docs/pgd/5/reference/index.json b/product_docs/docs/pgd/5/reference/index.json index fbce200c01e..7fce149a51b 100644 --- a/product_docs/docs/pgd/5/reference/index.json +++ b/product_docs/docs/pgd/5/reference/index.json @@ -1,351 +1,351 @@ { - "bdrcamo_decision_journal": "/pgd/5/reference/catalogs-visible#bdrcamo_decision_journal", - "bdrcommit_scopes": "/pgd/5/reference/catalogs-visible#bdrcommit_scopes", - "bdrconflict_history": "/pgd/5/reference/catalogs-visible#bdrconflict_history", - "bdrconflict_history_summary": "/pgd/5/reference/catalogs-visible#bdrconflict_history_summary", - "bdrconsensus_kv_data": "/pgd/5/reference/catalogs-visible#bdrconsensus_kv_data", - "bdrcrdt_handlers": "/pgd/5/reference/catalogs-visible#bdrcrdt_handlers", - "bdrddl_replication": "/pgd/5/reference/pgd-settings#bdrddl_replication", - "bdrdepend": "/pgd/5/reference/catalogs-visible#bdrdepend", - "bdrglobal_consensus_journal": "/pgd/5/reference/catalogs-visible#bdrglobal_consensus_journal", - "bdrglobal_consensus_journal_details": "/pgd/5/reference/catalogs-visible#bdrglobal_consensus_journal_details", - "bdrglobal_consensus_response_journal": "/pgd/5/reference/catalogs-visible#bdrglobal_consensus_response_journal", - "bdrglobal_lock": "/pgd/5/reference/catalogs-visible#bdrglobal_lock", - "bdrglobal_locks": "/pgd/5/reference/catalogs-visible#bdrglobal_locks", - "bdrgroup_camo_details": "/pgd/5/reference/catalogs-visible#bdrgroup_camo_details", - "bdrgroup_raft_details": "/pgd/5/reference/catalogs-visible#bdrgroup_raft_details", - "bdrgroup_replslots_details": "/pgd/5/reference/catalogs-visible#bdrgroup_replslots_details", - "bdrgroup_subscription_summary": "/pgd/5/reference/catalogs-visible#bdrgroup_subscription_summary", - "bdrgroup_versions_details": "/pgd/5/reference/catalogs-visible#bdrgroup_versions_details", - "bdrlocal_consensus_snapshot": 
"/pgd/5/reference/catalogs-visible#bdrlocal_consensus_snapshot", - "bdrlocal_consensus_state": "/pgd/5/reference/catalogs-visible#bdrlocal_consensus_state", - "bdrlocal_node": "/pgd/5/reference/catalogs-visible#bdrlocal_node", - "bdrlocal_node_summary": "/pgd/5/reference/catalogs-visible#bdrlocal_node_summary", - "bdrlocal_sync_status": "/pgd/5/reference/catalogs-visible#bdrlocal_sync_status", - "bdrnode": "/pgd/5/reference/catalogs-visible#bdrnode", - "bdrnode_catchup_info": "/pgd/5/reference/catalogs-visible#bdrnode_catchup_info", - "bdrnode_catchup_info_details": "/pgd/5/reference/catalogs-visible#bdrnode_catchup_info_details", - "bdrnode_conflict_resolvers": "/pgd/5/reference/catalogs-visible#bdrnode_conflict_resolvers", - "bdrnode_group": "/pgd/5/reference/catalogs-visible#bdrnode_group", - "bdrnode_group_replication_sets": "/pgd/5/reference/catalogs-visible#bdrnode_group_replication_sets", - "bdrnode_group_summary": "/pgd/5/reference/catalogs-visible#bdrnode_group_summary", - "bdrnode_local_info": "/pgd/5/reference/catalogs-visible#bdrnode_local_info", - "bdrnode_log_config": "/pgd/5/reference/catalogs-visible#bdrnode_log_config", - "bdrnode_peer_progress": "/pgd/5/reference/catalogs-visible#bdrnode_peer_progress", - "bdrnode_replication_rates": "/pgd/5/reference/catalogs-visible#bdrnode_replication_rates", - "bdrnode_slots": "/pgd/5/reference/catalogs-visible#bdrnode_slots", - "bdrnode_summary": "/pgd/5/reference/catalogs-visible#bdrnode_summary", - "bdrqueue": "/pgd/5/reference/catalogs-visible#bdrqueue", - "bdrreplication_set": "/pgd/5/reference/catalogs-visible#bdrreplication_set", - "bdrreplication_set_table": "/pgd/5/reference/catalogs-visible#bdrreplication_set_table", - "bdrreplication_set_ddl": "/pgd/5/reference/catalogs-visible#bdrreplication_set_ddl", - "bdrreplication_sets": "/pgd/5/reference/catalogs-visible#bdrreplication_sets", - "bdrschema_changes": "/pgd/5/reference/catalogs-visible#bdrschema_changes", - "bdrsequence_alloc": "/pgd/5/reference/catalogs-visible#bdrsequence_alloc", - "bdrsequences": "/pgd/5/reference/catalogs-visible#bdrsequences", - "bdrstat_activity": "/pgd/5/reference/catalogs-visible#bdrstat_activity", - "bdrstat_relation": "/pgd/5/reference/catalogs-visible#bdrstat_relation", - "bdrstat_subscription": "/pgd/5/reference/catalogs-visible#bdrstat_subscription", - "bdrsubscription": "/pgd/5/reference/catalogs-visible#bdrsubscription", - "bdrsubscription_summary": "/pgd/5/reference/catalogs-visible#bdrsubscription_summary", - "bdrtables": "/pgd/5/reference/catalogs-visible#bdrtables", - "bdrtaskmgr_work_queue": "/pgd/5/reference/catalogs-visible#bdrtaskmgr_work_queue", - "bdrtaskmgr_workitem_status": "/pgd/5/reference/catalogs-visible#bdrtaskmgr_workitem_status", - "bdrtaskmgr_local_work_queue": "/pgd/5/reference/catalogs-visible#bdrtaskmgr_local_work_queue", - "bdrtaskmgr_local_workitem_status": "/pgd/5/reference/catalogs-visible#bdrtaskmgr_local_workitem_status", - "bdrtrigger": "/pgd/5/reference/catalogs-visible#bdrtrigger", - "bdrtriggers": "/pgd/5/reference/catalogs-visible#bdrtriggers", - "bdrworkers": "/pgd/5/reference/catalogs-visible#bdrworkers", - "bdrwriters": "/pgd/5/reference/catalogs-visible#bdrwriters", - "bdrworker_tasks": "/pgd/5/reference/catalogs-visible#bdrworker_tasks", - "bdrbdr_version": "/pgd/5/reference/functions#bdrbdr_version", - "bdrbdr_version_num": "/pgd/5/reference/functions#bdrbdr_version_num", - "bdrget_relation_stats": "/pgd/5/reference/functions#bdrget_relation_stats", - "bdrget_subscription_stats": 
"/pgd/5/reference/functions#bdrget_subscription_stats", - "bdrlocal_node_id": "/pgd/5/reference/functions#bdrlocal_node_id", - "bdrlast_committed_lsn": "/pgd/5/reference/functions#bdrlast_committed_lsn", - "transaction_id": "/pgd/5/reference/functions#transaction_id", - "bdris_node_connected": "/pgd/5/reference/functions#bdris_node_connected", - "bdris_node_ready": "/pgd/5/reference/functions#bdris_node_ready", - "bdrconsensus_disable": "/pgd/5/reference/functions#bdrconsensus_disable", - "bdrconsensus_enable": "/pgd/5/reference/functions#bdrconsensus_enable", - "bdrconsensus_proto_version": "/pgd/5/reference/functions#bdrconsensus_proto_version", - "bdrconsensus_snapshot_export": "/pgd/5/reference/functions#bdrconsensus_snapshot_export", - "bdrconsensus_snapshot_import": "/pgd/5/reference/functions#bdrconsensus_snapshot_import", - "bdrconsensus_snapshot_verify": "/pgd/5/reference/functions#bdrconsensus_snapshot_verify", - "bdrget_consensus_status": "/pgd/5/reference/functions#bdrget_consensus_status", - "bdrget_raft_status": "/pgd/5/reference/functions#bdrget_raft_status", - "bdrraft_leadership_transfer": "/pgd/5/reference/functions#bdrraft_leadership_transfer", - "bdrwait_slot_confirm_lsn": "/pgd/5/reference/functions#bdrwait_slot_confirm_lsn", - "bdrwait_for_apply_queue": "/pgd/5/reference/functions#bdrwait_for_apply_queue", - "bdrget_node_sub_receive_lsn": "/pgd/5/reference/functions#bdrget_node_sub_receive_lsn", - "bdrget_node_sub_apply_lsn": "/pgd/5/reference/functions#bdrget_node_sub_apply_lsn", - "bdrreplicate_ddl_command": "/pgd/5/reference/functions#bdrreplicate_ddl_command", - "bdrrun_on_all_nodes": "/pgd/5/reference/functions#bdrrun_on_all_nodes", - "bdrrun_on_nodes": "/pgd/5/reference/functions#bdrrun_on_nodes", - "bdrrun_on_group": "/pgd/5/reference/functions#bdrrun_on_group", - "bdrglobal_lock_table": "/pgd/5/reference/functions#bdrglobal_lock_table", - "bdrwait_for_xid_progress": "/pgd/5/reference/functions#bdrwait_for_xid_progress", - "bdrlocal_group_slot_name": "/pgd/5/reference/functions#bdrlocal_group_slot_name", - "bdrnode_group_type": "/pgd/5/reference/functions#bdrnode_group_type", - "bdralter_node_kind": "/pgd/5/reference/functions#bdralter_node_kind", - "bdralter_subscription_skip_changes_upto": "/pgd/5/reference/functions#bdralter_subscription_skip_changes_upto", - "bdrglobal_advisory_lock": "/pgd/5/reference/functions#bdrglobal_advisory_lock", - "bdrglobal_advisory_unlock": "/pgd/5/reference/functions#bdrglobal_advisory_unlock", - "bdrmonitor_group_versions": "/pgd/5/reference/functions#bdrmonitor_group_versions", - "bdrmonitor_group_raft": "/pgd/5/reference/functions#bdrmonitor_group_raft", - "bdrmonitor_local_replslots": "/pgd/5/reference/functions#bdrmonitor_local_replslots", - "bdrwal_sender_stats": "/pgd/5/reference/functions#bdrwal_sender_stats", - "bdrget_decoding_worker_stat": "/pgd/5/reference/functions#bdrget_decoding_worker_stat", - "bdrlag_control": "/pgd/5/reference/functions#bdrlag_control", - "bdris_camo_partner_connected": "/pgd/5/reference/functions#bdris_camo_partner_connected", - "bdris_camo_partner_ready": "/pgd/5/reference/functions#bdris_camo_partner_ready", - "bdrget_configured_camo_partner": "/pgd/5/reference/functions#bdrget_configured_camo_partner", - "bdrwait_for_camo_partner_queue": "/pgd/5/reference/functions#bdrwait_for_camo_partner_queue", - "bdrcamo_transactions_resolved": "/pgd/5/reference/functions#bdrcamo_transactions_resolved", - "bdrlogical_transaction_status": "/pgd/5/reference/functions#bdrlogical_transaction_status", - 
"bdradd_commit_scope": "/pgd/5/reference/functions#bdradd_commit_scope", - "bdralter_commit_scope": "/pgd/5/reference/functions#bdralter_commit_scope", - "bdrremove_commit_scope": "/pgd/5/reference/functions#bdrremove_commit_scope", - "bdrdefault_conflict_detection": "/pgd/5/reference/pgd-settings#bdrdefault_conflict_detection", - "bdrdefault_sequence_kind": "/pgd/5/reference/pgd-settings#bdrdefault_sequence_kind", - "bdrdefault_replica_identity": "/pgd/5/reference/pgd-settings#bdrdefault_replica_identity", - "bdrrole_replication": "/pgd/5/reference/pgd-settings#bdrrole_replication", - "bdrddl_locking": "/pgd/5/reference/pgd-settings#bdrddl_locking", - "bdrtruncate_locking": "/pgd/5/reference/pgd-settings#bdrtruncate_locking", - "bdrglobal_lock_max_locks": "/pgd/5/reference/pgd-settings#bdrglobal_lock_max_locks", - "bdrglobal_lock_timeout": "/pgd/5/reference/pgd-settings#bdrglobal_lock_timeout", - "bdrglobal_lock_statement_timeout": "/pgd/5/reference/pgd-settings#bdrglobal_lock_statement_timeout", - "bdrglobal_lock_idle_timeout": "/pgd/5/reference/pgd-settings#bdrglobal_lock_idle_timeout", - "bdrlock_table_locking": "/pgd/5/reference/pgd-settings#bdrlock_table_locking", - "bdrpredictive_checks": "/pgd/5/reference/pgd-settings#bdrpredictive_checks", - "bdrreplay_progress_frequency": "/pgd/5/reference/pgd-settings#bdrreplay_progress_frequency", - "bdrstandby_slot_names": "/pgd/5/reference/pgd-settings#bdrstandby_slot_names", - "bdrwriters_per_subscription": "/pgd/5/reference/pgd-settings#bdrwriters_per_subscription", - "bdrmax_writers_per_subscription": "/pgd/5/reference/pgd-settings#bdrmax_writers_per_subscription", - "bdrxact_replication": "/pgd/5/reference/pgd-settings#bdrxact_replication", - "bdrpermit_unsafe_commands": "/pgd/5/reference/pgd-settings#bdrpermit_unsafe_commands", - "bdrbatch_inserts": "/pgd/5/reference/pgd-settings#bdrbatch_inserts", - "bdrmaximum_clock_skew": "/pgd/5/reference/pgd-settings#bdrmaximum_clock_skew", - "bdrmaximum_clock_skew_action": "/pgd/5/reference/pgd-settings#bdrmaximum_clock_skew_action", - "bdraccept_connections": "/pgd/5/reference/pgd-settings#bdraccept_connections", - "bdrstandby_slots_min_confirmed": "/pgd/5/reference/pgd-settings#bdrstandby_slots_min_confirmed", - "bdrwriter_input_queue_size": "/pgd/5/reference/pgd-settings#bdrwriter_input_queue_size", - "bdrwriter_output_queue_size": "/pgd/5/reference/pgd-settings#bdrwriter_output_queue_size", - "bdrmin_worker_backoff_delay": "/pgd/5/reference/pgd-settings#bdrmin_worker_backoff_delay", - "bdrcrdt_raw_value": "/pgd/5/reference/pgd-settings#bdrcrdt_raw_value", - "bdrcommit_scope": "/pgd/5/reference/pgd-settings#bdrcommit_scope", - "bdrcamo_local_mode_delay": "/pgd/5/reference/pgd-settings#bdrcamo_local_mode_delay", - "bdrcamo_enable_client_warnings": "/pgd/5/reference/pgd-settings#bdrcamo_enable_client_warnings", - "bdrdefault_streaming_mode": "/pgd/5/reference/pgd-settings#bdrdefault_streaming_mode", - "bdrlag_control_max_commit_delay": "/pgd/5/reference/pgd-settings#bdrlag_control_max_commit_delay", - "bdrlag_control_max_lag_size": "/pgd/5/reference/pgd-settings#bdrlag_control_max_lag_size", - "bdrlag_control_max_lag_time": "/pgd/5/reference/pgd-settings#bdrlag_control_max_lag_time", - "bdrlag_control_min_conforming_nodes": "/pgd/5/reference/pgd-settings#bdrlag_control_min_conforming_nodes", - "bdrlag_control_commit_delay_adjust": "/pgd/5/reference/pgd-settings#bdrlag_control_commit_delay_adjust", - "bdrlag_control_sample_interval": "/pgd/5/reference/pgd-settings#bdrlag_control_sample_interval", - 
"bdrlag_control_commit_delay_start": "/pgd/5/reference/pgd-settings#bdrlag_control_commit_delay_start", - "bdrtimestamp_snapshot_keep": "/pgd/5/reference/pgd-settings#bdrtimestamp_snapshot_keep", - "bdrdebug_level": "/pgd/5/reference/pgd-settings#bdrdebug_level", - "bdrtrace_level": "/pgd/5/reference/pgd-settings#bdrtrace_level", - "bdrtrack_subscription_apply": "/pgd/5/reference/pgd-settings#bdrtrack_subscription_apply", - "bdrtrack_relation_apply": "/pgd/5/reference/pgd-settings#bdrtrack_relation_apply", - "bdrtrack_apply_lock_timing": "/pgd/5/reference/pgd-settings#bdrtrack_apply_lock_timing", - "bdrenable_wal_decoder": "/pgd/5/reference/pgd-settings#bdrenable_wal_decoder", - "bdrreceive_lcr": "/pgd/5/reference/pgd-settings#bdrreceive_lcr", - "bdrlcr_cleanup_interval": "/pgd/5/reference/pgd-settings#bdrlcr_cleanup_interval", - "bdrglobal_connection_timeout": "/pgd/5/reference/pgd-settings#bdrglobal_connection_timeout", - "bdrglobal_keepalives": "/pgd/5/reference/pgd-settings#bdrglobal_keepalives", - "bdrglobal_keepalives_idle": "/pgd/5/reference/pgd-settings#bdrglobal_keepalives_idle", - "bdrglobal_keepalives_interval": "/pgd/5/reference/pgd-settings#bdrglobal_keepalives_interval", - "bdrglobal_keepalives_count": "/pgd/5/reference/pgd-settings#bdrglobal_keepalives_count", - "bdrglobal_tcp_user_timeout": "/pgd/5/reference/pgd-settings#bdrglobal_tcp_user_timeout", - "bdrraft_global_election_timeout": "/pgd/5/reference/pgd-settings#bdrraft_global_election_timeout", - "bdrraft_group_election_timeout": "/pgd/5/reference/pgd-settings#bdrraft_group_election_timeout", - "bdrraft_response_timeout": "/pgd/5/reference/pgd-settings#bdrraft_response_timeout", - "bdrraft_keep_min_entries": "/pgd/5/reference/pgd-settings#bdrraft_keep_min_entries", - "bdrraft_log_min_apply_duration": "/pgd/5/reference/pgd-settings#bdrraft_log_min_apply_duration", - "bdrraft_log_min_message_duration": "/pgd/5/reference/pgd-settings#bdrraft_log_min_message_duration", - "bdrraft_group_max_connections": "/pgd/5/reference/pgd-settings#bdrraft_group_max_connections", - "bdrbackwards_compatibility": "/pgd/5/reference/pgd-settings#bdrbackwards_compatibility", - "bdrtrack_replication_estimates": "/pgd/5/reference/pgd-settings#bdrtrack_replication_estimates", - "bdrlag_tracker_apply_rate_weight": "/pgd/5/reference/pgd-settings#bdrlag_tracker_apply_rate_weight", - "bdrenable_auto_sync_reconcile": "/pgd/5/reference/pgd-settings#bdrenable_auto_sync_reconcile", - "list-of-node-states": "/pgd/5/reference/nodes#list-of-node-states", - "node-management-commands": "/pgd/5/reference/nodes#node-management-commands", - "bdr_init_physical": "/pgd/5/reference/nodes#bdr_init_physical", - "bdralter_node_group_option": "/pgd/5/reference/nodes-management-interfaces#bdralter_node_group_option", - "bdralter_node_interface": "/pgd/5/reference/nodes-management-interfaces#bdralter_node_interface", - "bdralter_node_option": "/pgd/5/reference/nodes-management-interfaces#bdralter_node_option", - "bdralter_subscription_enable": "/pgd/5/reference/nodes-management-interfaces#bdralter_subscription_enable", - "bdralter_subscription_disable": "/pgd/5/reference/nodes-management-interfaces#bdralter_subscription_disable", - "bdrcreate_node": "/pgd/5/reference/nodes-management-interfaces#bdrcreate_node", - "bdrcreate_node_group": "/pgd/5/reference/nodes-management-interfaces#bdrcreate_node_group", - "bdrjoin_node_group": "/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group", - "bdrpart_node": 
"/pgd/5/reference/nodes-management-interfaces#bdrpart_node", - "bdrpromote_node": "/pgd/5/reference/nodes-management-interfaces#bdrpromote_node", - "bdrswitch_node_group": "/pgd/5/reference/nodes-management-interfaces#bdrswitch_node_group", - "bdrwait_for_join_completion": "/pgd/5/reference/nodes-management-interfaces#bdrwait_for_join_completion", - "bdralter_node_group_config": "/pgd/5/reference/nodes-management-interfaces#bdralter_node_group_config", - "bdrdrop_node_group": "/pgd/5/reference/nodes-management-interfaces#bdrdrop_node_group", - "bdrcreate_proxy": "/pgd/5/reference/routing#bdrcreate_proxy", - "bdralter_proxy_option": "/pgd/5/reference/routing#bdralter_proxy_option", - "bdrdrop_proxy": "/pgd/5/reference/routing#bdrdrop_proxy", - "bdrrouting_leadership_transfer": "/pgd/5/reference/routing#bdrrouting_leadership_transfer", - "cs.commit-scope-syntax": "/pgd/5/reference/commit-scopes#commit-scope-syntax", - "cs.commit-scope-groups": "/pgd/5/reference/commit-scopes#commit-scope-groups", - "cs.any": "/pgd/5/reference/commit-scopes#any", - "cs.any-not": "/pgd/5/reference/commit-scopes#any-not", - "cs.majority": "/pgd/5/reference/commit-scopes#majority", - "cs.majority-not": "/pgd/5/reference/commit-scopes#majority-not", - "cs.all": "/pgd/5/reference/commit-scopes#all", - "cs.all-not": "/pgd/5/reference/commit-scopes#all-not", - "cs.confirmation-level": "/pgd/5/reference/commit-scopes#confirmation-level", - "cs.on-received": "/pgd/5/reference/commit-scopes#on-received", - "cs.on-replicated": "/pgd/5/reference/commit-scopes#on-replicated", - "cs.on-durable": "/pgd/5/reference/commit-scopes#on-durable", - "cs.on-visible": "/pgd/5/reference/commit-scopes#on-visible", - "cs.commit-scope-kinds": "/pgd/5/reference/commit-scopes#commit-scope-kinds", - "cs.group-commit": "/pgd/5/reference/commit-scopes#group-commit", - "cs.group-commit-parameters": "/pgd/5/reference/commit-scopes#group-commit-parameters", - "cs.abort-on-parameters": "/pgd/5/reference/commit-scopes#abort-on-parameters", - "cs.transaction_tracking-settings": "/pgd/5/reference/commit-scopes#transaction_tracking-settings", - "cs.conflict_resolution-settings": "/pgd/5/reference/commit-scopes#conflict_resolution-settings", - "cs.commit_decision-settings": "/pgd/5/reference/commit-scopes#commit_decision-settings", - "cs.camo": "/pgd/5/reference/commit-scopes#camo", - "cs.degrade-on-parameters": "/pgd/5/reference/commit-scopes#degrade-on-parameters", - "cs.lag-control": "/pgd/5/reference/commit-scopes#lag-control", - "cs.lag-control-parameters": "/pgd/5/reference/commit-scopes#lag-control-parameters", - "cs.synchronous_commit": "/pgd/5/reference/commit-scopes#synchronous_commit", - "conflict-detection": "/pgd/5/reference/conflicts#conflict-detection", - "list-of-conflict-types": "/pgd/5/reference/conflicts#list-of-conflict-types", - "conflict-resolution": "/pgd/5/reference/conflicts#conflict-resolution", - "list-of-conflict-resolvers": "/pgd/5/reference/conflicts#list-of-conflict-resolvers", - "default-conflict-resolvers": "/pgd/5/reference/conflicts#default-conflict-resolvers", - "list-of-conflict-resolutions": "/pgd/5/reference/conflicts#list-of-conflict-resolutions", - "conflict-logging": "/pgd/5/reference/conflicts#conflict-logging", - "bdralter_table_conflict_detection": "/pgd/5/reference/conflict_functions#bdralter_table_conflict_detection", - "bdralter_node_set_conflict_resolver": "/pgd/5/reference/conflict_functions#bdralter_node_set_conflict_resolver", - "bdralter_node_set_log_config": 
"/pgd/5/reference/conflict_functions#bdralter_node_set_log_config", - "bdrcreate_replication_set": "/pgd/5/reference/repsets-management#bdrcreate_replication_set", - "bdralter_replication_set": "/pgd/5/reference/repsets-management#bdralter_replication_set", - "bdrdrop_replication_set": "/pgd/5/reference/repsets-management#bdrdrop_replication_set", - "bdralter_node_replication_sets": "/pgd/5/reference/repsets-management#bdralter_node_replication_sets", - "bdrreplication_set_add_table": "/pgd/5/reference/repsets-membership#bdrreplication_set_add_table", - "bdrreplication_set_remove_table": "/pgd/5/reference/repsets-membership#bdrreplication_set_remove_table", - "bdrreplication_set_add_ddl_filter": "/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter", - "bdrreplication_set_remove_ddl_filter": "/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter", - "pgd_bench": "/pgd/5/reference/testingandtuning#pgd_bench", - "bdralter_sequence_set_kind": "/pgd/5/reference/sequences#bdralter_sequence_set_kind", - "bdrextract_timestamp_from_snowflakeid": "/pgd/5/reference/sequences#bdrextract_timestamp_from_snowflakeid", - "bdrextract_nodeid_from_snowflakeid": "/pgd/5/reference/sequences#bdrextract_nodeid_from_snowflakeid", - "bdrextract_localseqid_from_snowflakeid": "/pgd/5/reference/sequences#bdrextract_localseqid_from_snowflakeid", - "bdrtimestamp_to_snowflakeid": "/pgd/5/reference/sequences#bdrtimestamp_to_snowflakeid", - "bdrextract_timestamp_from_timeshard": "/pgd/5/reference/sequences#bdrextract_timestamp_from_timeshard", - "bdrextract_nodeid_from_timeshard": "/pgd/5/reference/sequences#bdrextract_nodeid_from_timeshard", - "bdrextract_localseqid_from_timeshard": "/pgd/5/reference/sequences#bdrextract_localseqid_from_timeshard", - "bdrtimestamp_to_timeshard": "/pgd/5/reference/sequences#bdrtimestamp_to_timeshard", - "bdrgen_ksuuid_v2": "/pgd/5/reference/sequences#bdrgen_ksuuid_v2", - "bdrksuuid_v2_cmp": "/pgd/5/reference/sequences#bdrksuuid_v2_cmp", - "bdrextract_timestamp_from_ksuuid_v2": "/pgd/5/reference/sequences#bdrextract_timestamp_from_ksuuid_v2", - "bdrgen_ksuuid": "/pgd/5/reference/sequences#bdrgen_ksuuid", - "bdruuid_v1_cmp": "/pgd/5/reference/sequences#bdruuid_v1_cmp", - "bdrextract_timestamp_from_ksuuid": "/pgd/5/reference/sequences#bdrextract_timestamp_from_ksuuid", - "bdrautopartition": "/pgd/5/reference/autopartition#bdrautopartition", - "bdrdrop_autopartition": "/pgd/5/reference/autopartition#bdrdrop_autopartition", - "bdrautopartition_wait_for_partitions": "/pgd/5/reference/autopartition#bdrautopartition_wait_for_partitions", - "bdrautopartition_wait_for_partitions_on_all_nodes": "/pgd/5/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes", - "bdrautopartition_find_partition": "/pgd/5/reference/autopartition#bdrautopartition_find_partition", - "bdrautopartition_enable": "/pgd/5/reference/autopartition#bdrautopartition_enable", - "bdrautopartition_disable": "/pgd/5/reference/autopartition#bdrautopartition_disable", - "internal-functions": "/pgd/5/reference/autopartition#internal-functions", - "bdrautopartition_create_partition": "/pgd/5/reference/autopartition#bdrautopartition_create_partition", - "bdrautopartition_drop_partition": "/pgd/5/reference/autopartition#bdrautopartition_drop_partition", - "bdrcreate_conflict_trigger": "/pgd/5/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger", - "bdrcreate_transform_trigger": "/pgd/5/reference/streamtriggers/interfaces#bdrcreate_transform_trigger", - 
"bdrdrop_trigger": "/pgd/5/reference/streamtriggers/interfaces#bdrdrop_trigger", - "bdrtrigger_get_row": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_row", - "bdrtrigger_get_committs": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs", - "bdrtrigger_get_xid": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid", - "bdrtrigger_get_type": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_type", - "bdrtrigger_get_conflict_type": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type", - "bdrtrigger_get_origin_node_id": "/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id", - "bdrri_fkey_on_del_trigger": "/pgd/5/reference/streamtriggers/rowfunctions#bdrri_fkey_on_del_trigger", - "tg_name": "/pgd/5/reference/streamtriggers/rowvariables#tg_name", - "tg_when": "/pgd/5/reference/streamtriggers/rowvariables#tg_when", - "tg_level": "/pgd/5/reference/streamtriggers/rowvariables#tg_level", - "tg_op": "/pgd/5/reference/streamtriggers/rowvariables#tg_op", - "tg_relid": "/pgd/5/reference/streamtriggers/rowvariables#tg_relid", - "tg_table_name": "/pgd/5/reference/streamtriggers/rowvariables#tg_table_name", - "tg_table_schema": "/pgd/5/reference/streamtriggers/rowvariables#tg_table_schema", - "tg_nargs": "/pgd/5/reference/streamtriggers/rowvariables#tg_nargs", - "tg_argv": "/pgd/5/reference/streamtriggers/rowvariables#tg_argv", - "bdrautopartition_partitions": "/pgd/5/reference/catalogs-internal#bdrautopartition_partitions", - "bdrautopartition_rules": "/pgd/5/reference/catalogs-internal#bdrautopartition_rules", - "bdrddl_epoch": "/pgd/5/reference/catalogs-internal#bdrddl_epoch", - "bdrevent_history": "/pgd/5/reference/catalogs-internal#bdrevent_history", - "bdrevent_summary": "/pgd/5/reference/catalogs-internal#bdrevent_summary", - "bdrnode_config": "/pgd/5/reference/catalogs-internal#bdrnode_config", - "bdrnode_config_summary": "/pgd/5/reference/catalogs-internal#bdrnode_config_summary", - "bdrnode_group_config": "/pgd/5/reference/catalogs-internal#bdrnode_group_config", - "bdrnode_group_routing_config_summary": "/pgd/5/reference/catalogs-internal#bdrnode_group_routing_config_summary", - "bdrnode_group_routing_info": "/pgd/5/reference/catalogs-internal#bdrnode_group_routing_info", - "bdrnode_group_routing_summary": "/pgd/5/reference/catalogs-internal#bdrnode_group_routing_summary", - "bdrnode_routing_config_summary": "/pgd/5/reference/catalogs-internal#bdrnode_routing_config_summary", - "bdrproxy_config": "/pgd/5/reference/catalogs-internal#bdrproxy_config", - "bdrproxy_config_summary": "/pgd/5/reference/catalogs-internal#bdrproxy_config_summary", - "bdrsequence_kind": "/pgd/5/reference/catalogs-internal#bdrsequence_kind", - "bdrbdr_get_commit_decisions": "/pgd/5/reference/functions-internal#bdrbdr_get_commit_decisions", - "bdrbdr_track_commit_decision": "/pgd/5/reference/functions-internal#bdrbdr_track_commit_decision", - "bdrconsensus_kv_fetch": "/pgd/5/reference/functions-internal#bdrconsensus_kv_fetch", - "bdrconsensus_kv_store": "/pgd/5/reference/functions-internal#bdrconsensus_kv_store", - "bdrdecode_message_payload": "/pgd/5/reference/functions-internal#bdrdecode_message_payload", - "bdrdecode_message_response_payload": "/pgd/5/reference/functions-internal#bdrdecode_message_response_payload", - "bdrdifference_fix_origin_create": "/pgd/5/reference/functions-internal#bdrdifference_fix_origin_create", - "bdrdifference_fix_session_reset": 
"/pgd/5/reference/functions-internal#bdrdifference_fix_session_reset", - "bdrdifference_fix_session_setup": "/pgd/5/reference/functions-internal#bdrdifference_fix_session_setup", - "bdrdifference_fix_xact_set_avoid_conflict": "/pgd/5/reference/functions-internal#bdrdifference_fix_xact_set_avoid_conflict", - "bdrdrop_node": "/pgd/5/reference/functions-internal#bdrdrop_node", - "bdrget_global_locks": "/pgd/5/reference/functions-internal#bdrget_global_locks", - "bdrget_node_conflict_resolvers": "/pgd/5/reference/functions-internal#bdrget_node_conflict_resolvers", - "bdrget_slot_flush_timestamp": "/pgd/5/reference/functions-internal#bdrget_slot_flush_timestamp", - "bdrinternal_alter_sequence_set_kind": "/pgd/5/reference/functions-internal#bdrinternal_alter_sequence_set_kind", - "bdrinternal_replication_set_add_table": "/pgd/5/reference/functions-internal#bdrinternal_replication_set_add_table", - "bdrinternal_replication_set_remove_table": "/pgd/5/reference/functions-internal#bdrinternal_replication_set_remove_table", - "bdrinternal_submit_join_request": "/pgd/5/reference/functions-internal#bdrinternal_submit_join_request", - "bdrisolation_test_session_is_blocked": "/pgd/5/reference/functions-internal#bdrisolation_test_session_is_blocked", - "bdrlocal_node_info": "/pgd/5/reference/functions-internal#bdrlocal_node_info", - "bdrmsgb_connect": "/pgd/5/reference/functions-internal#bdrmsgb_connect", - "bdrmsgb_deliver_message": "/pgd/5/reference/functions-internal#bdrmsgb_deliver_message", - "bdrnode_catchup_state_name": "/pgd/5/reference/functions-internal#bdrnode_catchup_state_name", - "bdrnode_kind_name": "/pgd/5/reference/functions-internal#bdrnode_kind_name", - "bdrpeer_state_name": "/pgd/5/reference/functions-internal#bdrpeer_state_name", - "bdrpg_xact_origin": "/pgd/5/reference/functions-internal#bdrpg_xact_origin", - "bdrrequest_replay_progress_update": "/pgd/5/reference/functions-internal#bdrrequest_replay_progress_update", - "bdrreset_relation_stats": "/pgd/5/reference/functions-internal#bdrreset_relation_stats", - "bdrreset_subscription_stats": "/pgd/5/reference/functions-internal#bdrreset_subscription_stats", - "bdrresynchronize_table_from_node": "/pgd/5/reference/functions-internal#bdrresynchronize_table_from_node", - "bdrseq_currval": "/pgd/5/reference/functions-internal#bdrseq_currval", - "bdrseq_lastval": "/pgd/5/reference/functions-internal#bdrseq_lastval", - "bdrseq_nextval": "/pgd/5/reference/functions-internal#bdrseq_nextval", - "bdrshow_subscription_status": "/pgd/5/reference/functions-internal#bdrshow_subscription_status", - "bdrshow_workers": "/pgd/5/reference/functions-internal#bdrshow_workers", - "bdrshow_writers": "/pgd/5/reference/functions-internal#bdrshow_writers", - "bdrtaskmgr_set_leader": "/pgd/5/reference/functions-internal#bdrtaskmgr_set_leader", - "bdrtaskmgr_get_last_completed_workitem": "/pgd/5/reference/functions-internal#bdrtaskmgr_get_last_completed_workitem", - "bdrtaskmgr_work_queue_check_status": "/pgd/5/reference/functions-internal#bdrtaskmgr_work_queue_check_status", - "bdrpglogical_proto_version_ranges": "/pgd/5/reference/functions-internal#bdrpglogical_proto_version_ranges", - "bdrget_min_required_replication_slots": "/pgd/5/reference/functions-internal#bdrget_min_required_replication_slots", - "bdrget_min_required_worker_processes": "/pgd/5/reference/functions-internal#bdrget_min_required_worker_processes", - "bdrstat_get_activity": "/pgd/5/reference/functions-internal#bdrstat_get_activity", - "bdrworker_role_id_name": 
"/pgd/5/reference/functions-internal#bdrworker_role_id_name", - "bdrlag_history": "/pgd/5/reference/functions-internal#bdrlag_history", - "bdrget_raft_instance_by_nodegroup": "/pgd/5/reference/functions-internal#bdrget_raft_instance_by_nodegroup", - "bdrmonitor_camo_on_all_nodes": "/pgd/5/reference/functions-internal#bdrmonitor_camo_on_all_nodes", - "bdrmonitor_raft_details_on_all_nodes": "/pgd/5/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes", - "bdrmonitor_replslots_details_on_all_nodes": "/pgd/5/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes", - "bdrmonitor_subscription_details_on_all_nodes": "/pgd/5/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes", - "bdrmonitor_version_details_on_all_nodes": "/pgd/5/reference/functions-internal#bdrmonitor_version_details_on_all_nodes", - "bdrnode_group_member_info": "/pgd/5/reference/functions-internal#bdrnode_group_member_info", - "bdrcolumn_timestamps_create": "/pgd/5/reference/clcd#bdrcolumn_timestamps_create" -} \ No newline at end of file + "bdrcamo_decision_journal": "/pgd/latest/reference/catalogs-visible#bdrcamo_decision_journal", + "bdrcommit_scopes": "/pgd/latest/reference/catalogs-visible#bdrcommit_scopes", + "bdrconflict_history": "/pgd/latest/reference/catalogs-visible#bdrconflict_history", + "bdrconflict_history_summary": "/pgd/latest/reference/catalogs-visible#bdrconflict_history_summary", + "bdrconsensus_kv_data": "/pgd/latest/reference/catalogs-visible#bdrconsensus_kv_data", + "bdrcrdt_handlers": "/pgd/latest/reference/catalogs-visible#bdrcrdt_handlers", + "bdrddl_replication": "/pgd/latest/reference/pgd-settings#bdrddl_replication", + "bdrdepend": "/pgd/latest/reference/catalogs-visible#bdrdepend", + "bdrglobal_consensus_journal": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_journal", + "bdrglobal_consensus_journal_details": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_journal_details", + "bdrglobal_consensus_response_journal": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_response_journal", + "bdrglobal_lock": "/pgd/latest/reference/catalogs-visible#bdrglobal_lock", + "bdrglobal_locks": "/pgd/latest/reference/catalogs-visible#bdrglobal_locks", + "bdrgroup_camo_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_camo_details", + "bdrgroup_raft_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details", + "bdrgroup_replslots_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_replslots_details", + "bdrgroup_subscription_summary": "/pgd/latest/reference/catalogs-visible#bdrgroup_subscription_summary", + "bdrgroup_versions_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_versions_details", + "bdrlocal_consensus_snapshot": "/pgd/latest/reference/catalogs-visible#bdrlocal_consensus_snapshot", + "bdrlocal_consensus_state": "/pgd/latest/reference/catalogs-visible#bdrlocal_consensus_state", + "bdrlocal_node": "/pgd/latest/reference/catalogs-visible#bdrlocal_node", + "bdrlocal_node_summary": "/pgd/latest/reference/catalogs-visible#bdrlocal_node_summary", + "bdrlocal_sync_status": "/pgd/latest/reference/catalogs-visible#bdrlocal_sync_status", + "bdrnode": "/pgd/latest/reference/catalogs-visible#bdrnode", + "bdrnode_catchup_info": "/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info", + "bdrnode_catchup_info_details": "/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info_details", + "bdrnode_conflict_resolvers": "/pgd/latest/reference/catalogs-visible#bdrnode_conflict_resolvers", + 
"bdrnode_group": "/pgd/latest/reference/catalogs-visible#bdrnode_group", + "bdrnode_group_replication_sets": "/pgd/latest/reference/catalogs-visible#bdrnode_group_replication_sets", + "bdrnode_group_summary": "/pgd/latest/reference/catalogs-visible#bdrnode_group_summary", + "bdrnode_local_info": "/pgd/latest/reference/catalogs-visible#bdrnode_local_info", + "bdrnode_log_config": "/pgd/latest/reference/catalogs-visible#bdrnode_log_config", + "bdrnode_peer_progress": "/pgd/latest/reference/catalogs-visible#bdrnode_peer_progress", + "bdrnode_replication_rates": "/pgd/latest/reference/catalogs-visible#bdrnode_replication_rates", + "bdrnode_slots": "/pgd/latest/reference/catalogs-visible#bdrnode_slots", + "bdrnode_summary": "/pgd/latest/reference/catalogs-visible#bdrnode_summary", + "bdrqueue": "/pgd/latest/reference/catalogs-visible#bdrqueue", + "bdrreplication_set": "/pgd/latest/reference/catalogs-visible#bdrreplication_set", + "bdrreplication_set_table": "/pgd/latest/reference/catalogs-visible#bdrreplication_set_table", + "bdrreplication_set_ddl": "/pgd/latest/reference/catalogs-visible#bdrreplication_set_ddl", + "bdrreplication_sets": "/pgd/latest/reference/catalogs-visible#bdrreplication_sets", + "bdrschema_changes": "/pgd/latest/reference/catalogs-visible#bdrschema_changes", + "bdrsequence_alloc": "/pgd/latest/reference/catalogs-visible#bdrsequence_alloc", + "bdrsequences": "/pgd/latest/reference/catalogs-visible#bdrsequences", + "bdrstat_activity": "/pgd/latest/reference/catalogs-visible#bdrstat_activity", + "bdrstat_relation": "/pgd/latest/reference/catalogs-visible#bdrstat_relation", + "bdrstat_subscription": "/pgd/latest/reference/catalogs-visible#bdrstat_subscription", + "bdrsubscription": "/pgd/latest/reference/catalogs-visible#bdrsubscription", + "bdrsubscription_summary": "/pgd/latest/reference/catalogs-visible#bdrsubscription_summary", + "bdrtables": "/pgd/latest/reference/catalogs-visible#bdrtables", + "bdrtaskmgr_work_queue": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_work_queue", + "bdrtaskmgr_workitem_status": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_workitem_status", + "bdrtaskmgr_local_work_queue": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_local_work_queue", + "bdrtaskmgr_local_workitem_status": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_local_workitem_status", + "bdrtrigger": "/pgd/latest/reference/catalogs-visible#bdrtrigger", + "bdrtriggers": "/pgd/latest/reference/catalogs-visible#bdrtriggers", + "bdrworkers": "/pgd/latest/reference/catalogs-visible#bdrworkers", + "bdrwriters": "/pgd/latest/reference/catalogs-visible#bdrwriters", + "bdrworker_tasks": "/pgd/latest/reference/catalogs-visible#bdrworker_tasks", + "bdrbdr_version": "/pgd/latest/reference/functions#bdrbdr_version", + "bdrbdr_version_num": "/pgd/latest/reference/functions#bdrbdr_version_num", + "bdrget_relation_stats": "/pgd/latest/reference/functions#bdrget_relation_stats", + "bdrget_subscription_stats": "/pgd/latest/reference/functions#bdrget_subscription_stats", + "bdrlocal_node_id": "/pgd/latest/reference/functions#bdrlocal_node_id", + "bdrlast_committed_lsn": "/pgd/latest/reference/functions#bdrlast_committed_lsn", + "transaction_id": "/pgd/latest/reference/functions#transaction_id", + "bdris_node_connected": "/pgd/latest/reference/functions#bdris_node_connected", + "bdris_node_ready": "/pgd/latest/reference/functions#bdris_node_ready", + "bdrconsensus_disable": "/pgd/latest/reference/functions#bdrconsensus_disable", + "bdrconsensus_enable": 
"/pgd/latest/reference/functions#bdrconsensus_enable", + "bdrconsensus_proto_version": "/pgd/latest/reference/functions#bdrconsensus_proto_version", + "bdrconsensus_snapshot_export": "/pgd/latest/reference/functions#bdrconsensus_snapshot_export", + "bdrconsensus_snapshot_import": "/pgd/latest/reference/functions#bdrconsensus_snapshot_import", + "bdrconsensus_snapshot_verify": "/pgd/latest/reference/functions#bdrconsensus_snapshot_verify", + "bdrget_consensus_status": "/pgd/latest/reference/functions#bdrget_consensus_status", + "bdrget_raft_status": "/pgd/latest/reference/functions#bdrget_raft_status", + "bdrraft_leadership_transfer": "/pgd/latest/reference/functions#bdrraft_leadership_transfer", + "bdrwait_slot_confirm_lsn": "/pgd/latest/reference/functions#bdrwait_slot_confirm_lsn", + "bdrwait_for_apply_queue": "/pgd/latest/reference/functions#bdrwait_for_apply_queue", + "bdrget_node_sub_receive_lsn": "/pgd/latest/reference/functions#bdrget_node_sub_receive_lsn", + "bdrget_node_sub_apply_lsn": "/pgd/latest/reference/functions#bdrget_node_sub_apply_lsn", + "bdrreplicate_ddl_command": "/pgd/latest/reference/functions#bdrreplicate_ddl_command", + "bdrrun_on_all_nodes": "/pgd/latest/reference/functions#bdrrun_on_all_nodes", + "bdrrun_on_nodes": "/pgd/latest/reference/functions#bdrrun_on_nodes", + "bdrrun_on_group": "/pgd/latest/reference/functions#bdrrun_on_group", + "bdrglobal_lock_table": "/pgd/latest/reference/functions#bdrglobal_lock_table", + "bdrwait_for_xid_progress": "/pgd/latest/reference/functions#bdrwait_for_xid_progress", + "bdrlocal_group_slot_name": "/pgd/latest/reference/functions#bdrlocal_group_slot_name", + "bdrnode_group_type": "/pgd/latest/reference/functions#bdrnode_group_type", + "bdralter_node_kind": "/pgd/latest/reference/functions#bdralter_node_kind", + "bdralter_subscription_skip_changes_upto": "/pgd/latest/reference/functions#bdralter_subscription_skip_changes_upto", + "bdrglobal_advisory_lock": "/pgd/latest/reference/functions#bdrglobal_advisory_lock", + "bdrglobal_advisory_unlock": "/pgd/latest/reference/functions#bdrglobal_advisory_unlock", + "bdrmonitor_group_versions": "/pgd/latest/reference/functions#bdrmonitor_group_versions", + "bdrmonitor_group_raft": "/pgd/latest/reference/functions#bdrmonitor_group_raft", + "bdrmonitor_local_replslots": "/pgd/latest/reference/functions#bdrmonitor_local_replslots", + "bdrwal_sender_stats": "/pgd/latest/reference/functions#bdrwal_sender_stats", + "bdrget_decoding_worker_stat": "/pgd/latest/reference/functions#bdrget_decoding_worker_stat", + "bdrlag_control": "/pgd/latest/reference/functions#bdrlag_control", + "bdris_camo_partner_connected": "/pgd/latest/reference/functions#bdris_camo_partner_connected", + "bdris_camo_partner_ready": "/pgd/latest/reference/functions#bdris_camo_partner_ready", + "bdrget_configured_camo_partner": "/pgd/latest/reference/functions#bdrget_configured_camo_partner", + "bdrwait_for_camo_partner_queue": "/pgd/latest/reference/functions#bdrwait_for_camo_partner_queue", + "bdrcamo_transactions_resolved": "/pgd/latest/reference/functions#bdrcamo_transactions_resolved", + "bdrlogical_transaction_status": "/pgd/latest/reference/functions#bdrlogical_transaction_status", + "bdradd_commit_scope": "/pgd/latest/reference/functions#bdradd_commit_scope", + "bdralter_commit_scope": "/pgd/latest/reference/functions#bdralter_commit_scope", + "bdrremove_commit_scope": "/pgd/latest/reference/functions#bdrremove_commit_scope", + "bdrdefault_conflict_detection": 
"/pgd/latest/reference/pgd-settings#bdrdefault_conflict_detection", + "bdrdefault_sequence_kind": "/pgd/latest/reference/pgd-settings#bdrdefault_sequence_kind", + "bdrdefault_replica_identity": "/pgd/latest/reference/pgd-settings#bdrdefault_replica_identity", + "bdrrole_replication": "/pgd/latest/reference/pgd-settings#bdrrole_replication", + "bdrddl_locking": "/pgd/latest/reference/pgd-settings#bdrddl_locking", + "bdrtruncate_locking": "/pgd/latest/reference/pgd-settings#bdrtruncate_locking", + "bdrglobal_lock_max_locks": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_max_locks", + "bdrglobal_lock_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_timeout", + "bdrglobal_lock_statement_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_statement_timeout", + "bdrglobal_lock_idle_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_idle_timeout", + "bdrlock_table_locking": "/pgd/latest/reference/pgd-settings#bdrlock_table_locking", + "bdrpredictive_checks": "/pgd/latest/reference/pgd-settings#bdrpredictive_checks", + "bdrreplay_progress_frequency": "/pgd/latest/reference/pgd-settings#bdrreplay_progress_frequency", + "bdrstandby_slot_names": "/pgd/latest/reference/pgd-settings#bdrstandby_slot_names", + "bdrwriters_per_subscription": "/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription", + "bdrmax_writers_per_subscription": "/pgd/latest/reference/pgd-settings#bdrmax_writers_per_subscription", + "bdrxact_replication": "/pgd/latest/reference/pgd-settings#bdrxact_replication", + "bdrpermit_unsafe_commands": "/pgd/latest/reference/pgd-settings#bdrpermit_unsafe_commands", + "bdrbatch_inserts": "/pgd/latest/reference/pgd-settings#bdrbatch_inserts", + "bdrmaximum_clock_skew": "/pgd/latest/reference/pgd-settings#bdrmaximum_clock_skew", + "bdrmaximum_clock_skew_action": "/pgd/latest/reference/pgd-settings#bdrmaximum_clock_skew_action", + "bdraccept_connections": "/pgd/latest/reference/pgd-settings#bdraccept_connections", + "bdrstandby_slots_min_confirmed": "/pgd/latest/reference/pgd-settings#bdrstandby_slots_min_confirmed", + "bdrwriter_input_queue_size": "/pgd/latest/reference/pgd-settings#bdrwriter_input_queue_size", + "bdrwriter_output_queue_size": "/pgd/latest/reference/pgd-settings#bdrwriter_output_queue_size", + "bdrmin_worker_backoff_delay": "/pgd/latest/reference/pgd-settings#bdrmin_worker_backoff_delay", + "bdrcrdt_raw_value": "/pgd/latest/reference/pgd-settings#bdrcrdt_raw_value", + "bdrcommit_scope": "/pgd/latest/reference/pgd-settings#bdrcommit_scope", + "bdrcamo_local_mode_delay": "/pgd/latest/reference/pgd-settings#bdrcamo_local_mode_delay", + "bdrcamo_enable_client_warnings": "/pgd/latest/reference/pgd-settings#bdrcamo_enable_client_warnings", + "bdrdefault_streaming_mode": "/pgd/latest/reference/pgd-settings#bdrdefault_streaming_mode", + "bdrlag_control_max_commit_delay": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_commit_delay", + "bdrlag_control_max_lag_size": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_lag_size", + "bdrlag_control_max_lag_time": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_lag_time", + "bdrlag_control_min_conforming_nodes": "/pgd/latest/reference/pgd-settings#bdrlag_control_min_conforming_nodes", + "bdrlag_control_commit_delay_adjust": "/pgd/latest/reference/pgd-settings#bdrlag_control_commit_delay_adjust", + "bdrlag_control_sample_interval": "/pgd/latest/reference/pgd-settings#bdrlag_control_sample_interval", + "bdrlag_control_commit_delay_start": 
"/pgd/latest/reference/pgd-settings#bdrlag_control_commit_delay_start", + "bdrtimestamp_snapshot_keep": "/pgd/latest/reference/pgd-settings#bdrtimestamp_snapshot_keep", + "bdrdebug_level": "/pgd/latest/reference/pgd-settings#bdrdebug_level", + "bdrtrace_level": "/pgd/latest/reference/pgd-settings#bdrtrace_level", + "bdrtrack_subscription_apply": "/pgd/latest/reference/pgd-settings#bdrtrack_subscription_apply", + "bdrtrack_relation_apply": "/pgd/latest/reference/pgd-settings#bdrtrack_relation_apply", + "bdrtrack_apply_lock_timing": "/pgd/latest/reference/pgd-settings#bdrtrack_apply_lock_timing", + "bdrenable_wal_decoder": "/pgd/latest/reference/pgd-settings#bdrenable_wal_decoder", + "bdrreceive_lcr": "/pgd/latest/reference/pgd-settings#bdrreceive_lcr", + "bdrlcr_cleanup_interval": "/pgd/latest/reference/pgd-settings#bdrlcr_cleanup_interval", + "bdrglobal_connection_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_connection_timeout", + "bdrglobal_keepalives": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives", + "bdrglobal_keepalives_idle": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_idle", + "bdrglobal_keepalives_interval": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_interval", + "bdrglobal_keepalives_count": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_count", + "bdrglobal_tcp_user_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_tcp_user_timeout", + "bdrraft_global_election_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_global_election_timeout", + "bdrraft_group_election_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_group_election_timeout", + "bdrraft_response_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_response_timeout", + "bdrraft_keep_min_entries": "/pgd/latest/reference/pgd-settings#bdrraft_keep_min_entries", + "bdrraft_log_min_apply_duration": "/pgd/latest/reference/pgd-settings#bdrraft_log_min_apply_duration", + "bdrraft_log_min_message_duration": "/pgd/latest/reference/pgd-settings#bdrraft_log_min_message_duration", + "bdrraft_group_max_connections": "/pgd/latest/reference/pgd-settings#bdrraft_group_max_connections", + "bdrbackwards_compatibility": "/pgd/latest/reference/pgd-settings#bdrbackwards_compatibility", + "bdrtrack_replication_estimates": "/pgd/latest/reference/pgd-settings#bdrtrack_replication_estimates", + "bdrlag_tracker_apply_rate_weight": "/pgd/latest/reference/pgd-settings#bdrlag_tracker_apply_rate_weight", + "bdrenable_auto_sync_reconcile": "/pgd/latest/reference/pgd-settings#bdrenable_auto_sync_reconcile", + "list-of-node-states": "/pgd/latest/reference/nodes#list-of-node-states", + "node-management-commands": "/pgd/latest/reference/nodes#node-management-commands", + "bdr_init_physical": "/pgd/latest/reference/nodes#bdr_init_physical", + "bdralter_node_group_option": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_option", + "bdralter_node_interface": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_interface", + "bdralter_node_option": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_option", + "bdralter_subscription_enable": "/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_enable", + "bdralter_subscription_disable": "/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_disable", + "bdrcreate_node": "/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node", + "bdrcreate_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node_group", + 
"bdrjoin_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group", + "bdrpart_node": "/pgd/latest/reference/nodes-management-interfaces#bdrpart_node", + "bdrpromote_node": "/pgd/latest/reference/nodes-management-interfaces#bdrpromote_node", + "bdrswitch_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrswitch_node_group", + "bdrwait_for_join_completion": "/pgd/latest/reference/nodes-management-interfaces#bdrwait_for_join_completion", + "bdralter_node_group_config": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_config", + "bdrdrop_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrdrop_node_group", + "bdrcreate_proxy": "/pgd/latest/reference/routing#bdrcreate_proxy", + "bdralter_proxy_option": "/pgd/latest/reference/routing#bdralter_proxy_option", + "bdrdrop_proxy": "/pgd/latest/reference/routing#bdrdrop_proxy", + "bdrrouting_leadership_transfer": "/pgd/latest/reference/routing#bdrrouting_leadership_transfer", + "cs.commit-scope-syntax": "/pgd/latest/reference/commit-scopes#commit-scope-syntax", + "cs.commit-scope-groups": "/pgd/latest/reference/commit-scopes#commit-scope-groups", + "cs.any": "/pgd/latest/reference/commit-scopes#any", + "cs.any-not": "/pgd/latest/reference/commit-scopes#any-not", + "cs.majority": "/pgd/latest/reference/commit-scopes#majority", + "cs.majority-not": "/pgd/latest/reference/commit-scopes#majority-not", + "cs.all": "/pgd/latest/reference/commit-scopes#all", + "cs.all-not": "/pgd/latest/reference/commit-scopes#all-not", + "cs.confirmation-level": "/pgd/latest/reference/commit-scopes#confirmation-level", + "cs.on-received": "/pgd/latest/reference/commit-scopes#on-received", + "cs.on-replicated": "/pgd/latest/reference/commit-scopes#on-replicated", + "cs.on-durable": "/pgd/latest/reference/commit-scopes#on-durable", + "cs.on-visible": "/pgd/latest/reference/commit-scopes#on-visible", + "cs.commit-scope-kinds": "/pgd/latest/reference/commit-scopes#commit-scope-kinds", + "cs.group-commit": "/pgd/latest/reference/commit-scopes#group-commit", + "cs.group-commit-parameters": "/pgd/latest/reference/commit-scopes#group-commit-parameters", + "cs.abort-on-parameters": "/pgd/latest/reference/commit-scopes#abort-on-parameters", + "cs.transaction_tracking-settings": "/pgd/latest/reference/commit-scopes#transaction_tracking-settings", + "cs.conflict_resolution-settings": "/pgd/latest/reference/commit-scopes#conflict_resolution-settings", + "cs.commit_decision-settings": "/pgd/latest/reference/commit-scopes#commit_decision-settings", + "cs.camo": "/pgd/latest/reference/commit-scopes#camo", + "cs.degrade-on-parameters": "/pgd/latest/reference/commit-scopes#degrade-on-parameters", + "cs.lag-control": "/pgd/latest/reference/commit-scopes#lag-control", + "cs.lag-control-parameters": "/pgd/latest/reference/commit-scopes#lag-control-parameters", + "cs.synchronous_commit": "/pgd/latest/reference/commit-scopes#synchronous_commit", + "conflict-detection": "/pgd/latest/reference/conflicts#conflict-detection", + "list-of-conflict-types": "/pgd/latest/reference/conflicts#list-of-conflict-types", + "conflict-resolution": "/pgd/latest/reference/conflicts#conflict-resolution", + "list-of-conflict-resolvers": "/pgd/latest/reference/conflicts#list-of-conflict-resolvers", + "default-conflict-resolvers": "/pgd/latest/reference/conflicts#default-conflict-resolvers", + "list-of-conflict-resolutions": "/pgd/latest/reference/conflicts#list-of-conflict-resolutions", + "conflict-logging": 
"/pgd/latest/reference/conflicts#conflict-logging", + "bdralter_table_conflict_detection": "/pgd/latest/reference/conflict_functions#bdralter_table_conflict_detection", + "bdralter_node_set_conflict_resolver": "/pgd/latest/reference/conflict_functions#bdralter_node_set_conflict_resolver", + "bdralter_node_set_log_config": "/pgd/latest/reference/conflict_functions#bdralter_node_set_log_config", + "bdrcreate_replication_set": "/pgd/latest/reference/repsets-management#bdrcreate_replication_set", + "bdralter_replication_set": "/pgd/latest/reference/repsets-management#bdralter_replication_set", + "bdrdrop_replication_set": "/pgd/latest/reference/repsets-management#bdrdrop_replication_set", + "bdralter_node_replication_sets": "/pgd/latest/reference/repsets-management#bdralter_node_replication_sets", + "bdrreplication_set_add_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_add_table", + "bdrreplication_set_remove_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table", + "bdrreplication_set_add_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter", + "bdrreplication_set_remove_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter", + "pgd_bench": "/pgd/latest/reference/testingandtuning#pgd_bench", + "bdralter_sequence_set_kind": "/pgd/latest/reference/sequences#bdralter_sequence_set_kind", + "bdrextract_timestamp_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_snowflakeid", + "bdrextract_nodeid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_snowflakeid", + "bdrextract_localseqid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_localseqid_from_snowflakeid", + "bdrtimestamp_to_snowflakeid": "/pgd/latest/reference/sequences#bdrtimestamp_to_snowflakeid", + "bdrextract_timestamp_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_timeshard", + "bdrextract_nodeid_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_timeshard", + "bdrextract_localseqid_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_localseqid_from_timeshard", + "bdrtimestamp_to_timeshard": "/pgd/latest/reference/sequences#bdrtimestamp_to_timeshard", + "bdrgen_ksuuid_v2": "/pgd/latest/reference/sequences#bdrgen_ksuuid_v2", + "bdrksuuid_v2_cmp": "/pgd/latest/reference/sequences#bdrksuuid_v2_cmp", + "bdrextract_timestamp_from_ksuuid_v2": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_ksuuid_v2", + "bdrgen_ksuuid": "/pgd/latest/reference/sequences#bdrgen_ksuuid", + "bdruuid_v1_cmp": "/pgd/latest/reference/sequences#bdruuid_v1_cmp", + "bdrextract_timestamp_from_ksuuid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_ksuuid", + "bdrautopartition": "/pgd/latest/reference/autopartition#bdrautopartition", + "bdrdrop_autopartition": "/pgd/latest/reference/autopartition#bdrdrop_autopartition", + "bdrautopartition_wait_for_partitions": "/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions", + "bdrautopartition_wait_for_partitions_on_all_nodes": "/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes", + "bdrautopartition_find_partition": "/pgd/latest/reference/autopartition#bdrautopartition_find_partition", + "bdrautopartition_enable": "/pgd/latest/reference/autopartition#bdrautopartition_enable", + "bdrautopartition_disable": "/pgd/latest/reference/autopartition#bdrautopartition_disable", + "internal-functions": 
"/pgd/latest/reference/autopartition#internal-functions", + "bdrautopartition_create_partition": "/pgd/latest/reference/autopartition#bdrautopartition_create_partition", + "bdrautopartition_drop_partition": "/pgd/latest/reference/autopartition#bdrautopartition_drop_partition", + "bdrcreate_conflict_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger", + "bdrcreate_transform_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_transform_trigger", + "bdrdrop_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrdrop_trigger", + "bdrtrigger_get_row": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_row", + "bdrtrigger_get_committs": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs", + "bdrtrigger_get_xid": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid", + "bdrtrigger_get_type": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_type", + "bdrtrigger_get_conflict_type": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type", + "bdrtrigger_get_origin_node_id": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id", + "bdrri_fkey_on_del_trigger": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrri_fkey_on_del_trigger", + "tg_name": "/pgd/latest/reference/streamtriggers/rowvariables#tg_name", + "tg_when": "/pgd/latest/reference/streamtriggers/rowvariables#tg_when", + "tg_level": "/pgd/latest/reference/streamtriggers/rowvariables#tg_level", + "tg_op": "/pgd/latest/reference/streamtriggers/rowvariables#tg_op", + "tg_relid": "/pgd/latest/reference/streamtriggers/rowvariables#tg_relid", + "tg_table_name": "/pgd/latest/reference/streamtriggers/rowvariables#tg_table_name", + "tg_table_schema": "/pgd/latest/reference/streamtriggers/rowvariables#tg_table_schema", + "tg_nargs": "/pgd/latest/reference/streamtriggers/rowvariables#tg_nargs", + "tg_argv": "/pgd/latest/reference/streamtriggers/rowvariables#tg_argv", + "bdrautopartition_partitions": "/pgd/latest/reference/catalogs-internal#bdrautopartition_partitions", + "bdrautopartition_rules": "/pgd/latest/reference/catalogs-internal#bdrautopartition_rules", + "bdrddl_epoch": "/pgd/latest/reference/catalogs-internal#bdrddl_epoch", + "bdrevent_history": "/pgd/latest/reference/catalogs-internal#bdrevent_history", + "bdrevent_summary": "/pgd/latest/reference/catalogs-internal#bdrevent_summary", + "bdrnode_config": "/pgd/latest/reference/catalogs-internal#bdrnode_config", + "bdrnode_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_config_summary", + "bdrnode_group_config": "/pgd/latest/reference/catalogs-internal#bdrnode_group_config", + "bdrnode_group_routing_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_config_summary", + "bdrnode_group_routing_info": "/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_info", + "bdrnode_group_routing_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_summary", + "bdrnode_routing_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_routing_config_summary", + "bdrproxy_config": "/pgd/latest/reference/catalogs-internal#bdrproxy_config", + "bdrproxy_config_summary": "/pgd/latest/reference/catalogs-internal#bdrproxy_config_summary", + "bdrsequence_kind": "/pgd/latest/reference/catalogs-internal#bdrsequence_kind", + "bdrbdr_get_commit_decisions": "/pgd/latest/reference/functions-internal#bdrbdr_get_commit_decisions", + 
"bdrbdr_track_commit_decision": "/pgd/latest/reference/functions-internal#bdrbdr_track_commit_decision", + "bdrconsensus_kv_fetch": "/pgd/latest/reference/functions-internal#bdrconsensus_kv_fetch", + "bdrconsensus_kv_store": "/pgd/latest/reference/functions-internal#bdrconsensus_kv_store", + "bdrdecode_message_payload": "/pgd/latest/reference/functions-internal#bdrdecode_message_payload", + "bdrdecode_message_response_payload": "/pgd/latest/reference/functions-internal#bdrdecode_message_response_payload", + "bdrdifference_fix_origin_create": "/pgd/latest/reference/functions-internal#bdrdifference_fix_origin_create", + "bdrdifference_fix_session_reset": "/pgd/latest/reference/functions-internal#bdrdifference_fix_session_reset", + "bdrdifference_fix_session_setup": "/pgd/latest/reference/functions-internal#bdrdifference_fix_session_setup", + "bdrdifference_fix_xact_set_avoid_conflict": "/pgd/latest/reference/functions-internal#bdrdifference_fix_xact_set_avoid_conflict", + "bdrdrop_node": "/pgd/latest/reference/functions-internal#bdrdrop_node", + "bdrget_global_locks": "/pgd/latest/reference/functions-internal#bdrget_global_locks", + "bdrget_node_conflict_resolvers": "/pgd/latest/reference/functions-internal#bdrget_node_conflict_resolvers", + "bdrget_slot_flush_timestamp": "/pgd/latest/reference/functions-internal#bdrget_slot_flush_timestamp", + "bdrinternal_alter_sequence_set_kind": "/pgd/latest/reference/functions-internal#bdrinternal_alter_sequence_set_kind", + "bdrinternal_replication_set_add_table": "/pgd/latest/reference/functions-internal#bdrinternal_replication_set_add_table", + "bdrinternal_replication_set_remove_table": "/pgd/latest/reference/functions-internal#bdrinternal_replication_set_remove_table", + "bdrinternal_submit_join_request": "/pgd/latest/reference/functions-internal#bdrinternal_submit_join_request", + "bdrisolation_test_session_is_blocked": "/pgd/latest/reference/functions-internal#bdrisolation_test_session_is_blocked", + "bdrlocal_node_info": "/pgd/latest/reference/functions-internal#bdrlocal_node_info", + "bdrmsgb_connect": "/pgd/latest/reference/functions-internal#bdrmsgb_connect", + "bdrmsgb_deliver_message": "/pgd/latest/reference/functions-internal#bdrmsgb_deliver_message", + "bdrnode_catchup_state_name": "/pgd/latest/reference/functions-internal#bdrnode_catchup_state_name", + "bdrnode_kind_name": "/pgd/latest/reference/functions-internal#bdrnode_kind_name", + "bdrpeer_state_name": "/pgd/latest/reference/functions-internal#bdrpeer_state_name", + "bdrpg_xact_origin": "/pgd/latest/reference/functions-internal#bdrpg_xact_origin", + "bdrrequest_replay_progress_update": "/pgd/latest/reference/functions-internal#bdrrequest_replay_progress_update", + "bdrreset_relation_stats": "/pgd/latest/reference/functions-internal#bdrreset_relation_stats", + "bdrreset_subscription_stats": "/pgd/latest/reference/functions-internal#bdrreset_subscription_stats", + "bdrresynchronize_table_from_node": "/pgd/latest/reference/functions-internal#bdrresynchronize_table_from_node", + "bdrseq_currval": "/pgd/latest/reference/functions-internal#bdrseq_currval", + "bdrseq_lastval": "/pgd/latest/reference/functions-internal#bdrseq_lastval", + "bdrseq_nextval": "/pgd/latest/reference/functions-internal#bdrseq_nextval", + "bdrshow_subscription_status": "/pgd/latest/reference/functions-internal#bdrshow_subscription_status", + "bdrshow_workers": "/pgd/latest/reference/functions-internal#bdrshow_workers", + "bdrshow_writers": "/pgd/latest/reference/functions-internal#bdrshow_writers", + 
"bdrtaskmgr_set_leader": "/pgd/latest/reference/functions-internal#bdrtaskmgr_set_leader", + "bdrtaskmgr_get_last_completed_workitem": "/pgd/latest/reference/functions-internal#bdrtaskmgr_get_last_completed_workitem", + "bdrtaskmgr_work_queue_check_status": "/pgd/latest/reference/functions-internal#bdrtaskmgr_work_queue_check_status", + "bdrpglogical_proto_version_ranges": "/pgd/latest/reference/functions-internal#bdrpglogical_proto_version_ranges", + "bdrget_min_required_replication_slots": "/pgd/latest/reference/functions-internal#bdrget_min_required_replication_slots", + "bdrget_min_required_worker_processes": "/pgd/latest/reference/functions-internal#bdrget_min_required_worker_processes", + "bdrstat_get_activity": "/pgd/latest/reference/functions-internal#bdrstat_get_activity", + "bdrworker_role_id_name": "/pgd/latest/reference/functions-internal#bdrworker_role_id_name", + "bdrlag_history": "/pgd/latest/reference/functions-internal#bdrlag_history", + "bdrget_raft_instance_by_nodegroup": "/pgd/latest/reference/functions-internal#bdrget_raft_instance_by_nodegroup", + "bdrmonitor_camo_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_camo_on_all_nodes", + "bdrmonitor_raft_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes", + "bdrmonitor_replslots_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes", + "bdrmonitor_subscription_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes", + "bdrmonitor_version_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_version_details_on_all_nodes", + "bdrnode_group_member_info": "/pgd/latest/reference/functions-internal#bdrnode_group_member_info", + "bdrcolumn_timestamps_create": "/pgd/latest/reference/clcd#bdrcolumn_timestamps_create" +} diff --git a/product_docs/docs/pge/13/release_notes/index.mdx b/product_docs/docs/pge/13/release_notes/index.mdx index 9cb122c248a..f92750561f1 100644 --- a/product_docs/docs/pge/13/release_notes/index.mdx +++ b/product_docs/docs/pge/13/release_notes/index.mdx @@ -4,6 +4,14 @@ navTitle: Release notes description: Release notes for EDB Postgres Extended Server 13. --- +## 2ndQuadrant Postgres 13.18 + +Release date: 2024-11-21 + +This release is primarily an upstream merge. + +Merged with community PostgreSQL 13.18. See the [PostgreSQL 13.18 Release Notes](https://www.postgresql.org/docs/13/release-13-18.html) for more information. + ## 2ndQuadrant Postgres 13.16.1r1.1.19 Release date: 2024-08-22 diff --git a/product_docs/docs/pge/14/release_notes/index.mdx b/product_docs/docs/pge/14/release_notes/index.mdx index 43c04bd5fba..b571d80f2e3 100644 --- a/product_docs/docs/pge/14/release_notes/index.mdx +++ b/product_docs/docs/pge/14/release_notes/index.mdx @@ -4,6 +4,14 @@ navTitle: Release notes description: Release notes for EDB Postgres Extended Server 14. --- +## EDB Postgres Extended Server 14.15 + +Release date: 2024-11-21 + +This release is primarily an upstream merge. + +Merged with community PostgreSQL 14.15. See the [PostgreSQL 14.15 Release Notes](https://www.postgresql.org/docs/14/release-14-15.html) for more information. 
+ ## EDB Postgres Extended Server 14.13.1 Release date: 2024-08-22 diff --git a/product_docs/docs/pge/15/installing/index.mdx b/product_docs/docs/pge/15/installing/index.mdx index db0991e6253..3d95d0b0b15 100644 --- a/product_docs/docs/pge/15/installing/index.mdx +++ b/product_docs/docs/pge/15/installing/index.mdx @@ -6,6 +6,7 @@ description: Installation instructions for EDB Postgres Extended Server on Linux navigation: - linux_x86_64 + - linux_arm64 --- Select a link to access the applicable installation instructions: @@ -27,3 +28,11 @@ Select a link to access the applicable installation instructions: - [Ubuntu 22.04](linux_x86_64/pge_ubuntu_22), [Ubuntu 20.04](linux_x86_64/pge_ubuntu_20) - [Debian 11](linux_x86_64/pge_debian_11) + +## Linux [AArch64 (ARM64)](linux_arm64) + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/pge_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/pge_rhel_9) diff --git a/product_docs/docs/pge/15/installing/linux_arm64/index.mdx b/product_docs/docs/pge/15/installing/linux_arm64/index.mdx new file mode 100644 index 00000000000..5827df314c9 --- /dev/null +++ b/product_docs/docs/pge/15/installing/linux_arm64/index.mdx @@ -0,0 +1,15 @@ +--- +title: "Installing EDB Postgres Extended Server on Linux AArch64 (ARM64)" +navTitle: "On Linux ARM64" + +navigation: + - pge_rhel_9 +--- + +Operating system-specific install instructions are described in the corresponding documentation: + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](pge_rhel_9) + +- [Oracle Linux (OL) 9](pge_rhel_9) diff --git a/product_docs/docs/pge/15/installing/linux_arm64/pge_rhel_9.mdx b/product_docs/docs/pge/15/installing/linux_arm64/pge_rhel_9.mdx new file mode 100644 index 00000000000..3b793cc0af9 --- /dev/null +++ b/product_docs/docs/pge/15/installing/linux_arm64/pge_rhel_9.mdx @@ -0,0 +1,129 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Postgres Extended Server on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended15-server edb-postgresextended15-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-15-setup` script creates a cluster. 
+ +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge15/bin/edb-pge-15-setup initdb + +sudo systemctl start edb-pge-15 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/15/release_notes/index.mdx b/product_docs/docs/pge/15/release_notes/index.mdx index 7cbffcbd0ad..581c0ba8e02 100644 --- a/product_docs/docs/pge/15/release_notes/index.mdx +++ b/product_docs/docs/pge/15/release_notes/index.mdx @@ -1,6 +1,7 @@ --- title: "Release notes" navigation: + - rel_notes15.10 - rel_notes15.8.1 - rel_notes15.8 - rel_notes15.7 @@ -14,16 +15,17 @@ The EDB Postgres Extended Server documentation describes the latest version of EDB Postgres Extended Server 15, including minor releases and patches. These release notes cover what was new in each release. 
-| Version | Release date | -|-----------------------|--------------| +| Version | Release date | +|---------------------------|--------------| +| [15.10](rel_notes15.10) | 21 Nov 2024 | | [15.8.1](rel_notes15.8.1) | 22 Aug 2024 | -| [15.8](rel_notes15.8) | 08 Aug 2024 | -| [15.7](rel_notes15.7) | 09 May 2024 | -| [15.6](rel_notes15.6) | 08 Feb 2024 | -| [15.5](rel_notes15.5) | 09 Nov 2023 | -| [15.4](rel_notes15.4) | 21 Aug 2023 | -| [15.3](rel_notes15.3) | 11 May 2023 | -| [15.2](rel_notes15.2) | 14 Feb 2023 | +| [15.8](rel_notes15.8) | 08 Aug 2024 | +| [15.7](rel_notes15.7) | 09 May 2024 | +| [15.6](rel_notes15.6) | 08 Feb 2024 | +| [15.5](rel_notes15.5) | 09 Nov 2023 | +| [15.4](rel_notes15.4) | 21 Aug 2023 | +| [15.3](rel_notes15.3) | 11 May 2023 | +| [15.2](rel_notes15.2) | 14 Feb 2023 | diff --git a/product_docs/docs/pge/15/release_notes/rel_notes15.10.mdx b/product_docs/docs/pge/15/release_notes/rel_notes15.10.mdx new file mode 100644 index 00000000000..fb3bc8cdbf0 --- /dev/null +++ b/product_docs/docs/pge/15/release_notes/rel_notes15.10.mdx @@ -0,0 +1,12 @@ +--- +title: "EDB Postgres Extended Server 15.10 release notes" +navTitle: Version 15.10 +--- + +Released: 21 Nov 2024 + +New features, enhancements, bug fixes, and other changes in EDB Postgres Extended Server 15.10 include: + +| Type | Description | Ticket | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------| +| Upstream merge | Merged with community PostgreSQL 15.10. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 15.10 Release Notes](https://www.postgresql.org/docs/15/release-15-10.html) for more information. 
| [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) | diff --git a/product_docs/docs/pge/16/installing/index.mdx b/product_docs/docs/pge/16/installing/index.mdx index 0cc88e55d5e..4883cc3bb07 100644 --- a/product_docs/docs/pge/16/installing/index.mdx +++ b/product_docs/docs/pge/16/installing/index.mdx @@ -31,6 +31,12 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/pge_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/pge_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/pge_debian_12) diff --git a/product_docs/docs/pge/16/installing/linux_arm64/index.mdx b/product_docs/docs/pge/16/installing/linux_arm64/index.mdx index 230f6000758..04d27036fdd 100644 --- a/product_docs/docs/pge/16/installing/linux_arm64/index.mdx +++ b/product_docs/docs/pge/16/installing/linux_arm64/index.mdx @@ -3,11 +3,18 @@ title: "Installing EDB Postgres Extended Server on Linux AArch64 (ARM64)" navTitle: "On Linux ARM64" navigation: + - pge_rhel_9 - pge_debian_12 --- Operating system-specific install instructions are described in the corresponding documentation: +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](pge_rhel_9) + +- [Oracle Linux (OL) 9](pge_rhel_9) + ### Debian and derivatives - [Debian 12](pge_debian_12) diff --git a/product_docs/docs/pge/16/installing/linux_arm64/pge_rhel_9.mdx b/product_docs/docs/pge/16/installing/linux_arm64/pge_rhel_9.mdx new file mode 100644 index 00000000000..3b9f60bc7ec --- /dev/null +++ b/product_docs/docs/pge/16/installing/linux_arm64/pge_rhel_9.mdx @@ -0,0 +1,129 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Postgres Extended Server on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended16-server edb-postgresextended16-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-16-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge16/bin/edb-pge-16-setup initdb + +sudo systemctl start edb-pge-16 +``` + +To work in your cluster, log in as the postgres user. 
Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/16/release_notes/index.mdx b/product_docs/docs/pge/16/release_notes/index.mdx index a7907266d63..148db547aa4 100644 --- a/product_docs/docs/pge/16/release_notes/index.mdx +++ b/product_docs/docs/pge/16/release_notes/index.mdx @@ -2,6 +2,7 @@ title: "Release notes" description: "Release notes for EDB Postgres Extended Server 16." navigation: + - rel_notes16.6 - rel_notes16.4.1 - rel_notes16.4 - rel_notes16.3 @@ -14,6 +15,7 @@ cover what was new in each release. 
| Version | Release date | |---------------------------|--------------| +| [16.6](rel_notes16.6) | 21 Nov 2024 | | [16.4.1](rel_notes16.4.1) | 22 Aug 2024 | | [16.4](rel_notes16.4) | 08 Aug 2024 | | [16.3](rel_notes16.3) | 09 May 2024 | diff --git a/product_docs/docs/pge/16/release_notes/rel_notes16.6.mdx b/product_docs/docs/pge/16/release_notes/rel_notes16.6.mdx new file mode 100644 index 00000000000..d63003701d5 --- /dev/null +++ b/product_docs/docs/pge/16/release_notes/rel_notes16.6.mdx @@ -0,0 +1,12 @@ +--- +title: EDB Postgres Extended Server 16.6 release notes +navTitle: "Version 16.6" +--- + +Released: 21 Nov 2024 + +EDB Postgres Extended Server 16.6 includes the following enhancements and bug fixes: + +| Type | Description | Ticket | +|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------| +| Upstream merge | Merged with community PostgreSQL 16.6. This release includes a fix for [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/). See the [PostgreSQL 16.6 Release Notes](https://www.postgresql.org/docs/16/release-16-6.html) for more information. | [CVE-2024-10978](https://www.postgresql.org/support/security/CVE-2024-10978/) | diff --git a/product_docs/docs/pge/17/administration/01_setting_configuration_parameters.mdx b/product_docs/docs/pge/17/administration/01_setting_configuration_parameters.mdx new file mode 100644 index 00000000000..6a0cd8e19c1 --- /dev/null +++ b/product_docs/docs/pge/17/administration/01_setting_configuration_parameters.mdx @@ -0,0 +1,95 @@ +--- +title: "Setting configuration parameters" +navTitle: "Setting configuration parameters" +description: "Describes how to set the configuration parameters for EDB Postgres Extended Server." +--- + +Set each configuration parameter using a name/value pair. Parameter names aren't case sensitive. The parameter name is typically separated from its value by an optional equals sign (`=`). + +This example shows some configuration parameter settings in the `postgresql.conf` file: + +```ini +# This is a comment +log_connections = yes +log_destination = 'syslog' +search_path = '"$user", public' +shared_buffers = 128MB +``` + +## Types of parameter values + +Parameter values are specified as one of five types: + +- **Boolean** — Acceptable values are `on`, `off`, `true`, `false`, `yes`, `no`, `1`, `0`, or any unambiguous prefix of these. +- **Integer** — Number without a fractional part. +- **Floating point** — Number with an optional fractional part separated by a decimal point. +- **String** — Text value enclosed in single quotes if the value isn't a simple identifier or number, that is, the value contains special characters such as spaces or other punctuation marks. +- **Enum** — Specific set of string values. The allowed values can be found in the system view `pg_settings.enumvals`. Enum values are not case sensitive. + +Some settings specify a memory or time value. Each of these has an implicit unit, which is kilobytes, blocks (typically 8 kilobytes), milliseconds, seconds, or minutes. You can find default units by referencing the system view `pg_settings.unit`. You can specify a different unit explicitly. + +Valid memory units are: +- `kB` (kilobytes) +- `MB` (megabytes) +- `GB` (gigabytes). 
+
+Valid time units are:
+- `ms` (milliseconds)
+- `s` (seconds)
+- `min` (minutes)
+- `h` (hours)
+- `d` (days).
+
+The multiplier for memory units is 1024.
+
+## Specifying configuration parameter settings
+
+A number of parameter settings are set when the EDB Postgres Extended Server database product is built. These are read-only parameters, and you can't change their values. A few parameters are also permanently set for each database when the database is created. These parameters are read-only, and you can't later change them for the database. However, there are a number of ways to specify the configuration parameter settings:
+
+- The initial settings for almost all configurable parameters across the entire database cluster are listed in the `postgresql.conf` configuration file. These settings are put into effect upon database server start or restart. You can override some of these initial parameter settings. All configuration parameters have built-in default settings that are in effect unless you explicitly override them.
+
+- Configuration parameters in the `postgresql.conf` file are overridden when the same parameters are included in the `postgresql.auto.conf` file. Use the `ALTER SYSTEM` command to manage the configuration parameters in the `postgresql.auto.conf` file.
+
+- You can modify parameter settings in the configuration file while the database server is running. If the configuration file is then reloaded (meaning a SIGHUP signal is issued), the changed parameter settings take effect immediately for certain parameter types. For some of these parameter types, the new settings are available in a currently running session immediately after the reload. For others, you must start a new session to use the new settings. And for some others, modified settings don't take effect until the database server is stopped and restarted. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/config-setting.html) for information on how to reload the configuration file.
+
+- You can use the SQL commands `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` to modify certain parameter settings. The modified parameter settings take effect for new sessions after you execute the command. `ALTER DATABASE` affects new sessions connecting to the specified database. `ALTER ROLE` affects new sessions started by the specified role. `ALTER ROLE IN DATABASE` affects new sessions started by the specified role connecting to the specified database. Parameter settings established by these SQL commands remain in effect indefinitely, across database server restarts, overriding settings established by the other methods. You can change parameter settings established using the `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` commands by either:
+
+  - Reissuing these commands with a different parameter value.
+
+  - Issuing these commands using the `SET parameter TO DEFAULT` clause or the `RESET parameter` clause. These clauses change the parameter back to using the setting set by the other methods. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-commands.html) for the syntax of these SQL commands.
+
+- You can change certain parameter settings for the duration of an individual session by using the `PGOPTIONS` environment variable or the `SET` command in the psql command-line program, as shown in the sketch after this list. Parameter settings made this way override settings established using any of the methods discussed earlier, but only during that session.
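+
+For example, the following minimal sketch shows both session-level approaches. It assumes a running local cluster and the psql client from the installation steps in this documentation; `work_mem` stands in for any session-settable parameter:
+
+```shell
+# Start a psql session with a parameter set through the PGOPTIONS
+# environment variable. Each -c option passes one name/value pair
+# to the server for this session only.
+PGOPTIONS="-c work_mem=64MB" psql postgres
+
+# Inside the session, SHOW work_mem; reports 64MB. To change the
+# value for the rest of the session only, use SET:
+#   SET work_mem = '32MB';
+#   RESET work_mem;   -- return to the session's default
+```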
+
+## Modifying the postgresql.conf file
+
+The configuration parameters in the `postgresql.conf` file specify server behavior with regard to auditing, authentication, encryption, and other behaviors. On Linux and Windows hosts, the `postgresql.conf` file resides in the `data` directory under your EDB Postgres Extended Server installation.
+
+Parameters that are preceded by a pound sign (#) are set to their default value. To change a parameter value, remove the pound sign and enter a new value. After setting or changing a parameter, you must either reload or restart the server for the new parameter value to take effect.
+
+In the `postgresql.conf` file, some parameters contain comments that indicate `change requires restart`. To view a list of the parameters that require a server restart, use the following query at the psql command line:
+
+```sql
+SELECT name FROM pg_settings WHERE context = 'postmaster';
+```
+
+## Modifying the pg_hba.conf file
+
+Appropriate authentication methods provide protection and security. Entries in the `pg_hba.conf` file specify the authentication methods that the server uses with connecting clients. Before connecting to the server, you might need to modify the authentication properties specified in the `pg_hba.conf` file.
+
+When you invoke the initdb utility to create a cluster, the utility creates a `pg_hba.conf` file for that cluster that specifies the type of authentication required from connecting clients. You can modify this file. After modifying the authentication settings in the `pg_hba.conf` file, restart the server to apply the changes. For more information about authentication and modifying the `pg_hba.conf` file, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html).
+
+When the server receives a connection request, it verifies the credentials provided against the authentication settings in the `pg_hba.conf` file before allowing a connection to a database. To log the `pg_hba.conf` file entry used to authenticate a connection to the server, set the `log_connections` parameter to `on` in the `postgresql.conf` file.
+
+Each record in the `pg_hba.conf` file specifies a connection type, database name, user name, client IP address, and the authentication method that authorizes a connection matching these parameters. Once the server authorizes a connection, the log shows the matched line number and the authentication record from the `pg_hba.conf` file.
+
+This example shows a log detail for a valid `pg_hba.conf` entry after successful authentication:
+
+```shell
+2020-05-08 10:42:17 IST LOG: connection received: host=[local]
+2020-05-08 10:42:17 IST LOG: connection authorized: user=u1 database=edb application_name=psql
+2020-05-08 10:42:17 IST DETAIL: Connection matched pg_hba.conf line 84:
+"local all all md5"
+```
diff --git a/product_docs/docs/pge/17/administration/index.mdx b/product_docs/docs/pge/17/administration/index.mdx
new file mode 100644
index 00000000000..08b4255196f
--- /dev/null
+++ b/product_docs/docs/pge/17/administration/index.mdx
@@ -0,0 +1,16 @@
+---
+title: "Database configuration"
+description: "How to configure EDB Postgres Extended Server databases."
+navigation:
+- 01_setting_configuration_parameters
+---
+
+EDB Postgres Extended Server includes features to help you maintain, secure, and operate EDB Postgres Extended Server databases.
+ +You can configure grand unified configuration (GUC) parameters at runtime by modifying the `postgresql.conf` and `pg_hba.conf` files. + +- The `postgresql.conf` file allows you to make persistent changes to your database configuration. +- The `pg_hba.conf` file allows you to change access and authentication settings. + +See [Setting configuration parameters](01_setting_configuration_parameters) for more information. + diff --git a/product_docs/docs/pge/17/deploy_options.mdx b/product_docs/docs/pge/17/deploy_options.mdx new file mode 100644 index 00000000000..7499961f6ff --- /dev/null +++ b/product_docs/docs/pge/17/deploy_options.mdx @@ -0,0 +1,14 @@ +--- + +title: Deployment options +originalFilePath: index.md +description: Deployment options available for EDB Postgres Extended Server. +--- + +The deployment options include: + +- [Installing](installing) on a virtual machine or physical server using native packages + +- Deploying it with [EDB Postgres Distributed](/pgd/latest/) using [Trusted Postgres Architect](/pgd/latest/deploy-config/deploy-tpa/) + +- Deploying it on [EDB Postgres AI Cloud Service](/edb-postgres-ai/cloud-service/) with extreme-high-availability cluster types diff --git a/product_docs/docs/pge/17/extensions.mdx b/product_docs/docs/pge/17/extensions.mdx new file mode 100644 index 00000000000..86bf83bce1d --- /dev/null +++ b/product_docs/docs/pge/17/extensions.mdx @@ -0,0 +1,13 @@ +--- +navTitle: Extensions +title: Postgres extensions supported in EDB Postgres Extended Server +description: Postgres extensions supported in EDB Postgres Extended Server. +--- + +EDB provides support for several Postgres extensions on EDB Postgres Extended Server: + +- Open-source extensions +- EDB supported open-source extensions +- EDB-developed extensions + +See [Postgres extensions available by deployment](/pg_extensions/) for an overview of all supported extensions and links to their documentation sites. \ No newline at end of file diff --git a/product_docs/docs/pge/17/index.mdx b/product_docs/docs/pge/17/index.mdx new file mode 100644 index 00000000000..daaeaa72cb7 --- /dev/null +++ b/product_docs/docs/pge/17/index.mdx @@ -0,0 +1,34 @@ +--- +title: EDB Postgres Extended Server +originalFilePath: index.md +navigation: + - release_notes + - "#Get Started" + - deploy_options + - installing + - administration + - "#Features" + - tde + - replication + - extensions + - "#Upgrade" + - upgrading + - "#Reference" + - parameters + - sql_features + - operation +indexCards: simple +--- + +EDB Postgres Extended Server is a Postgres database server distribution built on open-source, community PostgreSQL. It's fully compatible with PostgreSQL. If you have applications written and tested to work with PostgreSQL, they will behave the same with EDB Postgres Extended Server. We will support and fix any functionality or behavior differences between community PostgreSQL and EDB Postgres Extended Server. + +EDB Postgres Extended Server's primary purpose is to extend PostgreSQL with a limited number of features that can't be implemented as extensions, such as [enhanced replication optimization](replication) used by [EDB Postgres Distributed](/pgd/latest/) and [Transparent Data Encryption](/tde/latest/), while maintaining parity in other respects. 
+
+Additional value-add enterprise features include:
+
+- Security through [Transparent Data Encryption](/tde/latest/)
+- Optional [SQL superset](sql_features) to community PostgreSQL
+- [WAL pacing delays to avoid flooding transaction logs](./operation/#avoid-flooding-transaction-logs)
+- [Additional tracing and diagnostics options](./operation/#additional-tracing-and-diagnostics-options)
diff --git a/product_docs/docs/pge/17/installing/component_locations.mdx b/product_docs/docs/pge/17/installing/component_locations.mdx
new file mode 100644
index 00000000000..4c4a1f70932
--- /dev/null
+++ b/product_docs/docs/pge/17/installing/component_locations.mdx
@@ -0,0 +1,43 @@
+---
+title: Default component locations
+navTitle: Component locations
+description: "Provides information about accessing EDB Postgres Extended Server components after installation."
+---
+
+The package managers for the various Linux variations install EDB Postgres Extended Server components in different locations. If you need to access the components after installation, see:
+
+- [RHEL/OL/Rocky Linux/AlmaLinux/CentOS/SLES locations](#rhelolrocky-linuxalmalinuxcentossles-locations)
+- [Debian/Ubuntu locations](#debianubuntu-locations)
+
+## RHEL/OL/Rocky Linux/AlmaLinux/CentOS/SLES locations
+
+The RPM installers place EDB Postgres Extended Server components in the directories listed in the table.
+
+| Component                   | Location                       |
+|-----------------------------|--------------------------------|
+| Executables                 | `/usr/edb/pge17/bin`           |
+| Libraries                   | `/usr/edb/pge17/lib`           |
+| Cluster configuration files | `/var/lib/edb-pge/17`          |
+| Documentation               | `/usr/edb/pge17/share/man`     |
+| Contrib                     | `/usr/edb/pge17/share/contrib` |
+| Data                        | `/var/lib/edb-pge/17/data`     |
+| Logs                        | `/var/log/edb/pge17`           |
+| Lock files                  | `/var/lock/edb/pge17`          |
+| Backup area                 | `/var/lib/edb-pge/17/backups`  |
+| Templates                   | `/usr/edb/pge17/share`         |
+| Procedural languages        | `/usr/edb/pge17/lib`           |
+| Development headers         | `/usr/edb/pge17/include`       |
+| Shared data                 | `/usr/edb/pge17/share`         |
+
+## Debian/Ubuntu locations
+
+The Debian package manager places EDB Postgres Extended Server components in the directories listed in the table.
+
+| Component                   | Location                           |
+|-----------------------------|------------------------------------|
+| Executables                 | `/usr/lib/edb-pge/17/bin`          |
+| Libraries                   | `/usr/lib/edb-pge/17/lib`          |
+| Cluster configuration files | `/var/lib/edb-pge/17/main`         |
+| Data                        | `/var/lib/edb-pge/17/main`         |
+| Logs                        | `/var/log/edb-pge/`                |
+| Lock files                  | `/var/lock/edb/pge17`              |
diff --git a/product_docs/docs/pge/17/installing/index.mdx b/product_docs/docs/pge/17/installing/index.mdx
new file mode 100644
index 00000000000..61a8900ad8e
--- /dev/null
+++ b/product_docs/docs/pge/17/installing/index.mdx
@@ -0,0 +1,42 @@
+---
+navTitle: Installing
+title: Installing EDB Postgres Extended Server on Linux
+
+description: Installation instructions for EDB Postgres Extended Server on Linux.
+ +navigation: + - linux_x86_64 + - linux_arm64 +--- + +Select a link to access the applicable installation instructions: + +## Linux [x86-64 (amd64)](linux_x86_64) + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_x86_64/pge_rhel_9), [RHEL 8](linux_x86_64/pge_rhel_8) + +- [Oracle Linux (OL) 9](linux_x86_64/pge_rhel_9), [Oracle Linux (OL) 8](linux_x86_64/pge_rhel_8) + +- [Rocky Linux 9](linux_x86_64/pge_other_linux_9), [Rocky Linux 8](linux_x86_64/pge_other_linux_8) + +- [AlmaLinux 9](linux_x86_64/pge_other_linux_9), [AlmaLinux 8](linux_x86_64/pge_other_linux_8) + +### Debian and derivatives + +- [Ubuntu 22.04](linux_x86_64/pge_ubuntu_22) + +- [Debian 12](linux_x86_64/pge_debian_12), [Debian 11](linux_x86_64/pge_debian_11) + +## Linux [AArch64 (ARM64)](linux_arm64) + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/pge_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/pge_rhel_9) + +### Debian and derivatives + +- [Debian 12](linux_arm64/pge_debian_12) diff --git a/product_docs/docs/pge/17/installing/linux_arm64/index.mdx b/product_docs/docs/pge/17/installing/linux_arm64/index.mdx new file mode 100644 index 00000000000..04d27036fdd --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_arm64/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Installing EDB Postgres Extended Server on Linux AArch64 (ARM64)" +navTitle: "On Linux ARM64" + +navigation: + - pge_rhel_9 + - pge_debian_12 +--- + +Operating system-specific install instructions are described in the corresponding documentation: + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](pge_rhel_9) + +- [Oracle Linux (OL) 9](pge_rhel_9) + +### Debian and derivatives + +- [Debian 12](pge_debian_12) diff --git a/product_docs/docs/pge/17/installing/linux_arm64/pge_debian_12.mdx b/product_docs/docs/pge/17/installing/linux_arm64/pge_debian_12.mdx new file mode 100644 index 00000000000..c3832d6fa55 --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_arm64/pge_debian_12.mdx @@ -0,0 +1,124 @@ +--- +navTitle: Debian 12 +title: Installing EDB Postgres Extended Server on Debian 12 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `apt-cache search enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +## Install the package + +```shell +sudo apt-get -y install edb-postgresextended-17 +``` + +## Initial configuration + +This section steps you through getting started with your cluster including logging in, ensuring the installation was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. 
+ +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/lib/edb-pge/17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_arm64/pge_rhel_9.mdx b/product_docs/docs/pge/17/installing/linux_arm64/pge_rhel_9.mdx new file mode 100644 index 00000000000..37ed943c9e6 --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_arm64/pge_rhel_9.mdx @@ -0,0 +1,129 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Postgres Extended Server on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. 
+ +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended17-server edb-postgresextended17-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". 
+``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/index.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/index.mdx new file mode 100644 index 00000000000..6e773b54920 --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/index.mdx @@ -0,0 +1,41 @@ +--- +title: "Installing EDB Postgres Extended Server on Linux x86 (amd64)" +navTitle: "On Linux x86" + +navigation: + - pge_rhel_9 + - pge_rhel_8 + - pge_other_linux_9 + - pge_other_linux_8 + - pge_ubuntu_22 + - pge_debian_12 + - pge_debian_11 +--- + +Operating system-specific install instructions are described in the corresponding documentation: + +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](pge_rhel_9) + +- [RHEL 8](pge_rhel_8) + +- [Oracle Linux (OL) 9](pge_rhel_9) + +- [Oracle Linux (OL) 8](pge_rhel_8) + +- [Rocky Linux 9](pge_other_linux_9) + +- [Rocky Linux 8](pge_other_linux_8) + +- [AlmaLinux 9](pge_other_linux_9) + +- [AlmaLinux 8](pge_other_linux_8) + +### Debian and derivatives + +- [Ubuntu 22.04](pge_ubuntu_22) + +- [Debian 12](pge_debian_12) + +- [Debian 11](pge_debian_11) diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_11.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_11.mdx new file mode 100644 index 00000000000..15ead90f66e --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_11.mdx @@ -0,0 +1,124 @@ +--- +navTitle: Debian 11 +title: Installing EDB Postgres Extended Server on Debian 11 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `apt-cache search enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +## Install the package + +```shell +sudo apt-get -y install edb-postgresextended-17 +``` + +## Initial configuration + +This section steps you through getting started with your cluster including logging in, ensuring the installation was successful, connecting to your cluster, and creating the user password. 
+ +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/lib/edb-pge/17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_12.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_12.mdx new file mode 100644 index 00000000000..9f268a516ff --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_debian_12.mdx @@ -0,0 +1,124 @@ +--- +navTitle: Debian 12 +title: Installing EDB Postgres Extended Server on Debian 12 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `apt-cache search enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. 
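+
+Optionally, before installing, you can confirm that the package is visible from the repository you just configured. This is a minimal check, assuming the repository setup above completed successfully; the package name matches the one used in the install step that follows:
+
+```shell
+# Show the candidate version and source repository for the
+# EDB Postgres Extended Server package. If apt can't locate
+# the package, revisit the repository setup.
+apt-cache policy edb-postgresextended-17
+```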
+ +## Install the package + +```shell +sudo apt-get -y install edb-postgresextended-17 +``` + +## Initial configuration + +This section steps you through getting started with your cluster including logging in, ensuring the installation was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/lib/edb-pge/17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_8.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_8.mdx new file mode 100644 index 00000000000..7747051e4fd --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_8.mdx @@ -0,0 +1,135 @@ +--- +navTitle: AlmaLinux 8 or Rocky Linux 8 +title: Installing EDB Postgres Extended Server on AlmaLinux 8 or Rocky Linux 8 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. 
+ + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + + ```shell + sudo dnf -y install epel-release + ``` + +- Enable additional repositories to resolve dependencies: + ```shell + sudo dnf config-manager --set-enabled powertools + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended17-server edb-postgresextended17-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". 
+``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_9.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_9.mdx new file mode 100644 index 00000000000..fb16eea337d --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_other_linux_9.mdx @@ -0,0 +1,135 @@ +--- +navTitle: AlmaLinux 9 or Rocky Linux 9 +title: Installing EDB Postgres Extended Server on AlmaLinux 9 or Rocky Linux 9 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + + ```shell + sudo dnf -y install epel-release + ``` + +- Enable additional repositories to resolve dependencies: + ```shell + sudo dnf config-manager --set-enabled crb + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended17-server edb-postgresextended17-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. 
For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_8.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_8.mdx new file mode 100644 index 00000000000..8f82e772f13 --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_8.mdx @@ -0,0 +1,129 @@ +--- +navTitle: RHEL 8 or OL 8 +title: Installing EDB Postgres Extended Server on RHEL 8 or OL 8 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended17-server edb-postgresextended17-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. 
+ +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_9.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_9.mdx new file mode 100644 index 00000000000..d7ca1bdc9b8 --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_rhel_9.mdx @@ -0,0 +1,129 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Postgres Extended Server on RHEL 9 or OL 9 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. 
+ +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-postgresextended17-server edb-postgresextended17-contrib +``` + +## Initial configuration + +Getting started with your cluster involves logging in, ensuring the installation and initial configuration was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/edb/pge17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/installing/linux_x86_64/pge_ubuntu_22.mdx b/product_docs/docs/pge/17/installing/linux_x86_64/pge_ubuntu_22.mdx new file mode 100644 index 00000000000..4f68a0b917a --- /dev/null +++ b/product_docs/docs/pge/17/installing/linux_x86_64/pge_ubuntu_22.mdx @@ -0,0 +1,124 @@ +--- +navTitle: Ubuntu 22.04 +title: Installing EDB Postgres Extended Server on Ubuntu 22.04 x86_64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: +--- + +## Prerequisites + +Before you begin the installation process: + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. 
+ + To determine if your repository exists, enter this command: + + `apt-cache search enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. + +## Install the package + +```shell +sudo apt-get -y install edb-postgresextended-17 +``` + +## Initial configuration + +This section steps you through getting started with your cluster including logging in, ensuring the installation was successful, connecting to your cluster, and creating the user password. + +First, you need to initialize and start the database cluster. The `edb-pge-17-setup` script creates a cluster. + +```shell +sudo PGSETUP_INITDB_OPTIONS="-E UTF-8" /usr/lib/edb-pge/17/bin/edb-pge-17-setup initdb + +sudo systemctl start edb-pge-17 +``` + +To work in your cluster, log in as the postgres user. Connect to the database server using the psql command-line client. Alternatively, you can use a client of your choice with the appropriate connection string. + +```shell +sudo -iu postgres + +psql postgres +``` + +The server runs with the `peer` or `ident` permission by default. You can change the authentication method by modifying the `pg_hba.conf` file. + +Before changing the authentication method, assign a password to the database superuser, postgres. For more information on changing the authentication, see [Modifying the pg_hba.conf file](../../administration/01_setting_configuration_parameters/#modifying-the-pg_hbaconf-file). + +```sql +ALTER ROLE postgres with PASSWORD 'password'; +``` + +## Experiment + +Now you're ready to create and connect to a database, create a table, insert data in a table, and view the data from the table. + +First, use psql to create a database named `hr` to hold human resource information. + +```sql +# running in psql +CREATE DATABASE hr; +__OUTPUT__ +CREATE DATABASE +``` + +Connect to the `hr` database inside psql: + +``` +\c hr +__OUTPUT__ +You are now connected to database "hr" as user "postgres". +``` + +Create columns to hold department numbers, unique department names, and locations: + +``` +CREATE TABLE public.dept (deptno numeric(2) NOT NULL CONSTRAINT dept_pk +PRIMARY KEY, dname varchar(14) CONSTRAINT dept_dname_uq UNIQUE, loc +varchar(13)); +__OUTPUT__ +CREATE TABLE +``` + +Insert values into the `dept` table: + +``` +INSERT INTO dept VALUES (10,'ACCOUNTING','NEW YORK'); +__OUTPUT__ +INSERT 0 1 +``` + +``` +INSERT into dept VALUES (20,'RESEARCH','DALLAS'); +__OUTPUT__ +INSERT 0 1 +``` + +View the table data by selecting the values from the table: + +``` +SELECT * FROM dept; +__OUTPUT__ +deptno | dname | loc +--------+------------+---------- +10 | ACCOUNTING | NEW YORK +20 | RESEARCH | DALLAS +(2 rows) +``` diff --git a/product_docs/docs/pge/17/operation.mdx b/product_docs/docs/pge/17/operation.mdx new file mode 100644 index 00000000000..2fbcc4aef69 --- /dev/null +++ b/product_docs/docs/pge/17/operation.mdx @@ -0,0 +1,42 @@ +--- +title: Operations +originalFilePath: operation.md +description: Operational features of EDB Postgres Extended Server including WAL pacing delays, additional tracing and diagnostics options. +--- + +EDB Postgres Extended Server has a number of features that relate to operations. 
+
+## Avoid flooding transaction logs
+
+EDB Postgres Extended Server provides WAL pacing delays to avoid flooding transaction logs. The WAL pacing configuration parameters are:
+ - `wal_insert_delay_enabled`
+ - `wal_insert_delay`
+ - `wal_insert_delay_size`
+
+When `wal_insert_delay_enabled` is enabled, a session sleeps based on the value of `wal_insert_delay` after WAL data of at least the value of `wal_insert_delay_size` is generated. The default is off.
+
+## Additional tracing and diagnostics options
+
+EDB Postgres Extended Server allows you to enable timeouts that log trace messages in specific code
+paths. Use the `tracelog_timeout` configuration parameter to log trace messages when a timeout of
+the specified duration occurs.
+
+## Selective physical base backup and subsequent selective recovery/restore
+
+By default, backups are always taken of the entire database cluster. You can also back up individual databases or database objects by specifying
+the `-L` option with the `pg_basebackup` utility multiple times for multiple databases.
+
+Template databases are backed up by default. WAL data for excluded
+databases is still part of the WAL archives.
+
+The backup activity stores the list of database objects specified using this option
+in the backup label file. The presence of these objects in the backup label file causes selective recovery of these databases. Recovery of template
+databases and of global metadata related to users, languages, and so on is also carried
+out as usual. WAL data belonging to excluded databases is ignored during the
+recovery process. Attempts to connect to excluded databases cause errors after regular
+operations start following the recovery.
+
+
+## Additional operations feature
+
+- Reduced locking of `ALTER TABLE ... REPLICA IDENTITY`
diff --git a/product_docs/docs/pge/17/parameters.mdx b/product_docs/docs/pge/17/parameters.mdx
new file mode 100644
index 00000000000..7b4f140bb2d
--- /dev/null
+++ b/product_docs/docs/pge/17/parameters.mdx
@@ -0,0 +1,147 @@
+---
+navTitle: Configuration parameters
+title: Configuration parameters (GUCs)
+originalFilePath: parameters.md
+description: Configuration parameters available with EDB Postgres Extended Server.
+---
+
+These Grand Unified Configuration (GUC) parameters are available with EDB Postgres Extended Server.
+
+## Backend parameters
+
+Backend parameters introduce a test probe point infrastructure for injecting sleeps or errors into PostgreSQL and extensions.
+
+Any `PROBE_POINT` defined throughout the Postgres code marks important code paths. These probe points might be activated to signal the current backend or to `elog(...)` a `LOG`/`ERROR`/`FATAL`/`PANIC`. They might also, or instead, add a delay at that point in the code.
+
+Unless explicitly activated, probe points have no effect and add only a single optimizer-hinted branch, so they're safe on hot paths.
+
+When an active probe point is hit and the counter is satisfied, after any specified sleep interval, a log message is always emitted at `DEBUG1` or higher.
+
+### `pg2q.probe_point`
+
+The name of a `PROBE_POINT` in the code of 2ndQPostgres or in an extension that defines a `PROBE_POINT`. This parameter isn't validated. If a nonexistent probe point is named, it's never hit.
+
+Only one probe point can be active. This parameter isn't a list, and attempting to supply a list means nothing matches.
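+
+As a minimal sketch, activating a probe point might look like the following. The probe name `my_probe` is hypothetical, and this assumes the probe GUCs can be set with `SET` in the current session. The `pg2q.probe_action`, `pg2q.probe_sleep`, and `pg2q.probe_counter` parameters are described next:
+
+```sql
+-- Activate a hypothetical probe named my_probe
+SET pg2q.probe_point = 'my_probe';   -- which PROBE_POINT to activate
+SET pg2q.probe_action = 'log';       -- emit a LOG message when it fires
+SET pg2q.probe_sleep = 500;          -- sleep 500ms before the action
+SET pg2q.probe_counter = 3;          -- fire only on every third hit
+```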
+
+Probe points generally have a unique name, given as the argument to the `PROBE_POINT` macro in the code where it's defined. It's also possible to use the same `PROBE_POINT` name where multiple code paths trigger the same action of interest. A probe fires when either path is taken.
+
+### `pg2q.probe_counter`
+
+You might need to act on a probe only after a loop runs for the number of times specified with this parameter. In such cases, set this GUC to the number of iterations after which the probe point fires and the counter resets.
+
+The default value is `1`, meaning the probe points always fire when the name matches.
+
+### `pg2q.probe_sleep`
+
+Sleep for `pg2q.probe_sleep` milliseconds after hitting the probe point. Then fire the action in `pg2q.probe_action`.
+
+### `pg2q.probe_action`
+
+Action to take when the named `pg2q.probe_point` is hit. Available actions are:
+
+- `sleep` — Emit a `DEBUG` message with the probe name.
+- `log` — Emit a `LOG` message with the probe name.
+- `error` — `elog(ERROR, ...)` to raise an `ERROR` condition.
+- `fatal` — `elog(FATAL, ...)`.
+- `panic` — `elog(PANIC, ...)`, which generally then calls `abort()` and delivers a `SIGABRT` (signal 6) to cause the backend to core dump. The probe point tries to set the core file limit to enable core dumps if the hard ulimit permits.
+- `sigint`, `sigterm`, `sigquit`, `sigkill` — Deliver the named signal to the backend that hit the probe point.
+
+### `pg2q.probe_backend_pid`
+
+If nonzero, the probe sleep and action are skipped for backends other than the backend with this process ID.
+
+## `server_2q_version_num` and `server_2q_version`
+
+The `server_2q_version_num` and `server_2q_version` configuration parameters allow the 2ndQuadrant-specific version number and version substring, respectively, to be accessible to external modules.
+
+## Table-level compression control option
+
+You can set the table-level option `compress_tuple_target` to decide when to trigger compression on a tuple. Previously, you used the `toast_tuple_target` (or the compile-time default) to decide whether to compress a tuple. However, this approach was detrimental when a tuple was large enough and had a good compression ratio but wasn't large enough to cross the toast threshold.
+
+
+
+## `pg2q.max_tuple_field_size`
+
+Restricts the maximum uncompressed size of the internal representation of any one field that can be written to a table, in bytes.
+
+The default `pg2q.max_tuple_field_size` is 1073740799 bytes, which is 1024 bytes less than 1 GiB. This value is slightly less than the 1 GiB maximum field size usually imposed by PostgreSQL. This margin helps prevent cases where tuples are committed to disk but can't then be processed by logical decoding output plugins and sent to downstream servers.
+
+Set `pg2q.max_tuple_field_size` to `1GB` or `11073741823` to disable the feature.
+
+If your application doesn't rely on inserting large fields, consider setting `pg2q.max_tuple_field_size` to a much smaller value, such as 100MB or even less.
Among other issues, large fields can:
+
+- Cause surprising application behavior
+- Increase memory consumption for the database engine during queries and replication
+- Slow down logical replication
+
+While this parameter is enabled, queries that `INSERT` or `UPDATE` an oversized field fail with an `ERROR` such as:
+
+```
+ERROR: field big_binary_field_name in row is larger than pg2q.max_tuple_field_size
+DETAIL: New or updated row in relation some_table has field big_binary_field_name
+   (attno=2) with size 8161 bytes which exceeds limit 1073740799B configured
+   in pg2q.max_tuple_field_size
+SQLSTATE: 53400 configuration_limit_exceeded
+```
+
+Only the superuser can set `pg2q.max_tuple_field_size`. You can use a `SECURITY DEFINER` function wrapper if you want to allow a non-superuser to set it.
+
+If you change `pg2q.max_tuple_field_size`, fields larger than the current `pg2q.max_tuple_field_size` that are already on disk don't change. You can `SELECT` them as usual. Any `UPDATE` that affects tuples with oversized fields fails, even if the oversized field isn't modified, unless the new tuple created by the update operation satisfies the currently active size limits.
+
+A `DELETE` operation doesn't check the field-size limit.
+
+The limit isn't enforced on the text-representation size for I/O of fields because doing so would also prevent PostgreSQL from creating and processing temporary in-memory json objects larger than the limit.
+
+The limit isn't enforced for temporary tuples in tuplestores, such as set-returning functions, CTEs, and views. Size checks are deliberately not enforced for any `MATERIALIZED VIEW` either.
+
+!!! WARNING
+    `pg2q.max_tuple_field_size` is enforced for `pg_restore`. If a
+    database contains oversized tuples, `pg_dump` runs as usual. However, a
+    subsequent `pg_restore` fails with the error shown previously. To work around
+    this issue, restore the dump with `pg2q.max_tuple_field_size` overridden in
+    connection options using `PGOPTIONS` or the `options` connection-parameter
+    string. For example:
+
+    ```
+    PGOPTIONS='-c pg2q.max_tuple_field_size=11073741823' pg_restore ...
+    ```
+
+Data type specifics:
+
+- For a `bytea` field, the size used is the decoded binary size. It isn't the text-representation size in hex or octal escape form, that is, the `octet_length()` of the field.
+
+  Assuming `bytea_output = 'hex'`, the maximum size of the I/O representation is `2 * pg2q.max_tuple_field_size + 2` bytes.
+
+- For a `text`, `json`, or `xml` field, the measured size is the number of bytes of text in the current database encoding (the `octet_length()` of the field), not the number of characters. In UTF-8 encodings, one character usually consumes one byte but might consume six or more bytes for some languages and scripts.
+
+- For a `jsonb` field, the measured size is that of the PostgreSQL internal jsonb-encoded datatype representation, not the text representation of the json document. In some cases the `jsonb` representation for larger json documents is smaller than the text representation. This means that it's possible to insert json documents with text representations larger than any given `pg2q.max_tuple_field_size`, although it's uncommon.
+
+- Extension-defined data type behavior depends on the implementation of the data type.
+
+  The field size used for this limit is the size reported by the `pg_column_size()` function, minus the 4 bytes of header PostgreSQL adds to variable-length data types, when used on a literal of the target data type. For example:
+
+  ```
+  demo=> SELECT pg_column_size(BYTEA '\x00010203040506070809') - 4;
+  14
+  ```
+
+  Similarly, to see the computed size of a `jsonb` field, use:
+
+  ```
+  SELECT pg_column_size(JSONB '{"my_json_document": "yes"}') - 4;
+  ```
+
+  Due to TOAST compression, `pg_column_size()` often reports smaller
+  values when called on existing on-disk fields. Also, the header for
+  shorter values on disk might be 1 byte instead of 4.
+
+## `pg2q.max_tuple_size`
+
+Restricts the maximum size of a single tuple that can be written to a table. This value is the total row width, including the uncompressed width of all potentially compressible or external-storage-capable field values. Field headers count against the size, but fixed row headers don't.
+
+Many PostgreSQL operations, such as logical replication, work on whole rows, as do many applications. You can use this setting to impose a limit on the maximum row size you consider reasonable for your application to prevent inadvertent creation of oversized rows that might pose operational issues.
+
+When applied to an `UPDATE` of existing tuples, `pg2q.max_tuple_size` isn't enforced as strictly as `pg2q.max_tuple_field_size`. It doesn't count the full size of unmodified values in columns with storage other than `PLAIN`.
+
+!!! WARNING
+    `pg2q.max_tuple_size` is enforced for `pg_restore`. See the caveat for `pg2q.max_tuple_field_size`.
diff --git a/product_docs/docs/pge/17/release_notes/index.mdx b/product_docs/docs/pge/17/release_notes/index.mdx
new file mode 100644
index 00000000000..1b96b3cd1b5
--- /dev/null
+++ b/product_docs/docs/pge/17/release_notes/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Release notes"
+description: "Release notes for EDB Postgres Extended Server 17."
+navigation:
+  - rel_notes17.2
+---
+The EDB Postgres Extended Server documentation describes the latest version of EDB Postgres Extended Server 17, including minor releases and patches. These release notes cover what was new in each release.
+
+| Version | Release date |
+|---------------------------|--------------|
+| [17.2](rel_notes17.2) | 22 Nov 2024 |
+
+
+
+
diff --git a/product_docs/docs/pge/17/release_notes/rel_notes17.2.mdx b/product_docs/docs/pge/17/release_notes/rel_notes17.2.mdx
new file mode 100644
index 00000000000..90883c2a010
--- /dev/null
+++ b/product_docs/docs/pge/17/release_notes/rel_notes17.2.mdx
@@ -0,0 +1,12 @@
+---
+title: EDB Postgres Extended Server 17.2 release notes
+navTitle: "Version 17.2"
+---
+
+Released: 22 Nov 2024
+
+EDB Postgres Extended Server 17.2 includes the following enhancements and bug fixes:
+
+| Type | Description | Addresses |
+|----------------|-------------------------------------------------------------------------------------------------------------------------------------------|-----------|
+| Upstream merge | Merged with community PostgreSQL 17.2. See the [PostgreSQL 17.2 Release Notes](https://www.postgresql.org/docs/17/release-17-2.html) for more information.
| |
diff --git a/product_docs/docs/pge/17/replication.mdx b/product_docs/docs/pge/17/replication.mdx
new file mode 100644
index 00000000000..c7557bc05b4
--- /dev/null
+++ b/product_docs/docs/pge/17/replication.mdx
@@ -0,0 +1,33 @@
+---
+navTitle: Replication
+title: Replication
+originalFilePath: replication.md
+description: EDB Postgres Extended Server provides the core functionality to support replication and high-availability features in EDB Postgres Distributed.
+---
+
+
+EDB Postgres Extended Server provides the core functionality to support the following replication and high availability features in [EDB Postgres Distributed](/pgd/latest):
+
+- Commit At Most Once (CAMO)
+- Group commit
+- Eager replication
+- Decoding worker
+- Assessment tooling
+- Lag tracker
+- Lag control
+- Timestamp snapshots
+- Transaction streaming
+- Missing partition conflict
+- No need for UPDATE trigger on tables with TOAST
+- Automatically hold back FREEZE
+
+## Asynchronous processing
+
+EDB Postgres Extended Server includes a `synchronous_replication_availability` parameter.
+A value of `async` for this parameter enables asynchronous
+processing when not enough standby
+servers are available (when compared with the values as per
+`synchronous_standby_names`). The behavior reverts
+to synchronous replication when the required number of synchronous
+standby servers becomes available again.
+
diff --git a/product_docs/docs/pge/17/sql_features/index.mdx b/product_docs/docs/pge/17/sql_features/index.mdx
new file mode 100644
index 00000000000..adec227d769
--- /dev/null
+++ b/product_docs/docs/pge/17/sql_features/index.mdx
@@ -0,0 +1,57 @@
+---
+title: SQL enhancements
+originalFilePath: compat.md
+description: EDB Postgres Extended Server includes a number of SQL enhancements including rollback options, cursors with prepared statements, and PL/pgSQL compatibility.
+navigation:
+  - txnrollback
+  - jdbctxnrollback
+---
+EDB Postgres Extended Server includes a number of SQL enhancements.
+
+## Rollback options
+
+In PostgreSQL, any error in a transaction rolls back all actions
+by that transaction. This behavior is different from other database systems, such
+as Oracle and SQL Server, where an error causes rollback of only the last
+statement. This difference in transaction handling semantics
+doesn't cause a problem in all cases, but it does make implementing business logic
+in PostgreSQL difficult for Oracle Database and Microsoft SQL Server developers.
+
+One workaround is to manually introduce a savepoint, internally known as
+a subtransaction, into the application code. This is time-consuming and
+difficult to test. A savepoint is an additional statement and therefore increases
+transaction latency. Given the overhead of additional development work
+and slower performance, this approach isn't viable in most cases.
+
+EDB Postgres Extended Server allows you to roll back just the current statement.
+The statement-level rollback feature provides an
+optional mode to choose whether to allow rollback of the whole transaction
+or just the current statement. No manual recoding is required. There's some added overhead, but it's lower than for a savepoint.
+
+See [`transaction_rollback_scope`](txnrollback) for information on setting the transaction rollback scope inside the database and [JDBC properties for rollback scope](jdbctxnrollback) for information on continuing past an error on a JDBC batch job.
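+
+As a sketch of the statement-scope behavior, assume a session started with `PGOPTIONS="-c transaction_rollback_scope=statement" psql` and a hypothetical table `rollback_demo`. The duplicate-key error aborts only the failing statement, not the transaction:
+
+```sql
+CREATE TABLE rollback_demo (id int PRIMARY KEY);
+
+BEGIN;
+INSERT INTO rollback_demo VALUES (1);
+INSERT INTO rollback_demo VALUES (1);  -- fails with a duplicate-key error
+INSERT INTO rollback_demo VALUES (2);  -- still succeeds
+COMMIT;
+
+-- rollback_demo now contains rows 1 and 2. With the default
+-- transaction scope, the error would have aborted the whole transaction.
+```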
+
+
+## Cursors with prepared statements
+
+EDB Postgres Extended Server allows declaring a cursor over a previously created prepared statement.
+
+For example:
+
+ ```
+ PREPARE foo AS ...; DECLARE c1 CURSOR FOR foo;
+ ```
+
+## PL/pgSQL compatibility
+
+EDB Postgres Extended Server integrates with other migration tools with a number of PL/pgSQL compatibility features.
+
+For general simplicity, EDB Postgres Extended Server allows calling functions from PL/pgSQL without the `PERFORM` keyword.
+
+For example:
+
+```
+BEGIN somefunc(); END
+```
+
+Here, `somefunc` isn't a keyword.
diff --git a/product_docs/docs/pge/17/sql_features/jdbctxnrollback.mdx b/product_docs/docs/pge/17/sql_features/jdbctxnrollback.mdx
new file mode 100644
index 00000000000..f2269a4ec46
--- /dev/null
+++ b/product_docs/docs/pge/17/sql_features/jdbctxnrollback.mdx
@@ -0,0 +1,111 @@
+---
+title: JDBC properties for setting rollback scope
+originalFilePath: jdbctxnrollback.md
+
+---
+
+If you're using a JDBC connector in your client application, you use the `autosave` and `transaction_rollback_scope` properties together to specify the transaction rollback scope.
+
+You can specify these properties in either the connection URL or as an
+additional properties object parameter to `DriverManager.getConnection`.
+
+## autosave
+
+The `autosave` parameter is a string that specifies what the driver does if a query containing
+multiple statements fails. The possible values are: `server`, `always`, `never`, and `conservative`.
+
+- In `autosave=server` mode, JDBC relies on the server-side parameter `transaction_rollback_scope`
+to save each statement by way of internal server savepoints before
+executing the next. The server rolls back to the previous
+statement if any statement in the query fails. If this parameter
+isn't supported on the server side, JDBC rejects the connection.
+
+- In `autosave=always` mode, the JDBC driver first tries to use the server-side `transaction_rollback_scope` property. If it isn't supported,
+then the JDBC driver sets a savepoint before each query
+statement and rolls back to that savepoint in case of failure.
+
+- In `autosave=never` mode (the default), no savepoint activity is ever
+carried out.
+
+- In `autosave=conservative` mode, a savepoint is set for
+each query. However, the rollback is done only for rare cases like
+'cached statement cannot change return type' or 'statement XXX is
+not valid', in which case the JDBC driver rolls back and retries.
+
+The default value for this property is `never`.
+
+The `autosave=server` mode is useful only
+when the PostgreSQL server provides `transaction_rollback_scope`
+functionality.
+
+## transaction_rollback_scope
+
+The `transaction_rollback_scope` parameter determines the range of
+operations that roll back when an SQL statement fails.
+
+The default value is `TRANSACTION`, which causes the entire transaction or
+current subtransaction to roll back. This is the only mode
+that you can select with the `SET TRANSACTION` command.
+
+You can specify the other possible mode, `STATEMENT`, only during connection establishment, `ALTER USER`, or `ALTER DATABASE`. In that mode, only
+the failed SQL statement is rolled back, and the transaction is put back in normal mode.
+
+## `autosave` test cases
+
+Test cases for trying out values of the `autosave` property
+are available in the `BatchAutoSaveTest.java` file.
The following
+SQL code shows the behavior that's expected when the
+server provides `transaction_rollback_scope` functionality and
+`autosave=server` is used on the JDBC side.
+
+With `autosave=server`, the following query inserts values `(1)`, `(3)`, and
+`(4)` and disregards the `duplicate key violation` error:
+
+```sql
+CREATE TABLE test (id INT PRIMARY KEY);
+INSERT INTO test VALUES (2);
+BEGIN;
+INSERT INTO test VALUES (1);
+INSERT INTO test VALUES (2);
+INSERT INTO test VALUES (3);
+INSERT INTO test VALUES (4);
+COMMIT;
+```
+
+
+The `artifacts` directory contains the `pgjdbc` jar file
+`postgresql-REL2Q.42.2.3.180601.jar`. This file needs to
+be added to the CLASSPATH as usual. It also contains the
+`postgresql-REL2Q.42.2.3.180601-tests.jar` jar file that you can use to test
+the latest `autosave` functionality.
+
+You can test the `BatchAutoSaveTest.java` file provided in the `artifacts` directory as follows:
+
+1. Export CLASSPATH to build and run the test case:
+
+   ```sh
+   cd artifacts
+   export CLASSPATH=$PWD:$PWD/postgresql-REL2Q.42.2.3.180601-tests.jar:$PWD/postgresql-REL2Q.42.2.3.180601.jar:$PWD/junit-4.12.jar:$PWD/hamcrest-core-1.3.jar
+   ```
+
+1. Compile the supplied test file:
+
+   ```sh
+   javac -d . BatchAutoSaveTest.java
+   ```
+
+1. Run the test (assuming the user is `test` and the server is running on localhost):
+
+   ```shell
+   java -Dusername=test -Dport=5432 -Dhost=localhost -Ddatabase=postgres org.junit.runner.JUnitCore org.postgresql.test.jdbc2.BatchAutoSaveTest
+   __OUTPUT__
+   JUnit version 4.12
+   .Configuration file /Users/altaf/pg/artifacts/../build.properties does not exist. Consider adding it to specify test db host and login
+   Configuration file /Users/altaf/pg/artifacts/../build.local.properties does not exist. Consider adding it to specify test db host and login
+   Configuration file /Users/altaf/pg/artifacts/../build.properties does not exist. Consider adding it to specify test db host and login
+   Configuration file /Users/altaf/pg/artifacts/../build.local.properties does not exist. Consider adding it to specify test db host and login
+   .........
+   Time: 0.556
+
+   OK (10 tests)
+   ```
+
+To modify the test cases, you can modify the `BatchAutoSaveTest.java` file
+in the `artifacts` directory. Then compile and run the test cases.
diff --git a/product_docs/docs/pge/17/sql_features/txnrollback.mdx b/product_docs/docs/pge/17/sql_features/txnrollback.mdx
new file mode 100644
index 00000000000..c4c8feb5f79
--- /dev/null
+++ b/product_docs/docs/pge/17/sql_features/txnrollback.mdx
@@ -0,0 +1,55 @@
+---
+title: "transaction_rollback_scope parameter"
+originalFilePath: txnrollback.md
+
+---
+
+To set the transaction rollback scope inside the database, use the `transaction_rollback_scope` parameter.
+The `transaction_rollback_scope` parameter has two possible values:
+
+- `transaction` — Standard Postgres behavior, where each error
+aborts the whole transaction.
+
+- `statement` — An error while executing one statement affects only
+that statement and not the status of the transaction as a whole.
+
+## Setting the parameter
+
+You can set the parameter as a user-level property, a connection option, or the mode for specific functions or procedures.
+ +### Set the parameter as a user-level property + +```sql +ALTER USER somebody SET transaction_rollback_scope TO statement; +``` + +### Set the parameter as a connection option + +```sh-session +PGOPTIONS="-c transaction_rollback_scope=statement" psql +``` + +### Set the mode for specific functions or procedures + +If using PL/pgSQL, you can set the mode for specific functions or procedures: + +```sql +ALTER FUNCTION myfunc SET transaction_rollback_scope TO statement; +``` + +## How subtransactions are handled + +If you select the `statement` value, then a subtransaction is opened just before each SQL +command. If the command is successful, the subtransaction is *committed*. If the command causes an error, the subtransaction is +rolled back, and the parent transaction can continue normally. The +effect is that an error during execution of one statement affects only +that statement and not the status of the transaction as a whole. + +Committing a subtransaction assigns the +resources it holds only to its parent transaction, which might be the top-level +transaction. Or it might be some other subtransaction if there are user-defined +savepoints involved. So this is not an "autonomous transaction." +Rolling back a subtransaction releases all the +resources it holds, such as any locks it acquired. + + diff --git a/product_docs/docs/pge/17/tde.mdx b/product_docs/docs/pge/17/tde.mdx new file mode 100644 index 00000000000..e5c33231759 --- /dev/null +++ b/product_docs/docs/pge/17/tde.mdx @@ -0,0 +1,8 @@ +--- +title: Transparent data encryption +description: Transparent data encryption (TDE) encrypts any user data stored in the database system. +--- + +Transparent data encryption (TDE) encrypts any user data stored in the database system. This encryption is transparent to the user. User data includes the actual data stored in tables and other objects as well as system catalog data such as the names of objects. + +See [Transparent data encryption](/tde/latest) for more information. \ No newline at end of file diff --git a/product_docs/docs/pge/17/upgrading/index.mdx b/product_docs/docs/pge/17/upgrading/index.mdx new file mode 100644 index 00000000000..094ca214b6c --- /dev/null +++ b/product_docs/docs/pge/17/upgrading/index.mdx @@ -0,0 +1,11 @@ +--- +navTitle: Upgrading +title: Upgrading EDB Postgres Extended Server +description: How to upgrade EDB Postgres Extended Server to a more recent version. +--- + +You can upgrade EDB Postgres Extended Server installations to a more recent version. + +- See [Upgrading a major version of EDB Postgres Extended Server](major_upgrade) for a major upgrade example. + +- See [Upgrading a minor version of EDB Postgres Extended Server](minor_upgrade) for minor upgrade examples according to your package format. diff --git a/product_docs/docs/pge/17/upgrading/major_upgrade.mdx b/product_docs/docs/pge/17/upgrading/major_upgrade.mdx new file mode 100644 index 00000000000..711706ffbce --- /dev/null +++ b/product_docs/docs/pge/17/upgrading/major_upgrade.mdx @@ -0,0 +1,155 @@ +--- +navTitle: "Major upgrades" +title: "Major version upgrade of EDB Postgres Extended Server" +description: Perform a major version upgrade of EDB Postgres Extended Server. +deepToc: true +--- + +To perform a major version upgrade, install the new version of EDB Postgres Extended Server, initialize an empty cluster and use pg_upgrade to migrate all data. 
+
+If a problem occurs during the upgrade process, you can [revert to the previous version](/epas/latest/upgrading/major_upgrade/06_reverting_to_the_old_cluster/#reverting_to_the_old_cluster).
+
+## Overview
+
+1. [Prepare your upgrade](#preparing-your-upgrade) by performing a backup of the existing instance.
+1. [Install the EDB Postgres Extended Server version](#installing-the-target-edb-postgres-extended-server-version) you're upgrading to.
+1. [Create a new database server](#creating-a-target-server):
+    1. Create an empty directory for the new server and ensure postgres owns it.
+    1. Initialize a server on a different port from the source server.
+    1. Start the database server.
+    1. Connect to the database server and ensure it's functioning.
+1. [Upgrade to the target server](#upgrading-to-the-target-server):
+    1. Stop both the source and the new server.
+    1. Use pg_upgrade by specifying the source and target bin and data directories.
+    1. Start the new database server.
+    1. Connect to the new database server and ensure the data was transferred.
+1. [Clean up and delete the source server](#cleaning-up-after-upgrade):
+    1. Clean up the database and its statistics.
+    1. Remove the source EDB Postgres Extended Server cluster with the script provided by pg_upgrade.
+
+## Worked example
+
+This worked example upgrades an EDB Postgres Extended Server 16 database to EDB Postgres Extended Server 17.
+
+!!!note
+    You can perform major upgrades of EDB Postgres Extended Server instances in the same way you upgrade an EDB Postgres Advanced Server installation. If you need more information about the pg_upgrade utility, command line options, troubleshooting, and more, see [Upgrading an installation with pg_upgrade](/epas/latest/upgrading/major_upgrade/).
+
+### Preparing your upgrade
+
+Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [pgBackRest](/supported-open-source/pgbackrest/), or [Barman](/supported-open-source/barman/) to create a backup of your source server.
+
+### Installing the target EDB Postgres Extended Server version
+
+[Install EDB Postgres Extended Server](../installing/) version 17. Only install the packages. Don't perform any other configuration.
+
+### Creating a target server
+
+If you don't want to create a new target instance but want to reuse an existing server with the target EDB Postgres Extended Server version, skip these steps and [ensure the target server is empty](/epas/latest/upgrading/major_upgrade/03_upgrading_to_advanced_server/#step-2---empty-the-target-database).
+
+1. As postgres, create an empty directory for the new server:
+
+    ```
+    mkdir /var/lib/edb-pge/17/upgrade_target
+    ```
+
+1. As root, ensure the postgres user owns the directory:
+
+    ```
+    sudo chown postgres /var/lib/edb-pge/17/upgrade_target
+    sudo chgrp postgres /var/lib/edb-pge/17/upgrade_target
+    ```
+
+1. As postgres, initialize the new server:
+
+    ```
+    /usr/lib/edb-pge/17/bin/initdb -D /var/lib/edb-pge/17/upgrade_target
+    ```
+
+    This command initializes a CONFIG directory with all configuration files for the new server.
+
+1. Before you start the cluster, ensure the new database runs on a different port from the source server. To alter the port, edit `postgresql.conf` by uncommenting the line with `#port` and changing the port number, for example, to 5432.
+
+1. 
Start the target server:
+
+    ```
+    /usr/lib/edb-pge/17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade_target start
+    ```
+
+    !!!note
+       You can also start the server with the `logfile` option enabled to print errors into a logfile:
+       `/usr/lib/edb-pge/17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade_target -l logfile start`
+
+       In this case, ensure the postgres user has rights to write to the log directory.
+
+1. Connect to the server:
+
+    ```
+    /usr/lib/edb-pge/17/bin/psql -p 5432
+    ```
+
+    !!!note
+       If you're using two different Postgres versions, use the psql utility of the target server. Otherwise, the system attempts to use psql from the previous instance.
+
+### Upgrading to the target server
+
+1. If you have any extensions or component services running in the source cluster, stop them before starting the upgrade. See [Stop all component services and servers](/epas/latest/upgrading/major_upgrade/03_upgrading_to_advanced_server/#step-4---stop-all-component-services-and-servers) for more information.
+
+1. Stop both the source and target servers:
+
+    ```
+    /usr/lib/edb-pge/16/bin/pg_ctl -D /var/lib/edb-pge/16/upgrade_source stop
+    /usr/lib/edb-pge/17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade_target stop
+    ```
+
+1. To test for incompatibilities, run the `pg_upgrade` command in check mode.
+
+    With `-b` and `-B`, specify the source and target BIN directories. With `-d` and `-D`, specify the source and target CONFIG directories:
+
+    ```
+    /usr/lib/edb-pge/17/bin/pg_upgrade -b /usr/lib/edb-pge/16/bin -B /usr/lib/edb-pge/17/bin -d /var/lib/edb-pge/16/upgrade_source -D /var/lib/edb-pge/17/upgrade_target --check
+    ```
+
+    !!!note
+       The `--check` mode performs preliminary checks without executing the actual upgrade.
+
+1. To copy data from the source server to the target server, run the `pg_upgrade` command in normal mode:
+
+    ```
+    /usr/lib/edb-pge/17/bin/pg_upgrade -b /usr/lib/edb-pge/16/bin -B /usr/lib/edb-pge/17/bin -d /var/lib/edb-pge/16/upgrade_source -D /var/lib/edb-pge/17/upgrade_target
+    ```
+
+1. Start the target server:
+
+    ```
+    /usr/lib/edb-pge/17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade_target start
+    ```
+
+1. Connect to the target database server:
+
+    ```
+    /usr/lib/edb-pge/17/bin/psql -p 5432
+    ```
+
+1. Perform a spot check to ensure the databases, tables, schemas, and resources you had in the source server are available in the new server. For example, list all databases, explore the database objects, views, and so on.
+
+1. Restart the extensions or component services you disabled in the source cluster, this time in the target cluster.
+
+### Cleaning up after upgrade
+
+After you verify that pg_upgrade migrated the data successfully, and the services are running as expected, perform a cleanup.
+
+1. Clean up the database and its statistics:
+
+    ```
+    /usr/lib/edb-pge/17/bin/vacuumdb --all --analyze-in-stages
+    ```
+
+1. Remove all data files of the source server with the script generated by `pg_upgrade`:
+
+    ```
+    ./delete_old_cluster.sh
+    ```
+
+## More information
+
+Review [Upgrading an installation with pg_upgrade](/epas/latest/upgrading/major_upgrade/) for more information on pg_upgrade options, troubleshooting, and other considerations.
diff --git a/product_docs/docs/pge/17/upgrading/minor_upgrade/apt_upgrade.mdx b/product_docs/docs/pge/17/upgrading/minor_upgrade/apt_upgrade.mdx
new file mode 100644
index 00000000000..4ae55beda71
--- /dev/null
+++ b/product_docs/docs/pge/17/upgrading/minor_upgrade/apt_upgrade.mdx
@@ -0,0 +1,65 @@
+---
+title: "Minor EDB Postgres Extended Server upgrade of Debian packages"
+navTitle: "Minor upgrades on Debian environments"
+description: "Perform a minor version upgrade of EDB Postgres Extended Server with the apt-get package manager"
+---
+
+If you used `apt-get` to install a Debian package of EDB Postgres Extended Server (on Ubuntu, Debian), use `apt-get` to perform a minor version upgrade of the packages.
+
+## Overview
+
+1. Upgrade the EDB Postgres Extended Server packages with `apt-get install`.
+
+1. Restart the server with `pg_ctl`.
+
+1. Verify the server version with psql.
+
+## Worked example
+
+1. To upgrade the existing packages, open a command line, assume root privileges, and enter the command:
+
+   ```shell
+   sudo apt-get install <package_name>
+   ```
+
+   For example, if you want to upgrade to the latest minor version of EDB Postgres Extended Server 17, run:
+
+   ```shell
+   sudo apt-get install edb-postgresextended-17
+   ```
+
+   !!!Note
+      You can perform a search of the packages to ensure you update the right package beforehand. For example, to browse through all EDB packages, you can run `sudo apt-cache search edb`. For more information about using `apt-get` commands and options, enter `apt-get --help` at the command line.
+
+1. Confirm with `Y`.
+
+   The output displays an overview of all performed processes, where you can see the packages that were upgraded.
+
+1. To finalize the upgrade, restart the server. Replace `<data_directory>` with the path to the data directory of the server or servers you're upgrading:
+
+   ```shell
+   /usr/lib/edb-pge/17/bin/pg_ctl -D <data_directory> restart
+   ```
+
+   For example:
+
+   ```shell
+   /usr/lib/edb-pge/17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade restart
+   ```
+
+1. Verify the expected database version is running by connecting to psql:
+
+   ```shell
+   /usr/lib/edb-pge/17/bin/psql
+   ```
+
+   Check the server version:
+
+   ```psql
+   SHOW server_version;
+   __OUTPUT__
+   server_version
+   --------------------------------------------------------------------
+   17.2 (EDB Postgres Extended Server 17.2.0) (Ubuntu 0:17.2-1.jammy)
+   (1 row)
+   ```
diff --git a/product_docs/docs/pge/17/upgrading/minor_upgrade/dnf_upgrade.mdx b/product_docs/docs/pge/17/upgrading/minor_upgrade/dnf_upgrade.mdx
new file mode 100644
index 00000000000..821e2790674
--- /dev/null
+++ b/product_docs/docs/pge/17/upgrading/minor_upgrade/dnf_upgrade.mdx
@@ -0,0 +1,96 @@
+---
+title: "Minor EDB Postgres Extended Server upgrade of RPM packages"
+navTitle: "Minor upgrades on RPM environments"
+description: "Perform a minor version upgrade of EDB Postgres Extended Server with the dnf package manager"
+---
+
+If you used `dnf` to install an RPM package of EDB Postgres Extended Server (on RHEL, AlmaLinux, Rocky Linux), use `dnf` to perform a minor version upgrade of the packages.
+
+## Overview
+
+1. Check for available updates with `dnf check-update`.
+
+1. Upgrade the EDB Postgres Extended Server packages with `dnf update`.
+
+1. Restart the server with `pg_ctl`.
+
+1. Verify the server version with psql.
+
+## Worked example
+
+1. 
To list the package updates available for your system, open a command line, assume root privileges, and enter the command:
+
+   ```shell
+   sudo dnf check-update
+   ```
+
+   For example, if you want to upgrade to the latest minor version of EDB Postgres Extended Server 17, run:
+
+   ```shell
+   sudo dnf check-update edb-postgresextended17
+   ```
+
+   !!!Note
+      You can include wildcard values in the search term. For example, if you're looking for EDB packages, you can run `sudo dnf check-update edb-*`. For more information about using `dnf` commands and options, enter `dnf --help` at the command line.
+
+1. Once you've identified the name and version of the package you want to upgrade, use `dnf update` to upgrade the package:
+
+   ```shell
+   sudo dnf update edb-postgresextended17
+   __OUTPUT__
+   ================================================================================
+    Package                        Arch    Version    Repository              Size
+   ================================================================================
+   Upgrading:
+    edb-postgresextended17         x86_64  17.2-1.el9 enterprisedb-enterprise 1.7 M
+    edb-postgresextended17-contrib x86_64  17.2-1.el9 enterprisedb-enterprise 724 k
+    edb-postgresextended17-libs    x86_64  17.2-1.el9 enterprisedb-enterprise 330 k
+    edb-postgresextended17-server  x86_64  17.2-1.el9 enterprisedb-enterprise 6.8 M
+
+   Transaction Summary
+   ================================================================================
+   Upgrade  4 Packages
+
+   Total download size: 9.5 M
+   Is this ok [y/N]
+   ```
+
+1. Confirm with `y`. The output displays an overview of all performed processes, where you can see the packages that were upgraded:
+
+   ```shell
+   y
+   __OUTPUT__
+   edb-postgresextended17-17.2-1.el9.x86_64
+   edb-postgresextended17-contrib-17.2-1.el9.x86_64
+   edb-postgresextended17-libs-17.2-1.el9.x86_64
+   edb-postgresextended17-server-17.2-1.el9.x86_64
+   ```
+
+1. To finalize the upgrade, restart the server. Replace `<data_directory>` with the path to the data directory of the server or servers you're upgrading:
+
+   ```shell
+   /usr/edb/pge17/bin/pg_ctl -D <data_directory> restart
+   ```
+
+   For example:
+
+   ```shell
+   /usr/edb/pge17/bin/pg_ctl -D /var/lib/edb-pge/17/upgrade restart
+   ```
+
+1. Verify the expected database version is running by connecting to psql:
+
+   ```shell
+   /usr/edb/pge17/bin/psql
+   ```
+
+   Check the server version:
+
+   ```psql
+   SHOW server_version;
+   __OUTPUT__
+   server_version
+   --------------------------------------------
+   17.2 (EDB Postgres Extended Server 17.2.0)
+   ```
+ 
\ No newline at end of file
diff --git a/product_docs/docs/pge/17/upgrading/minor_upgrade/index.mdx b/product_docs/docs/pge/17/upgrading/minor_upgrade/index.mdx
new file mode 100644
index 00000000000..613945baedc
--- /dev/null
+++ b/product_docs/docs/pge/17/upgrading/minor_upgrade/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Minor EDB Postgres Extended Server upgrade"
+navTitle: "Minor upgrades"
+description: "Minor version upgrade of EDB Postgres Extended Server"
+---
+
+To perform a minor upgrade of your EDB Postgres Extended Server, you only need to update your packages and restart the server.
+
+To update your packages, ensure you use the correct package manager for your operating system.
+
+- If you installed an RPM package of EDB Postgres Extended Server (on RHEL, AlmaLinux, Rocky Linux) with `dnf`, see [Minor EDB Postgres Extended Server upgrade of RPM packages](dnf_upgrade).
+
+- If you installed a Debian package of EDB Postgres Extended Server (on Ubuntu, Debian) with `apt-get`, see [Minor EDB Postgres Extended Server upgrade of Debian packages](apt_upgrade).
+
+When you upgrade the packages, the package manager installs the latest available minor version. For example, if you're running an xy.1 minor version, and the latest available minor version is xy.5, the package manager installs xy.5, skipping xy.2 to xy.4.
diff --git a/product_docs/docs/pgpool/4/installing/index.mdx b/product_docs/docs/pgpool/4/installing/index.mdx
index 527aa99a1a1..5a49b78e14d 100644
--- a/product_docs/docs/pgpool/4/installing/index.mdx
+++ b/product_docs/docs/pgpool/4/installing/index.mdx
@@ -61,6 +61,12 @@ Select a link to access the applicable installation instructions:
 
 ## Linux [AArch64 (ARM64)](linux_arm64)
 
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](linux_arm64/pgpool_rhel_9)
+
+- [Oracle Linux (OL) 9](linux_arm64/pgpool_rhel_9)
+
 ### Debian and derivatives
 
 - [Debian 12](linux_arm64/pgpool_debian_12)
diff --git a/product_docs/docs/pgpool/4/installing/linux_arm64/index.mdx b/product_docs/docs/pgpool/4/installing/linux_arm64/index.mdx
index 2078edd5ca2..685b1c5f36d 100644
--- a/product_docs/docs/pgpool/4/installing/linux_arm64/index.mdx
+++ b/product_docs/docs/pgpool/4/installing/linux_arm64/index.mdx
@@ -3,11 +3,18 @@ title: "Installing EDB Pgpool-II on Linux AArch64 (ARM64)"
 navTitle: "On Linux ARM64"
 
 navigation:
+  - pgpool_rhel_9
   - pgpool_debian_12
 ---
 
 Operating system-specific install instructions are described in the corresponding documentation:
 
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](pgpool_rhel_9)
+
+- [Oracle Linux (OL) 9](pgpool_rhel_9)
+
 ### Debian and derivatives
 
 - [Debian 12](pgpool_debian_12)
diff --git a/product_docs/docs/pgpool/4/installing/linux_arm64/pgpool_rhel_9.mdx b/product_docs/docs/pgpool/4/installing/linux_arm64/pgpool_rhel_9.mdx
new file mode 100644
index 00000000000..e1909fa11c0
--- /dev/null
+++ b/product_docs/docs/pgpool/4/installing/linux_arm64/pgpool_rhel_9.mdx
@@ -0,0 +1,53 @@
+---
+navTitle: RHEL 9 or OL 9
+title: Installing EDB Pgpool-II on RHEL 9 or OL 9 arm64
+# This topic is generated from templates. If you have feedback on it, instead of
+# editing the page and creating a pull request, please enter a GitHub issue and
+# the documentation team will update the templates accordingly.
+
+redirects:
+  - /pgpool/4/01_installing_and_configuring_the_pgpool-II/arm64/pgpool_rhel9_arm
+---
+
+## Prerequisites
+
+Before you begin the installation process:
+
+- Install Postgres on the same host. See:
+
+  - [Installing EDB Postgres Advanced Server](/epas/latest/installing/)
+
+  - [Installing PostgreSQL](https://www.postgresql.org/download/)
+
+- Set up the EDB repository.
+
+  Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step.
+
+  To determine if your repository exists, enter this command:
+
+  `dnf repolist | grep enterprisedb`
+
+  If no output is generated, the repository isn't installed.
+
+  To set up the EDB repository:
+
+  1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads).
+
+  1. Select the button that provides access to the EDB repository.
+
+  1. Select the platform and software that you want to download.
+
+  1. Follow the instructions for setting up the EDB repository.
+ +- Install the EPEL repository: + ```shell + sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + ``` + +## Install the package + +```shell +sudo dnf -y install edb-pgpool +``` + +Where `` is the version of EDB PgPool-II you are installing. For example, if you are installing version 4.3, the package name would be `edb-pgpool43`. diff --git a/product_docs/docs/pgpool/4/installing_extensions/index.mdx b/product_docs/docs/pgpool/4/installing_extensions/index.mdx index 52a68de8565..9664747ebf5 100644 --- a/product_docs/docs/pgpool/4/installing_extensions/index.mdx +++ b/product_docs/docs/pgpool/4/installing_extensions/index.mdx @@ -58,6 +58,12 @@ Select a link to access the applicable installation instructions: ## Linux [AArch64 (ARM64)](linux_arm64) +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](linux_arm64/pgpoolext_rhel_9) + +- [Oracle Linux (OL) 9](linux_arm64/pgpoolext_rhel_9) + ### Debian and derivatives - [Debian 12](linux_arm64/pgpoolext_debian_12) diff --git a/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/index.mdx b/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/index.mdx index 884bc272cc1..ce42a7975aa 100644 --- a/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/index.mdx +++ b/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/index.mdx @@ -3,11 +3,18 @@ title: "Installing EDB Pgpool-II Extensions on Linux AArch64 (ARM64)" navTitle: "On Linux ARM64" navigation: + - pgpoolext_rhel_9 - pgpoolext_debian_12 --- Operating system-specific install instructions are described in the corresponding documentation: +### Red Hat Enterprise Linux (RHEL) and derivatives + +- [RHEL 9](pgpoolext_rhel_9) + +- [Oracle Linux (OL) 9](pgpoolext_rhel_9) + ### Debian and derivatives - [Debian 12](pgpoolext_debian_12) diff --git a/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/pgpoolext_rhel_9.mdx b/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/pgpoolext_rhel_9.mdx new file mode 100644 index 00000000000..15e7590074e --- /dev/null +++ b/product_docs/docs/pgpool/4/installing_extensions/linux_arm64/pgpoolext_rhel_9.mdx @@ -0,0 +1,53 @@ +--- +navTitle: RHEL 9 or OL 9 +title: Installing EDB Pgpool-II Extensions on RHEL 9 or OL 9 arm64 +# This topic is generated from templates. If you have feedback on it, instead of +# editing the page and creating a pull request, please enter a GitHub issue and +# the documentation team will update the templates accordingly. + +redirects: + - /pgpool/4/02_extensions/arm64/pgpoolext_rhel9_arm +--- + +## Prerequisites + +Before you begin the installation process: + +- Install Postgres on the same host. See: + + - [Installing EDB Postgres Advanced Server](/epas/latest/installing/) + + - [Installing PostgreSQL](https://www.postgresql.org/download/) + +- Set up the EDB repository. + + Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step. + + To determine if your repository exists, enter this command: + + `dnf repolist | grep enterprisedb` + + If no output is generated, the repository isn't installed. + + To set up the EDB repository: + + 1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads). + + 1. Select the button that provides access to the EDB repository. + + 1. Select the platform and software that you want to download. + + 1. Follow the instructions for setting up the EDB repository. 
+
+- Install the EPEL repository:
+  ```shell
+  sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+  ```
+
+## Install the package
+
+```shell
+sudo dnf -y install edb-as-pgpool-extensions
+```
+
+Where `` is the EDB Postgres Advanced Server version and `` is the EDB Pgpool-II version you are installing. For example, if you are installing EDB Pgpool-II version 4.4 and EDB Postgres Advanced Server version 15, the package name would be `edb-as15-pgpool44-extensions`.
diff --git a/product_docs/docs/pgpool/4/pgpool_rel_notes/451_rel_notes.mdx b/product_docs/docs/pgpool/4/pgpool_rel_notes/451_rel_notes.mdx
new file mode 100644
index 00000000000..08030139892
--- /dev/null
+++ b/product_docs/docs/pgpool/4/pgpool_rel_notes/451_rel_notes.mdx
@@ -0,0 +1,13 @@
+---
+title: "EDB Pgpool-II 4.5.1 release notes"
+navTitle: Version 4.5.1
+---
+
+Released: 22 Nov 2024
+
+EDB Pgpool-II 4.5.1 includes the following changes:
+
+| Type | Description |
+|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Upstream merge | Merged with community Pgpool-II 4.5.1. See the community [Release Notes](https://www.pgpool.net/docs/latest/en/html/release-4-5-1.html) for details. |
+| Enhancement | Added support for EDB Postgres Advanced Server 17. |
\ No newline at end of file
diff --git a/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx b/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
index f10f1812e85..2f95fb8e46d 100644
--- a/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
+++ b/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
@@ -3,6 +3,7 @@ title: "Release notes"
redirects:
- ../01_whats_new/
navigation:
+- 451_rel_notes
- 445_rel_notes
- 444_rel_notes
- 443_rel_notes
@@ -21,6 +22,7 @@ The EDB Pgpool-II documentation describes the latest version of EDB Pgpool-II, i
| Version | Release Date | Upstream merges | |
|---------------------------------------------------------------------------------------------------------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--|
+| [4.5.1](451_rel_notes) | 22 Nov 2024 | Upstream [4.5.1](https://www.pgpool.net/docs/45/en/html/release-4-5-1.html) | |
| [4.4.5](445_rel_notes) | 17 May 2024 | Upstream [4.4.5](https://www.pgpool.net/docs/44/en/html/release-4-4-5.html) | |
| [4.4.4](444_rel_notes) | 26 Feb 2024 | Upstream [4.4.4](https://www.pgpool.net/docs/44/en/html/release-4-4-4.html) | |
| [4.4.3](443_rel_notes) | 09 Nov 2023 | Upstream [4.4.3](https://www.pgpool.net/docs/44/en/html/release-4-4-3.html) | |
diff --git a/product_docs/docs/postgis/3/installing/index.mdx b/product_docs/docs/postgis/3/installing/index.mdx
index e1c63fe9183..eb8ea65c5bd 100644
--- a/product_docs/docs/postgis/3/installing/index.mdx
+++ b/product_docs/docs/postgis/3/installing/index.mdx
@@ -14,8 +14,8 @@ redirects:
navigation:
- linux_x86_64
- - linux_ppc64le
- linux_arm64
+ - linux_ppc64le
- windows
- upgrading
- uninstalling
@@ -57,6 +57,12 @@ Select a link to access the applicable installation instructions:
## Linux [AArch64
(ARM64)](linux_arm64)
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](linux_arm64/postgis_rhel_9)
+
+- [Oracle Linux (OL) 9](linux_arm64/postgis_rhel_9)
+
### Debian and derivatives
- [Debian 12](linux_arm64/postgis_debian_12)
diff --git a/product_docs/docs/postgis/3/installing/linux_arm64/index.mdx b/product_docs/docs/postgis/3/installing/linux_arm64/index.mdx
index 776defb5b04..1ce65b69c07 100644
--- a/product_docs/docs/postgis/3/installing/linux_arm64/index.mdx
+++ b/product_docs/docs/postgis/3/installing/linux_arm64/index.mdx
@@ -9,11 +9,18 @@ navTitle: "On Linux ARM64"
redirects:
navigation:
+ - postgis_rhel_9
 - postgis_debian_12
---
Operating system-specific install instructions are described in the corresponding documentation:
+### Red Hat Enterprise Linux (RHEL) and derivatives
+
+- [RHEL 9](postgis_rhel_9)
+
+- [Oracle Linux (OL) 9](postgis_rhel_9)
+
### Debian and derivatives
- [Debian 12](postgis_debian_12)
diff --git a/product_docs/docs/postgis/3/installing/linux_arm64/postgis_rhel_9.mdx b/product_docs/docs/postgis/3/installing/linux_arm64/postgis_rhel_9.mdx
new file mode 100644
index 00000000000..cf8ef005e3a
--- /dev/null
+++ b/product_docs/docs/postgis/3/installing/linux_arm64/postgis_rhel_9.mdx
@@ -0,0 +1,73 @@
+---
+navTitle: RHEL 9 or OL 9
+title: Installing PostGIS on RHEL 9 or OL 9 arm64
+# This topic is generated from templates. If you have feedback on it, instead of
+# editing the page and creating a pull request, please enter a GitHub issue and
+# the documentation team will update the templates accordingly.
+
+redirects:
+  - /postgis/latest/01a_installing_postgis/installing_on_linux/arm64/postgis_rhel9_arm
+---
+
+## Prerequisites
+
+Before you begin the installation process:
+
+- Install Postgres on the same host. See:
+
+  - [Installing EDB Postgres Advanced Server](/epas/latest/installing/)
+
+  - [Installing PostgreSQL](https://www.postgresql.org/download/)
+
+- Set up the EDB repository.
+
+  Setting up the repository is a one-time task. If you have already set up your repository, you don't need to perform this step.
+
+  To determine if your repository exists, enter this command:
+
+  `dnf repolist | grep enterprisedb`
+
+  If no output is generated, the repository isn't installed.
+
+  To set up the EDB repository:
+
+  1. Go to [EDB repositories](https://www.enterprisedb.com/repos-downloads).
+
+  1. Select the button that provides access to the EDB repository.
+
+  1. Select the platform and software that you want to download.
+
+  1. Follow the instructions for setting up the EDB repository.
+
+- Install the EPEL repository:
+
+  ```shell
+  sudo dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+  ```
+
+- Enable additional repositories to resolve dependencies:
+
+  ```shell
+  ARCH=$( /bin/arch ) subscription-manager repos --enable "codeready-builder-for-rhel-9-${ARCH}-rpms"
+  ```
+
+  !!!note
+
+   If you are using a public cloud RHEL image, `subscription-manager` may not be enabled and enabling it may incur unnecessary charges. Equivalent packages may be available under a different name, such as `codeready-builder-for-rhel-9-rhui-rpms`. Consult the documentation for the RHEL image you are using to determine how to install `codeready-builder`.
+
+   !!!
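+
+As an optional sanity check before you install the package, you can confirm that the repositories set up in the prerequisites are visible to `dnf`. This is only a sketch; the exact repository IDs can vary by platform and image:
+
+```shell
+# List the enabled repositories and filter for the EDB, EPEL, and
+# CodeReady Builder repositories set up in the prerequisites.
+dnf repolist | grep -Ei 'enterprisedb|epel|codeready'
+```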
+
+## Install the package
+
+```shell
+# To install PostGIS 3.4:
+sudo dnf -y install edb-as-postgis34
+
+# To install PostGIS 3.1 using EDB Postgres Advanced Server 13-15:
+sudo dnf -y install edb-as-postgis3
+
+# To install PostGIS 3.1 using EDB Postgres Advanced Server 11-12:
+sudo dnf -y install edb-as-postgis
+```
+
+Where `` is the version of EDB Postgres Advanced Server you're using. For example, `edb-as15-postgis34`.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
index 41c66e7a42b..b338ba5839a 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/architecture.mdx
@@ -98,8 +98,8 @@ Two kinds of routing are available with PGD proxies:
In EDB Postgres Distributed for Kubernetes, local routing is used by default,
and a configuration option is available to select global routing.
-For more information, see the
-[PGD documentation of routing with Raft](/pgd/latest/routing/raft/).
+For more information on routing with Raft, see
+[Proxies, Raft, and Raft subgroups](/pgd/latest/routing/raft/) in the PGD documentation.
### PGD architectures and high availability
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
index eafa8914125..e9185dfeeb9 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/backup.mdx
@@ -60,19 +60,19 @@ The `.spec.backup.schedulers[].method` field allows you to define the scheduled
- `volumeSnapshot`
- `barmanObjectStore` (the default)
-You can define more than one scheduler, but each method can only be used by one
-scheduler, i.e. two schedulers are not allowed to use the same method.
+You can define more than one scheduler, but each method can be used by only one
+scheduler. That is, two schedulers aren't allowed to use the same method.
-For object store backups, with the default `barmanObjectStore` method, the stanza
-`spec.backup.configuration.barmanObjectStore` is used to define the object store information for both backup and wal archiving.
-More information can be found in [EDB Postgres for Kubernetes Backup on Object Stores](/postgres_for_kubernetes/latest/backup_barmanobjectstore/).
+For object store backups, with the default `barmanObjectStore` method, use the stanza
+`spec.backup.configuration.barmanObjectStore` to define the object store information for both backup and WAL archiving.
+For more information, see [Backup on object stores](/postgres_for_kubernetes/latest/backup_barmanobjectstore/) in the EDB Postgres for Kubernetes documentation.
-To perform volumeSnapshot backups, the `volumeSnapshot` method can be selected.
-The stanza
-`spec.backup.configuration.barmanObjectStore.volumeSnapshot` is used to define the volumeSnapshot configuration.
-More information can be found in [EDB Postgres for Kubernetes Backup on Volume Snapshots](/postgres_for_kubernetes/latest/backup_volumesnapshot/).
+To perform volumeSnapshot backups, you can select the `volumeSnapshot` method.
+Use the stanza
+`spec.backup.configuration.barmanObjectStore.volumeSnapshot` to define the volumeSnapshot configuration.
+For more information, see [Backup on volume snapshots](/postgres_for_kubernetes/latest/backup_volumesnapshot/) in the EDB Postgres for Kubernetes documentation. -The following example shows how to use the `volumeSnapshot` method for backup. WAL archiving is still done onto the barman object store. +This example shows how to use the `volumeSnapshot` method for backup. WAL archiving is still done onto the Barman object store. ```yaml apiVersion: pgd.k8s.enterprisedb.io/v1beta1 @@ -104,10 +104,10 @@ spec: immediate: true ``` -For more information about the comparison of two backup methods, see [EDB Postgres for Kubernetes for Object stores or volume snapshots](/postgres_for_kubernetes/latest/backup/#object-stores-or-volume-snapshots-which-one-to-use). +For a comparison of these two backup methods, see [Object stores or volume snapshots](/postgres_for_kubernetes/latest/backup/#object-stores-or-volume-snapshots-which-one-to-use) in the EDB Postgres for Kubernetes documentation. The `.spec.backup.schedulers[].schedule` field allows you to define a cron schedule, expressed -in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format). +in the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format): ```yaml apiVersion: pgd.k8s.enterprisedb.io/v1beta1 @@ -123,28 +123,28 @@ spec: immediate: true ``` -You can suspend scheduled backups if necessary by setting `.spec.backup.schedulers[].suspend` to `true`. -This will prevent new backups from being scheduled. +If necessary, you can suspend scheduled backups by setting `.spec.backup.schedulers[].suspend` to `true`. +This setting prevents new backups from being scheduled. If you want to execute a backup as soon as the `ScheduledBackup` resource is created, set `.spec.backup.schedulers[].immediate` to `true`. `.spec.backupOwnerReference` indicates the `ownerReference` to use -in the created backup resources. The choices are: +in the created backup resources. The options are: -- **none** — No owner reference for created backup objects. -- **self** — Sets the `ScheduledBackup` object as owner of the backup. -- **cluster** — Sets the cluster as owner of the backup. +- **none** — Doesn't set an owner reference for created backup objects. +- **self** — Sets the `ScheduledBackup` object as owner of the backup. +- **cluster** — Sets the cluster as owner of the backup. !!! Warning - The `.spec.backup.cron` field is now deprecated. Please use + The `.spec.backup.cron` field is deprecated. Use `.spec.backup.schedulers` instead. - Note that while `.spec.backup.cron` can still be used, it cannot - be used simultaneously with `.spec.backup.schedulers`. + While you can still use `.spec.backup.cron`, you can't use it + at the same time as `.spec.backup.schedulers`. !!! Note The EDB Postgres for Kubernetes `ScheduledBackup` object contains the `cluster` option to specify the - cluster to back up. This option is currently not supported by EDB Postgres Distributed for Kubernetes and is + cluster to back up. This option currently isn't supported by EDB Postgres Distributed for Kubernetes and is ignored if specified. 
If an elected backup node is deleted, the operator transparently elects a new backup node diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx index b32266ff3ff..32884097dad 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/connectivity.mdx @@ -20,14 +20,14 @@ PGD cluster includes: Resources in a PGD cluster are accessible through Kubernetes services. Every PGD group manages several of them, namely: -- One service per node, used for internal communications (*node service*) +- One service per node, used for internal communications (*node service*). - A *group service* to reach any node in the group, used primarily by EDB Postgres Distributed for Kubernetes - to discover a new group in the cluster + to discover a new group in the cluster. - A *proxy service* to enable applications to reach the write leader of the - group transparently using PGD Proxy + group transparently using PGD Proxy. - A *proxy-r service* to enable applications to reach the read nodes of the - group, transparently using PGD Proxy. This service is disabled by default - and controlled by the `.spec.proxySettings.enableReadNodeRouting` setting + group transparently using PGD Proxy. This service is disabled by default + and controlled by the `.spec.proxySettings.enableReadNodeRouting` setting. For an example that uses these services, see [Connecting an application to a PGD cluster](#connecting-to-a-pgd-cluster-from-an-application). @@ -58,7 +58,7 @@ Proxy Service Template Proxy Read Service Template : Each PGD group has a proxy service to reach the group read nodes through - the PGD proxy, can be enabled by `.spec.proxySettings.enableReadNodeRouting`, + the PGD proxy. Can be enabled by `.spec.proxySettings.enableReadNodeRouting`, and can be configured in the `.spec.connectivity.proxyReadServiceTemplate` section. This is the entry-point service for the applications. @@ -169,11 +169,11 @@ either manually or automated, by updating the content of the secret. ## Connecting to a PGD cluster from an application -Connecting to a PGD Group from an application running inside the same Kubernetes cluster -or from outside the cluster is a simple procedure. In both cases, you will connect to -the proxy service of the PGD Group as the `app` user. The proxy service is a LoadBalancer -service that will route the connection to the write leader or read nodes of the PGD Group, -depending on which proxy service is connecting to. +Connecting to a PGD group from an application running inside the same Kubernetes cluster +or from outside the cluster is a simple procedure. In both cases, you connect to +the proxy service of the PGD group as the `app` user. The proxy service is a LoadBalancer +service that routes the connection to the write leader or read nodes of the PGD group, +depending on the proxy service it's connecting to. ### Connecting from inside the cluster diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx index d40cab60aee..dc653ffc188 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/labels_annotations.mdx @@ -41,9 +41,8 @@ their metadata cleaned up before creating the PGD node. 
This is written by the restore job.
`k8s.pgd.enterprisedb.io/hash`
-: Holds the hash of the certain part of PGDGroup spec that is utilized in various entities
-like `Cluster`, `ScheduledBackup`, `StatefulSet`, and `Service (node, group and proxy service)`
-to determine if any updates are required for the corresponding resources.
+: To determine if any updates are required for the corresponding resources, holds the hash of a certain part of the PGDGroup spec that's used in entities
+like `Cluster`, `ScheduledBackup`, `StatefulSet`, and `Service (node, group and proxy service)`.
`k8s.pgd.enterprisedb.io/latestCleanupExecuted`
: Set in the PGDGroup to indicate that the cleanup was executed.
@@ -53,7 +52,7 @@ to determine if any updates are required for the corresponding resources.
generated. Added to the certificate resources.
`k8s.pgd.enterprisedb.io/nodeRestartHash`
-: Stores the hash of the CNP configuration in PGDGroup, a restart is needed when the configuration
+: Stores the hash of the CNP configuration in PGDGroup. A restart is needed when the configuration
is changed.
`k8s.pgd.enterprisedb.io/noFinalizers`
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/ldap.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/ldap.mdx
index c042e377e38..a795d14f652 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/ldap.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/ldap.mdx
@@ -1,22 +1,22 @@
---
-title: 'LDAP Authentication'
+title: 'LDAP authentication'
originalFilePath: 'src/ldap.md'
---
-EDB Postgres Distributed for Kubernetes supports LDAP authentication,
+EDB Postgres Distributed for Kubernetes supports LDAP authentication.
LDAP configuration on EDB Postgres Distributed for Kubernetes relies on the
-implementation from EDB Postgres for Kubernetes (PG4K). Please refer to
-[the PG4K documentation](/postgres_for_kubernetes/latest/postgresql_conf/#ldap-configuration)
+implementation from EDB Postgres for Kubernetes (PG4K). See the
+[PG4K documentation](/postgres_for_kubernetes/latest/postgresql_conf/#ldap-configuration)
for the full context.
!!! Important
- Before you proceed, please take some time to familiarize with the
- [LDAP authentication feature in the postgres documentation](https://www.postgresql.org/docs/current/auth-ldap.html).
+ Before you proceed, familiarize yourself with the
+ [LDAP authentication feature in the Postgres documentation](https://www.postgresql.org/docs/current/auth-ldap.html).
-With LDAP support, only the user authentication is sent to LDAP, so the user must already exist in the postgres database.
+With LDAP support, only the user authentication is sent to LDAP, so the user must already exist in the Postgres database.
-Here is an example of LDAP configuration using `simple bind` mode in PGDGroup,
-postgres simply use `prefix + username + suffix` and password to bind the LDAP
+This example shows an LDAP configuration using `simple bind` mode in PGDGroup.
+The Postgres server uses `prefix + username + suffix` and the password to bind to the LDAP
server to achieve the authentication.
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -31,10 +31,10 @@ spec:
suffix: ",dc=example,dc=org"
```
-Here is a example of LDAP configuration using `search+bind` mode in PGDGroup.
-In this mode, the postgres is first bound to the LDAP using `bindDN` with its password stored -in the secret `bindPassword`, then postgres tries to perform a search under `baseDN` to find a -username that matches the item specified by `searchAttribute`, if a match is found, postgres finally +This example shows configuring LDAP using `search+bind` mode in PGDGroup. +In this mode, the Postgres instance is first bound to the LDAP using `bindDN` with its password stored +in the secret `bindPassword`. Then Postgres tries to perform a search under `baseDN` to find a +username that matches the item specified by `searchAttribute`. If a match is found, Postgres finally verifies the entry and the password against the LDAP server. ```yaml diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/managed.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/managed.mdx index d19b9520628..f8bfb9e7a8a 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/managed.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/managed.mdx @@ -1,12 +1,12 @@ --- -title: 'Managed Configuration' +title: 'Managed configuration' originalFilePath: 'src/managed.md' --- The PGD operator allows configuring the `managed` section of a PG4K cluster. The `spec.cnp.managed` stanza -is used for configuring the supported managed roles within the cluster. +is used for configuring the supported managed roles in the cluster. -In this example, a pgdgroup is configured to have a managed role named `foo` with the specified properties set up in postgres. +In this example, a PGDgroup is configured to have a managed role named `foo` with the specified properties set up in postgres: ```yaml apiVersion: pgd.k8s.enterprisedb.io/v1beta1 @@ -30,5 +30,4 @@ spec: replication: true ``` -For more information about managed roles, refer to [EDB Postgres for Kubernetes recovery - Database Role Management](/postgres_for_kubernetes/latest/declarative_role_management/) - +For more information about managed roles, see [Database role management](/postgres_for_kubernetes/latest/declarative_role_management/) in the EDB Postgres for Kubernetes documentation. diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/mutations.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/mutations.mdx index 7ff807ad911..184090a2526 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/mutations.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/mutations.mdx @@ -3,15 +3,15 @@ title: 'SQLMutations' originalFilePath: 'src/mutations.md' --- -SQLMutations consist of a list of SQL queries to be executed on the application -database via the superuser role after a pgd node joins the pgdgroup. Each +SQLMutations consist of a list of SQL queries to execute on the application +database via the superuser role after a PGD node joins the PGDgroup. Each SQLMutation includes an `isApplied` list of queries and an `exec` list of queries. -The `isApplied` SQL queries are used to check if the mutation has already been +The `isApplied` SQL queries are used to check if the mutation was already applied. If any of the `isApplied` queries return false, the `exec` list of SQL -queries will be added to the execution queue. +queries is added to the execution queue. 
-Here is a sample of SQLMutations
+Here's a sample of SQLMutations:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -39,24 +39,23 @@ spec:
```
-## SQLMutation Types
+## SQLMutation types
-The operator offers three types of SQLMutations, which can be specified by `spec.pgd.mutations[].type`, with `always`
-being the default option.
+The operator offers three types of SQLMutations, which you specify with `spec.pgd.mutations[].type`. The default is `always`.
-- beforeSubgroupRaft
-- always
-- writeLeader
+- `beforeSubgroupRaft`
+- `always`
+- `writeLeader`
The `beforeSubgroupRaft` and `always` mutations are evaluated in every reconcile loop.
The difference between the two mutations lies in their execution phase:
-- For `always` mutations, they are run in each reconcile loop without any restrictions on the pgdgroup.
-- On the other hand, `beforeSubgroupRaft` mutations are only executed if the pgdgroup has defined data nodes
- and pgd proxies, and specifically before the subgroup raft becomes ready.
+- `always` mutations run in each reconcile loop without any restrictions on the PGDgroup.
+- `beforeSubgroupRaft` mutations are executed only if the PGDgroup has defined data nodes
+ and PGD proxies, and specifically before the subgroup Raft becomes ready.
-Both `beforeSubgroupRaft` and `always` mutations can run on any pgd node within the pgdgroup, including witness nodes.
-Therefore, they should not be used for making data changes to the application database, as witness nodes do not contain
+Both `beforeSubgroupRaft` and `always` mutations can run on any PGD node in the PGDgroup, including witness nodes.
+Therefore, don't use them for making data changes to the application database, as witness nodes don't contain
application database data.
The `writeLeader` mutation is triggered and executed after the write leader is elected. The `exec` operations
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
index 3bf6a6d1b7d..1bdb2f4e32f 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/pg4k-pgd.v1beta1.mdx
@@ -1,11 +1,11 @@
---
-title: 'API Reference'
+title: 'API reference'
originalFilePath: 'src/pg4k-pgd.v1beta1.md'
---
-

Package v1beta1 contains API Schema definitions for the pgd v1beta1 API group

+

Package v1beta1 contains API schema definitions for the pgd v1beta1 API group.

-## Resource Types +## Resource types - [PGDGroup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroup) - [PGDGroupCleanup](#pgd-k8s-enterprisedb-io-v1beta1-PGDGroupCleanup) diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx index c1ab7eff3e9..65e66e6f133 100644 --- a/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx +++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/recovery.mdx @@ -5,7 +5,7 @@ originalFilePath: 'src/recovery.md' In EDB Postgres Distributed for Kubernetes, recovery is available as a way to bootstrap a new PGD group starting from an available physical backup of a PGD node. -Recovery can't be performed in-place on an existing PGD group. +Recovery can't be performed in place on an existing PGD group. EDB Postgres Distributed for Kubernetes also supports point-in-time recovery (PITR), which allows you to restore a PGD group up to any point in time, from the first available backup in your catalog to the last archived @@ -157,7 +157,7 @@ spec: ``` !!! Important - When a `backupID` is specified, make sure to list only the related PGD node + When you specify a `backupID`, make sure to list only the related PGD node in the `serverNames` option, and avoid listing the other ones. !!! Note @@ -168,12 +168,12 @@ spec: ## Recovery from volumeSnapshot -You can also recover a pgdgroup from a volumeSnapshot backup. Stanza +You can also recover a PGDgroup from a volumeSnapshot backup. Stanza `spec.restore.volumeSnapshots` is used to define the criteria for volumeSnapshots restore candidates. The operator transparently selects the latest volumeSnapshot among the candidates. The operator requires the following annotations/labels in the volumeSnapshot. These -annotations/labels will be automatically added if volumeSnapshots are taken by the operator. +annotations/labels are automatically added if volumeSnapshots are taken by the operator. Annotations: @@ -185,12 +185,12 @@ Labels: - `k8s.enterprisedb.io/cluster` indicates the node where the volumeSnapshot is taken, crucial for fetching the serverName in the object store for WAL replaying. -- `k8s.enterprisedb.io/backupName` is the backup name of the volumeSnapshot, used to group - volumeSnapshots, when more volumes are defined in the backup. -- `k8s.enterprisedb.io/tablespaceName` represents the tablespace name of the volumeSnapshot, when +- `k8s.enterprisedb.io/backupName` is the backup name of the volumeSnapshot. Used to group + volumeSnapshots when more volumes are defined in the backup. +- `k8s.enterprisedb.io/tablespaceName` represents the tablespace name of the volumeSnapshot when the volumeSnapshot role is `PG_TABLESPACE`. -The following example illustrates a full recovery from volumeSnapshots. After the volumeSnapshot recovery, +This example shows a full recovery from volumeSnapshots. After the volumeSnapshot recovery, WAL replaying for full recovery will target server `pgdgroup-backup-vs-1`. ```yaml @@ -221,14 +221,14 @@ spec: maxParallel: 8 ``` -For more information, please see [EDB Postgres for Kubernetes recovery from volumeSnapshot objects](/postgres_for_kubernetes/latest/recovery/#recovery-from-volumesnapshot-objects). +For more information, see [Recovery from volumeSnapshot objects](/postgres_for_kubernetes/latest/recovery/#recovery-from-volumesnapshot-objects) in the EDB Postgres for Kubernetes documentation. 
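+
+As a sketch of how you might confirm that candidate volumeSnapshots carry the labels and annotations described above (the label value and snapshot name here are placeholders):
+
+```shell
+# List volumeSnapshots taken from a given PGD node, with their labels.
+kubectl get volumesnapshots \
+  -l k8s.enterprisedb.io/cluster=pgdgroup-backup-vs-1 \
+  --show-labels
+
+# Inspect the annotations on one candidate volumeSnapshot.
+kubectl get volumesnapshot my-snapshot -o jsonpath='{.metadata.annotations}'
+```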
## PITR from volumeSnapshot
-Same as when doing recovery from an object store, you can instruct PostgreSQL to halt the replay of Write-Ahead Logs (WALs)
-at any specific moment during volumeSnapshot recovery.
+You can instruct PostgreSQL to halt the replay of write-ahead logs (WALs)
+at any specific moment during volumeSnapshot recovery. This is the same capability as when recovering from an object store.
-This example demonstrates setting a time-based target for recovery using volume snapshots.
+This example shows setting a time-based target for recovery using volume snapshots:
```yaml
apiVersion: pgd.k8s.enterprisedb.io/v1beta1
@@ -263,4 +263,4 @@ spec:
```
## Recovery targets
Beyond PITR are other recovery target criteria you can use.
-For more information on all the available recovery targets, see [EDB Postgres for Kubernetes recovery targets](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/recovery/#point-in-time-recovery-pitr) in the EDB Postgres for Kubernetes documentation.
+For more information on all the available recovery targets, see [Recovery](/postgres_for_kubernetes/latest/recovery/#point-in-time-recovery-pitr) in the EDB Postgres for Kubernetes documentation.
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
index 5c99a8aa65a..9761e45ad00 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/tde.mdx
@@ -1,45 +1,45 @@
---
-title: 'Transparent Data Encryption (TDE)'
+title: 'Transparent data encryption (TDE)'
originalFilePath: 'src/tde.md'
---
!!! Important
TDE is available *only* for operands that support it:
- EPAS versions 15 and newer, Postgres Extended versions 15 and newer.
+ EDB Postgres Advanced Server versions 15 and newer and EDB Postgres Extended versions 15 and newer.
-Transparent Data Encryption, or TDE, is a technology used by several database
-vendors to **encrypt data at rest**, i.e. database files on disk.
-TDE does not however encrypt data in use.
+Transparent data encryption, or TDE, is a technology used by several database
+vendors to encrypt data at rest, that is, database files on disk.
+However, TDE doesn't encrypt data in use.
-TDE is included in EDB Postgres Advanced Server (EPAS) or EDB Postgres
-Extended, starting with version 15, and it is supported by EDB Postgres
+TDE is included in EDB Postgres Advanced Server or EDB Postgres
+Extended, starting with version 15, and is supported by EDB Postgres
Distributed for Kubernetes.
!!! Important
- Before you proceed, please take some time to familiarize with the
- [TDE feature in the EPAS documentation](https://www.enterprisedb.com/docs/tde/latest/).
+ Before you proceed, familiarize yourself with the
+ [TDE feature in the EDB Postgres Advanced Server documentation](/tde/latest/).
-With TDE activated, both WAL files and files for tables will be encrypted.
-Data encryption/decryption is entirely transparent to the user, as it is
+With TDE activated, both WAL files and files for tables are encrypted.
+Data encryption/decryption is entirely transparent to the user, as it's
managed by the database without requiring any application changes or
updated client drivers.
The support for TDE on EDB Postgres Distributed for Kubernetes relies on the
-implementation from EDB Postgres for Kubernetes (PG4K).
Please refer to
-[the PG4K documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/tde/)
+implementation from EDB Postgres for Kubernetes (PG4K). See
+[the PG4K documentation](/postgres_for_kubernetes/latest/tde/)
for the full context.
-We show now how to use TDE with a passphrase stored in a Kubernetes Secret,
-which will be used to encrypt the EPAS binary key.
+You can use TDE with a passphrase stored in a Kubernetes secret,
+which is used to encrypt the EDB Postgres Advanced Server binary key.
-!!! Seealso "EPAS documentation"
- Please refer to [the EPAS documentation](https://www.enterprisedb.com/docs/tde/latest/key_stores/)
- for details on the EPAS encryption key.
+!!! Seealso "EDB Postgres Advanced Server documentation"
+ See [the EDB Postgres Advanced Server documentation](/tde/latest/secure_key/)
+ for details on this encryption key.
TDE on EDB Postgres Distributed for Kubernetes relies on the PG4K implementation.
-To activate TDE on a cluster, we use the `epas` section of the manifest,
-which is within the `cnp` section used for PG4K-level directives such as
+Activating TDE on a cluster uses the `epas` section of the manifest,
+which is in the `cnp` section used for PG4K-level directives such as
storage.
Use the `tde` stanza to enable TDE, and set the name of the Kubernetes secret
holding the TDE encryption key.
@@ -75,27 +75,27 @@ spec:
size: 1Gi
```
-Again, please refer to [the PG4K documentation](https://www.enterprisedb.com/docs/postgres_for_kubernetes/latest/tde/)
+Again, see [the PG4K documentation](/postgres_for_kubernetes/latest/tde/)
for additional depth, including how to create the encryption secret and
additional ways of using TDE.
-As shown in the [TDE feature documentation](https://www.enterprisedb.com/docs/tde/latest/),
-the information will be encrypted at rest.
+As shown in the [TDE feature documentation](/tde/latest/),
+the information is encrypted at rest.
-For example, open a `psql` terminal into one of your data nodes.
+For example, open a psql terminal into one of your data nodes.
```sh
kubectl exec -ti -- psql app
```
-and create a new table including a text column.
+Create a new table including a text column:
```sql
create table foo(bar int, baz varchar);
insert into foo(bar, baz) values (1, 'hello'), (2, 'goodbye');
```
-And then verify the location where the newly defined table is stored on disk:
+Verify the location where the newly defined table is stored on disk:
```sql
select pg_relation_filepath('foo');
@@ -110,7 +110,7 @@ You can open a terminal on the same data node:
kubectl exec -ti -- bash
```
-and verify the file has been encrypted.
+There, you can verify the file was encrypted:
```sh
cd $PGDATA/base/16385
diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
index 87d60da95fe..5fabf15dc8c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
@@ -17,26 +17,26 @@ originalFilePath: 'src/installation_upgrade.md'
### Obtaining an EDB subscription token
!!! Important
- You must obtain an EDB subscription token to install EDB Postgres for Kubernetes. Without a token, you will not be able to access the EDB private software repositories.
+ You must obtain an EDB subscription token to install EDB Postgres for Kubernetes. You can only access the EDB private software repositories if you have a token.
Installing EDB Postgres for Kubernetes requires an EDB Repos 2.0 token to gain access to the EDB private software repositories. -You can obtain the token by visiting your [EDB Account Profile](https://www.enterprisedb.com/accounts/profile). You will have to sign in if you are not already logged in. +You can obtain the token by visiting your [EDB Account Profile](https://www.enterprisedb.com/accounts/profile). You must sign in if you're not already logged in. -Your account profile page displays the token to use next to **Repos 2.0 Token** label. By default, the token is obscured, click the "Show" button (an eye icon) to reveal it. +Your account profile page displays the token to use next to the **Repos 2.0 Token** label. By default, the token is obscured. Select **Show** (the eye icon) to reveal it. Your token entitles you to access one of two repositories: standard or enterprise. -- `standard` - Includes the operator and the EDB Postgres Extended operand images. -- `enterprise` - Includes the operator and the EDB Postgres Advanced and EDB Postgres Extended operand images. +* `standard` — Includes the operator and the EDB Postgres Extended operand images. +* `enterprise` — Includes the operator and the EDB Postgres Advanced and EDB Postgres Extended operand images. -Set the relevant value, determined by your subscription, as an environment variable `EDB_SUBSCRIPTION_PLAN`. +Set the relevant value, determined by your subscription, as an environment variable `EDB_SUBSCRIPTION_PLAN`: ```shell EDB_SUBSCRIPTION_PLAN=enterprise ``` -then set the Repos 2.0 token to an environment variable `EDB_SUBSCRIPTION_TOKEN`. +Then set the Repos 2.0 token to an environment variable `EDB_SUBSCRIPTION_TOKEN`: ```shell EDB_SUBSCRIPTION_TOKEN= @@ -72,21 +72,42 @@ kubectl create secret -n postgresql-operator-system docker-registry edb-pull-sec --docker-password=$EDB_SUBSCRIPTION_TOKEN ``` +#### Install the EDB pull secret + +Before installing EDB Postgres for Kubernetes, you need to create a pull secret for EDB software in the `postgresql-operator-system` namespace. The pull secret needs to be saved in the namespace where the operator will reside. + +Create the `postgresql-operator-system` namespace: + +```shell +kubectl create namespace postgresql-operator-system +``` + +Create the pull secret: + +```shell +kubectl create secret -n postgresql-operator-system docker-registry edb-pull-secret \ + --docker-server=docker.enterprisedb.com \ + --docker-username=k8s_$EDB_SUBSCRIPTION_PLAN \ + --docker-password=$EDB_SUBSCRIPTION_TOKEN +``` + #### Install the operator -Now that the pull-secret has been added to the namespace, the operator can be installed like any other resource in Kubernetes, +Now that the pull secret has been added to the namespace, the operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -There are two different manifests available depending on your subscription plan: +Two different manifests are available, depending on your subscription plan: -- Standard: The [latest standard operator manifest](https://get.enterprisedb.io/pg4k/pg4k-standard-1.24.1.yaml). -- Enterprise: The [latest enterprise operator manifest](https://get.enterprisedb.io/pg4k/pg4k-enterprise-1.24.1.yaml). +- Standard — The [latest standard operator manifest](https://get.enterprisedb.io/pg4k/pg4k-standard-1.24.1.yaml). +- Enterprise — The [latest enterprise operator manifest](https://get.enterprisedb.io/pg4k/pg4k-enterprise-1.24.1.yaml). 
You can install the manifest for the latest version of the operator by running:
+You can install the latest operator manifest
+for this minor release as follows:
```sh
kubectl apply --server-side -f \
- https://get.enterprisedb.io/pg4k/pg4k-$EDB_SUBSCRIPTION_PLAN-1.24.1.yaml
+ https://get.enterprisedb.io/pg4k/pg4k-$EDB_SUBSCRIPTION_PLAN-1.24.1.yaml
```
You can verify that with:
@@ -109,7 +130,7 @@ kubectl cnp install generate \
> cnp_for_specific_namespace.yaml
```
-Please refer to ["`cnp` plugin"](./kubectl-plugin.md#generation-of-installation-manifests) documentation
+See the ["`cnp` plugin"](./kubectl-plugin.md#generation-of-installation-manifests) documentation
for a more comprehensive example.
!!! Warning
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
index 68094eea889..abdf05abf93 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
@@ -1,6 +1,7 @@
---
title: 'EDB Postgres for Kubernetes Plugin'
originalFilePath: 'src/kubectl-plugin.md'
+deepToC: true
---
EDB Postgres for Kubernetes provides a plugin for `kubectl` to manage a cluster in Kubernetes.
@@ -55,18 +56,19 @@ Setting up kubectl-cnp (1.24.1) ...
#### RPM packages
-As in the example for `.rpm` packages, let's install the 1.24.1 release for an
+As in the example for `.deb` packages, let's install the 1.24.1 release for an
Intel 64 bit machine. Note the `--output` flag to provide a file name.
-```sh
+``` sh
curl -L https://github.com/EnterpriseDB/kubectl-cnp/releases/download/v1.24.1/kubectl-cnp_1.24.1_linux_x86_64.rpm \
--output kube-plugin.rpm
```
-Then, with superuser privileges, install with `yum`, and you're ready to use:
+Then, with superuser privileges, install with `yum`, and you're ready to use:
-```console
-$ sudo yum --disablerepo=* localinstall kube-plugin.rpm
+``` sh
+sudo yum --disablerepo=* localinstall kube-plugin.rpm
+__OUTPUT__
Failed to set locale, defaulting to C.UTF-8
Dependencies resolved.
====================================================================================================
@@ -90,19 +92,19 @@ EDB Postgres for Kubernetes Plugin is currently built for the following
operating system and architectures:
* Linux
- * amd64
- * arm 5/6/7
- * arm64
- * s390x
- * ppc64le
+ * amd64
+ * arm 5/6/7
+ * arm64
+ * s390x
+ * ppc64le
* macOS
- * amd64
- * arm64
+ * amd64
+ * arm64
* Windows
- * 386
- * amd64
- * arm 5/6/7
- * arm64
+ * 386
+ * amd64
+ * arm 5/6/7
+ * arm64
### Configuring auto-completion
@@ -821,8 +823,8 @@ into a human-readable output, and attempts to sort the entries by timestamp.
It can be used in combination with `kubectl cnp logs cluster`, as shown in the following example: -```console -$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty +``` sh +kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting EDB Postgres for Kubernetes Instance Manager 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL 2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager @@ -834,8 +836,8 @@ Alternatively, it can be used in combination with other commands that produce PG4K logs in JSON format, such as `stern`, or `kubectl logs`, as in the following example: -```console -$ kubectl logs cluster-example-1 | kubectl cnp logs pretty +``` sh +kubectl logs cluster-example-1 | kubectl cnp logs pretty 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting EDB Postgres for Kubernetes Instance Manager 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL 2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager @@ -848,8 +850,8 @@ to display logs for specific pods or loggers, or to filter logs by severity level. Here's an example: -```console -$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --pods cluster-example-1 --loggers postgres --log-level info +``` sh +kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --pods cluster-example-1 --loggers postgres --log-level info 2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process 2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"... 2024-10-15T17:35:00.510 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: ending log output to stderr @@ -865,8 +867,8 @@ mode. The sub-command will add a group separator line, `---`, at the end of each sorted group. The size of the grouping can be configured via the `--sorting-group-size` flag (default: 1000), as illustrated in the following example: -```console -$ kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --sorting-group-size=3 +``` sh +kubectl cnp logs cluster cluster-example | kubectl cnp logs pretty --sorting-group-size=3 2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting EDB Postgres for Kubernetes Instance Manager 2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL 2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting tablespace manager @@ -1413,7 +1415,7 @@ kubectl cnp subscription sync-sequences destination-cluster \ The `cnp` plugin can be easily integrated in [K9s](https://k9scli.io/), a popular terminal-based UI to interact with Kubernetes clusters. -See [`k9s/plugins.yml`](samples/k9s/plugins.yml) for details. +See [`k9s/plugins.yml`](../samples/k9s/plugins.yml) for details. ## Permissions required by the plugin @@ -1446,9 +1448,7 @@ table contains the full details: | subscription | clusters: get
pods: get,list
pods/exec: create |
| version | none |
-[^1]: The permissions are cluster scope ClusterRole resources.
-
-///Footnotes Go Here///
+[^1]: The permissions are cluster-scoped ClusterRole resources.
Additionally, assigning the `list` permission on the `clusters` will enable autocompletion
for multiple commands.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
index 5407193ec89..4a9126d0b7c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
@@ -106,6 +106,8 @@ kind create cluster --name pg
Now that you have a Kubernetes installation up and running on your laptop, you
can proceed with EDB Postgres for Kubernetes installation.
+Unless specified in a cluster configuration file, EDB Postgres for Kubernetes currently deploys community PostgreSQL operands by default. See the section [Deploying EDB Postgres servers](#deploying-edb-postgres-servers) for more information.
+
Refer to the ["Installation"](installation_upgrade.md) section and then
proceed with the deployment of a PostgreSQL cluster.
@@ -178,7 +180,7 @@ metadata:
spec:
# [...]
imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-advanced:16
- #[...]
+ # [...]
```
And to install EDB Postgres Extended 16 you can use:
@@ -191,7 +193,7 @@ metadata:
spec:
# [...]
imageName: docker.enterprisedb.com/k8s_enterprise/edb-postgres-extended:16
- #[...]
+ # [...]
```
!!! Important
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_6_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_6_rel_notes.mdx
index f9ae43f2b07..04a105214bb 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_6_rel_notes.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_22_6_rel_notes.mdx
@@ -7,6 +7,18 @@ Released: 26 Aug 2024
This release of EDB Postgres for Kubernetes includes the following:
-| Type | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| Upstream merge | Merged with community CloudNativePG 1.22.6. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.22/release_notes/v1.22/). |
+### Features
+
+* **Configuration of Pod Disruption Budgets (PDB)**: Introduced the `.spec.enablePDB` field to disable PDBs on the primary instance, allowing proper eviction of the pod during maintenance operations. This is particularly useful for single-instance deployments. This feature is intended to replace the node maintenance window feature.
+
+### Enhancements
+
+* **cnp plugin updates**:
+  * Enhanced the `install generate` command by adding a `--control-plane` option, allowing deployment of the operator on control-plane nodes by setting node affinity and tolerations (\#5271).
+  * Enhanced the `destroy` command to also delete any job related to the target instance (\#5298).
+
+### Fixes
+
+* Synchronous replication self-healing checks now exclude terminated pods, focusing only on active and functional pods (\#5210).
+* The instance manager now terminates all existing operator-related replication connections following a role change in a replica cluster (\#5209).
+* Allowed setting `smartShutdownTimeout` to zero, enabling immediate fast shutdown and bypassing the smart shutdown process when required (\#5347).
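+
+For illustration, here's a minimal sketch of how the new `.spec.enablePDB` field might appear in a cluster manifest. The cluster name and instance count are placeholders:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 1
+  # Disable Pod Disruption Budgets so the pod can be evicted
+  # during node maintenance operations.
+  enablePDB: false
+```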
\ No newline at end of file diff --git a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx index 2c42dd1c221..7794075710b 100644 --- a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx +++ b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx @@ -41,7 +41,7 @@ The basic approach is to store the passphrase in a Kubernetes secret. Such a passphrase will be used to encrypt the EPAS binary key. !!! Seealso "EPAS documentation" - Please refer to [the EPAS documentation](/tde/latest/key_stores/) + Please refer to [the EPAS documentation](/tde/latest/secure_key/) for details on the EPAS encryption key. Activating TDE on the operator is simple. In the `epas` section of the manifest, diff --git a/product_docs/docs/tde/15/about/how.mdx b/product_docs/docs/tde/15/about/how.mdx new file mode 100644 index 00000000000..d05fdebe158 --- /dev/null +++ b/product_docs/docs/tde/15/about/how.mdx @@ -0,0 +1,51 @@ +--- +title: How does TDE encrypt data? +description: How does the encryption of data work when TDE is enabled? +--- + +TDE prevents unauthorized viewing of data in operating system files on the database server and on backup storage. Data becomes unintelligible for unauthorized users if it's stolen or misplaced. + +Data encryption and decryption is managed by the database and doesn't require application changes or updated client drivers. + +EDB Postgres Advanced Server and EDB Postgres Extended Server provide hooks to key management that's external to the database. These hooks allow for simple passphrase encrypt/decrypt or integration with enterprise key management solutions. See [Securing the data encryption key](../secure_key/) for more information. + +## How does TDE encrypt data? + +EDB TDE uses [OpenSSL](https://openssl-library.org/) to encrypt data files with the AES encryption algorithm. In Windows systems, TDE uses [OpenSSL 3](https://docs.openssl.org/3.0/). In Linux systems, TDE uses the OpenSSL version installed in the host operating system. To check the installed version, run `openssl version`. For more information, see the [OpenSSL documentation](https://docs.openssl.org/master/). If you're using a custom build not provided by the OpenSSL community, consult your vendor's documentation. + +Starting with version 16, EDB TDE introduces the option to choose between AES-128 and AES-256 encryption algorithms during the initialization of the Postgres cluster. The choice between AES-128 and AES-256 hinges on balancing performance and security requirements. AES-128 is commonly advised for environments where performance efficiency and lower power consumption are pivotal, making it suitable for most applications. Conversely, AES-256 is recommended for scenarios demanding the highest level of security, often driven by regulatory mandates. + +TDE uses AES-128-XTS or AES-256-XTS algorithms for encrypting data files. XTS uses a second value, known as the *tweak value*, to enhance the encryption. The XTS tweak value with TDE uses the database OID, the relfilenode, and the block number. + +For write-ahead log (WAL) files, TDE uses AES-128-CTR or AES-256-CTR, incorporating the WAL's log sequence number (LSN) as the counter component. + +Temporary files that are accessed by block are also encrypted using AES-128-XTS or AES-256-XTS. Other temporary files are encrypted using AES-128-CBC or AES-256-CBC. + +## How is data stored on disk with TDE? + +In this example, the data in the `tbfoo` table is encrypted. 
The `pg_relation_filepath` function locates the data file corresponding to the `tbfoo` table.
+
+```sql
+insert into tbfoo values ('abc','123');
+INSERT 0 1
+
+select pg_relation_filepath('tbfoo');
+
+ pg_relation_filepath
+----------------------
+ base/5/16416
+```
+
+Grepping the data file for the inserted characters doesn't return anything. Viewing the last five lines shows only the encrypted data:
+
+```shell
+$ hexdump -C 16416 | grep abc
+$
+
+$ hexdump -C 16416 | tail -5
+00001fc0  c8 0f 1d c8 9a 63 3d dc  7d 4e 68 98 b8 f2 5e 0a  |.....c=.}Nh...^.|
+00001fd0  9a eb 20 1d 59 ad be 94  6e fd d5 6e ed 0a 72 8c  |.. .Y...n..n..r.|
+00001fe0  7b 14 7f de 5b 63 e3 84  ba 6c e7 b0 a3 86 aa b9  |{...[c...l......|
+00001ff0  fe 4f 07 50 06 b7 ef 6a  cd f9 84 96 b2 4b 25 12  |.O.P...j.....K%.|
+00002000
+```
diff --git a/product_docs/docs/tde/15/about/index.mdx b/product_docs/docs/tde/15/about/index.mdx
new file mode 100644
index 00000000000..041ff148827
--- /dev/null
+++ b/product_docs/docs/tde/15/about/index.mdx
@@ -0,0 +1,9 @@
+---
+title: About TDE
+description: Learn about TDE, how it works, what it encrypts, and why to use it.
+indexCards: simple
+---
+
+Transparent data encryption (TDE) is an optional feature supported by EDB Postgres Advanced Server and EDB Postgres Extended Server in version 15 and later.
+
+It encrypts user data stored in the database system.
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/about/what.mdx b/product_docs/docs/tde/15/about/what.mdx
new file mode 100644
index 00000000000..dcf012a9a29
--- /dev/null
+++ b/product_docs/docs/tde/15/about/what.mdx
@@ -0,0 +1,37 @@
+---
+title: What's encrypted with TDE?
+description: Which data is encrypted when databases are initialized with TDE?
+---
+
+TDE encrypts:
+
+- The files underlying tables, sequences, and indexes, including TOAST tables, system catalogs, and all forks. These files are known as *data files*.
+
+- The write-ahead log (WAL).
+
+- Various temporary files that are used during query processing and database system operation.
+
+!!! Note Implications
+
+    - Any WAL fetched from a server using TDE, including by streaming replication and archiving, is encrypted.
+
+    - A physical replica is necessarily encrypted (or not encrypted) in the same way and using the same keys as its primary server.
+
+    - If a server uses TDE, a base backup is encrypted.
+
+
+The following aren't encrypted or otherwise disguised by TDE:
+
+- Metadata internal to operating the database system that doesn't contain user data, such as the transaction status (for example, `pg_subtrans` and `pg_xact`).
+
+- The file names and file system structure in the data directory. That means that the overall size of the database system, the number of databases, the number of tables, their relative sizes, as well as file system metadata, such as last access time, are all visible without decryption.
+
+- Data in foreign tables.
+
+- The server diagnostics log.
+
+- Configuration files.
+
+!!! Note Implications
+
+    Logical replication isn't affected by TDE. Publisher and subscriber can have different encryption settings. The payload of the logical replication protocol isn't encrypted. (You can use SSL.)
diff --git a/product_docs/docs/tde/15/about/why.mdx b/product_docs/docs/tde/15/about/why.mdx
new file mode 100644
index 00000000000..24d2e36cd4f
--- /dev/null
+++ b/product_docs/docs/tde/15/about/why.mdx
@@ -0,0 +1,22 @@
+---
+title: Why should you use TDE?
+description: Learn about some of the use cases for TDE encryption.
+---
+
+TDE encryption ensures that user data remains protected from unauthorized access.
+
+When configured with a [data encryption key securing mechanism](../secure_key/), data stored on the database server and in backups is accessible only to users and processes with decryption keys.
+
+Some use cases include:
+
+- **Protection of sensitive personal data.** Industries like finance, e-commerce, healthcare, and government organizations often deal with personally identifiable information that must be protected to comply with data privacy regulations such as GDPR, HIPAA, and PCI DSS.
+
+- **Compliance with government standards.** Government institutions must comply with information processing standards like FIPS to ensure computer security and interoperability.
+
+- **Protecting transactional data.** Financial institutions deal with transaction, account, and payment data that must be protected to prevent fraud and financial losses.
+
+- **Protecting intellectual property.** Organizations safeguard proprietary information, designs, and plans to keep their competitive advantage, support brand value, and foster innovation.
+
+- **Protecting data in cloud-based deployments and public web applications.** Encrypting a database's data provides an added layer of security when infrastructure is shared or when vulnerabilities could potentially infiltrate an application's API.
+
+When your data is encrypted, it becomes unintelligible if it's stolen or misplaced.
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/affected_commands.mdx b/product_docs/docs/tde/15/affected_commands.mdx
index 4df094998a4..0da108edb2f 100644
--- a/product_docs/docs/tde/15/affected_commands.mdx
+++ b/product_docs/docs/tde/15/affected_commands.mdx
@@ -1,16 +1,13 @@
 ---
 title: "Commands affected by TDE"
-navTitle: Affected commands
+description: How TDE changes the behavior of some commands when enabled.
 ---
-
-
 When TDE is enabled, the following commands have TDE-specific options or read TDE settings in environment variables or configuration files:
 
-- [pg_waldump](/tde/latest/troubleshooting/#dumping-a-tde-encrypted-wal-file)
-- [pg_resetwal](/tde/latest/troubleshooting/#resetting-a-corrupt-tde-encrypted-wal-file)
-- [pg_verifybackup](/tde/latest/backups/#verify-a-backup-of-a-tde-system)
-- [pg_rewind](/tde/latest/backups/#resynchronize-timelines-in-a-tde-system)
-- [pg_upgrade](pg_upgrade_arguments)
-- [postgres](/tde/latest/single_user/)
-
+- [initdb](./initdb_tde_options/)
+- [pg_waldump](./encrypted_files/wal_files/#dumping-a-tde-encrypted-wal-file)
+- [pg_resetwal](./encrypted_files/wal_files/#resetting-a-corrupt-tde-encrypted-wal-file)
+- [pg_verifybackup](./encrypted_files/backup_files/#verify-a-backup-of-a-tde-system)
+- [pg_rewind](./encrypted_files/backup_files/#resynchronize-timelines-in-a-tde-system)
+- [pg_upgrade](pg_upgrade_arguments)
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/enabling/enabling_tde.mdx b/product_docs/docs/tde/15/enabling/enabling_tde.mdx
new file mode 100644
index 00000000000..af63e7559ab
--- /dev/null
+++ b/product_docs/docs/tde/15/enabling/enabling_tde.mdx
@@ -0,0 +1,48 @@
+---
+title: "Creating a database with TDE"
+description: Create a database server with TDE enabled.
+---
+
+Create a new EDB Postgres Advanced Server cluster with TDE enabled:
+
+- Set the environment variables to export the `wrap` and `unwrap` commands for encryption.
+- Initialize a server with encryption enabled.
+- Start the database server.
+- Verify TDE is enabled.
+
+## Worked example
+
+This example uses EDB Postgres Advanced Server 15 running on a Linux platform. It uses OpenSSL to define the passphrase to wrap and unwrap the generated data encryption key.
+
+1. Set the data encryption key wrap (encryption) and unwrap (decryption) environment variables:
+
+   ```shell
+   export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out %p'
+   export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in %p'
+   ```
+
+   !!!note
+   If you're on Windows, you don't need the single quotes around the variable value.
+   !!!
+
+1. Initialize the cluster using `initdb` with encryption enabled. This command sets the `data_encryption_key_unwrap_command` parameter in the `postgresql.conf` file.
+
+   ```shell
+   /usr/edb/as15/bin/initdb --data-encryption -D /var/lib/edb/as15/data
+   ```
+
+1. Start the cluster:
+
+   ```shell
+   /usr/edb/as15/bin/pg_ctl -D /var/lib/edb/as15/data start
+   ```
+
+1. Run grep on `postgresql.conf` to verify the setting of `data_encryption_key_unwrap_command`:
+
+   ```shell
+   grep data_encryption_key_unwrap_command /var/lib/edb/as15/data/postgresql.conf
+   __OUTPUT__
+   data_encryption_key_unwrap_command = 'openssl enc -d -aes-128-cbc -pass pass:ok -in %p'
+   ```
+
+1. [Verify that data encryption is enabled](verifying_tde).
diff --git a/product_docs/docs/tde/15/enabling_tde_epas.mdx b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
similarity index 73%
rename from product_docs/docs/tde/15/enabling_tde_epas.mdx
rename to product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
index 811859e8293..d05c9b123b7 100644
--- a/product_docs/docs/tde/15/enabling_tde_epas.mdx
+++ b/product_docs/docs/tde/15/enabling/enabling_tde_epas.mdx
@@ -1,36 +1,36 @@
 ---
 title: "Enabling TDE on an existing EDB Postgres Advanced Server cluster"
-navTitle: Enabling TDE on an existing EDB Postgres Advanced Server cluster
+description: Migrate your existing EDB Postgres Advanced Server cluster to a new TDE-enabled database server.
 deepToC: true
+redirects:
+  - /tde/latest/enabling_tde_epas/ #generated for TDE/refresh
 ---
 
-## Enabling TDE on an EDB Postgres Advanced Server cluster
-
-Create a new EDB Postgres Advanced Server cluster with TDE enabled
-and use `pg_upgrade` to transfer data from the existing source cluster to the new encrypted cluster.
+Create an EDB Postgres Advanced Server cluster with TDE enabled
+and use pg_upgrade to transfer data from the existing source cluster to the new encrypted cluster.
 
 - [Prepare your upgrade](#preparing-your-upgrade) by performing a backup of the existing instance.
-- [Create a new database server](#creating-an-encrypted-server)
-    - Create an empty directory for the new server and ensure `enterprisedb` owns it.
+- [Create a new database server](#creating-an-encrypted-server):
+    - Create an empty directory for the new server and ensure enterprisedb owns it.
     - Set the environment variables to export the `wrap` and `unwrap` commands for encryption.
     - Initialize a server with encryption enabled.
-    - Change the default port, so the new server is available at another port.
+    - Change the default port so the new server is available at another port.
     - Start the database server.
-    - Connect to the database server and ensure it is functioning.
-- [Upgrade to the encrypted server](#upgrading-to-the-encrypted-server)
+    - Connect to the database server and ensure it's functioning.
+- [Upgrade to the encrypted server](#upgrading-to-the-encrypted-server):
     - Stop both the source and the new server.
- - Use `pg_upgrade` with `--copy-by-block` option to copy data from the source server to the new server. Specify the source and target bin and data directories. - - Start the new encrypted databaser server. - - Connect to the encrypted database server and ensure the data was transfered. -- [Clean up and delete the source server](#cleaning-up-after-upgrade) + - Use pg_upgrade with the `--copy-by-block` option to copy data from the source server to the new server. Specify the source and target bin and data directories. + - Start the new encrypted database server. + - Connect to the encrypted database server and ensure the data was transferred. +- [Clean up and delete the source server](#cleaning-up-after-upgrade): - Clean up the database and its statistics. - - Remove the source EDB Postgres Advanced Server cluster with the script provided by `pg_upgrade`. + - Remove the source EDB Postgres Advanced Server cluster with the script provided by pg_upgrade. ## Worked example -This example enables Transparent Data Encryption on an EDB Postgres Advanced Server version 16 running on an Ubuntu 22.04 machine. +This example enables TDE on EDB Postgres Advanced Server version 16 running on an Ubuntu 22.04 machine. -A similar workflow applies to other versions of EDB Postgres Advanced Server and EDB Postgres Extended Server. Note that the location of the BIN and CONFIG directories differs depending on your operating system and the Postgres version. +A similar workflow applies to other versions of EDB Postgres Advanced Server and EDB Postgres Extended Server. The location of the bin and config directories differs depending on your operating system and the Postgres version. ### Preparing your upgrade @@ -44,7 +44,7 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ mkdir /var/lib/edb-as/16/TDE ``` -1. Ensure the `enterprisedb` user owns the directory: +1. Ensure the enterprisedb user owns the directory: ``` sudo chown enterprisedb /var/lib/edb-as/16/TDE @@ -61,7 +61,7 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ !!!note Alternatively, use the `--key-unwrap-command=` and `--key-wrap-command=` arguments when initializing the encrypted server to include the `wrap` and `unwrap` commands. - See [Using initdb TDE options](enabling_tde/#using-initdb-tde-options) for more information on possible configurations. + See [Using initdb TDE options](../initdb_tde_options/) for more information on possible configurations. 1. Initialize the new server with encryption: @@ -69,7 +69,7 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ /usr/lib/edb-as/16/bin/initdb --data-encryption -D /var/lib/edb-as/16/TDE ``` - This command initializes a CONFIG directory with all configuration files for the encrypted server. + This command initializes a config directory with all configuration files for the encrypted server. 1. Modify the port number in the configuration file of the encrypted instance. Uncomment the line with `#port` and change the port number. For example: @@ -92,7 +92,7 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ !!!note If you're using two different Postgres versions, use the psql utility of the encrypted server. Otherwise, the system will attempt to use psql from the previous instance. -1. To ensure the new server is encrypted, [check for TDE presence](enabling_tde/#checking-for-tde-presence-using-sql). +1. 
To ensure the new server is encrypted, [check for TDE presence](verifying_tde). ### Upgrading to the encrypted server @@ -148,7 +148,7 @@ Use [pg_dumpall](https://www.postgresql.org/docs/current/app-pg-dumpall.html), [ ### Cleaning up after upgrade -After you verify that `pg_upgrade` encrypted the data successfully, perform a cleanup. +After you verify that pg_upgrade encrypted the data successfully, perform a cleanup. 1. Clean up the database and its statistics: @@ -156,7 +156,7 @@ After you verify that `pg_upgrade` encrypted the data successfully, perform a cl /usr/lib/edb-as/16/bin/vacuumdb --all --analyze-in-stages ``` -1. Remove all data files of the unencrypted server with the script generated by `pg_upgrade`: +1. Remove all data files of the unencrypted server with the script generated by pg_upgrade: ``` ./delete_old_cluster.sh diff --git a/product_docs/docs/tde/15/enabling/index.mdx b/product_docs/docs/tde/15/enabling/index.mdx new file mode 100644 index 00000000000..7724197b18a --- /dev/null +++ b/product_docs/docs/tde/15/enabling/index.mdx @@ -0,0 +1,12 @@ +--- +title: Tutorials +description: Review some examples of how to create a TDE-enabled database server. +indexCards: simple +navigation: + - enabling_tde + - enabling_tde_epas +--- + +Create a TDE-enabled database server using `initdb`. + +Or migrate an existing database instance by creating a TDE-enabled database server with `initdb` and then migrating data with pg_upgrade. \ No newline at end of file diff --git a/product_docs/docs/tde/15/upgrade_use_cases/postgres_to_extended.mdx b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx similarity index 90% rename from product_docs/docs/tde/15/upgrade_use_cases/postgres_to_extended.mdx rename to product_docs/docs/tde/15/enabling/postgres_to_extended.mdx index bf2dfeb9c86..4e9f507e6e4 100644 --- a/product_docs/docs/tde/15/upgrade_use_cases/postgres_to_extended.mdx +++ b/product_docs/docs/tde/15/enabling/postgres_to_extended.mdx @@ -1,31 +1,34 @@ --- title: "Upgrading PostgreSQL to EDB Postgres Extended Server while enabling TDE" navTitle: Upgrading PostgreSQL to EDB Postgres Extended Server +description: Use pg_upgrade to upgrade the database version, change the Postgres distribution, or migrate to a TDE-enabled database. deepToC: true +redirects: + - /tde/latest/upgrade_use_cases/postgres_to_extended/ #generated for TDE/refresh --- Create a new EDB Postgres Extended Server cluster with TDE enabled and use pg_upgrade to transfer data from the existing PostgreSQL cluster to the new encrypted cluster. - [Prepare your upgrade](#preparing-your-upgrade) by performing a backup of the existing instance. -- [Create a new database server](#creating-an-encrypted-server). +- [Create a new database server](#creating-an-encrypted-server): - Create an empty directory for the new server and ensure the postgres user owns it. - Set the environment variables to export the `wrap` and `unwrap` commands for encryption. - Initialize a server with encryption enabled. - Change the default port so the new server is available at another port. - Start the database server. - Connect to the database server and ensure it's functioning. -- [Upgrade to the encrypted server](#upgrading-to-the-encrypted-server). +- [Upgrade to the encrypted server](#upgrading-to-the-encrypted-server): - Stop both the source and the new server. - Use pg_upgrade with the `--copy-by-block` option to copy data from the source server to the new server. Specify the source and target bin and data directories. 
    - Start the new encrypted database server.
    - Connect to the encrypted database server and ensure the data was transferred.
-- [Clean up and delete the source server](#cleaning-up-after-upgrade).
+- [Clean up and delete the source server](#cleaning-up-after-upgrade):
    - Clean up the database and its statistics.
    - Remove the source PostgreSQL cluster with the script provided by pg_upgrade.
 
 ## Worked example
 
-This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 16 while enabling Transparent Data Encryption on an Ubuntu 22.04 machine. The location of the bin and config directories differs depending on your operating system and Postgres versions.
+This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 16 while enabling TDE on an Ubuntu 22.04 machine. The location of the bin and config directories differs depending on your operating system and Postgres versions.
 
 ### Preparing your upgrade
 
@@ -58,7 +61,7 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1
 
     !!!note
     Alternatively, use the `--key-unwrap-command=` and `--key-wrap-command=` arguments when initializing the encrypted server to include the `wrap` and `unwrap` commands.
-    See [Using initdb TDE options](../enabling_tde/#using-initdb-tde-options) for more information on possible configurations.
+    See [Using initdb TDE options](../initdb_tde_options/) for more information on possible configurations.
 
 1. Initialize the new server with encryption:
 
@@ -89,7 +92,7 @@ This example upgrades a PostgreSQL 16 instance to EDB Postgres Extended Server 1
 
     !!!note
     If you're using two different Postgres versions, use the psql utility of the encrypted server. Otherwise, the system attempts to use psql from the previous instance.
 
-1. To ensure the new server is encrypted, [check for TDE presence](../enabling_tde/#checking-for-tde-presence-using-sql).
+1. To ensure the new server is encrypted, [check for TDE presence](../enabling/verifying_tde/).
 
 ### Upgrading to the encrypted server
 
diff --git a/product_docs/docs/tde/15/enabling/verifying_tde.mdx b/product_docs/docs/tde/15/enabling/verifying_tde.mdx
new file mode 100644
index 00000000000..ad71ed5e8fb
--- /dev/null
+++ b/product_docs/docs/tde/15/enabling/verifying_tde.mdx
@@ -0,0 +1,18 @@
+---
+title: "Verifying TDE is enabled"
+description: Verify TDE is enabled after creating a database server.
+---
+
+You can find out whether TDE is present on a server by querying the `data_encryption_version` column returned by the `pg_control_init()` function.
+
+A value of 0 means TDE isn't enabled. Any nonzero value reflects the version of TDE in use. Currently, when TDE is enabled, this value is 1.
+
+```sql
+select data_encryption_version from pg_control_init();
+__OUTPUT__
+ data_encryption_version
+-------------------------
+                       1
+(1 row)
+```
+
diff --git a/product_docs/docs/tde/15/enabling_tde.mdx b/product_docs/docs/tde/15/enabling_tde.mdx
deleted file mode 100644
index 79b24445228..00000000000
--- a/product_docs/docs/tde/15/enabling_tde.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
----
-title: "Creating a database cluster with TDE enabled"
-navTitle: Creating a cluster with TDE
----
-
-You enable transparent data encryption when you initialize a database cluster using [initdb](https://www.postgresql.org/docs/15/app-initdb.html).
- - -## Using initdb TDE options - -To enable encryption, use the following options with the `initdb` command or their fallback environment variables: - - -`-y, --data-encryption` - - Initialize the new database cluster with transparent data encryption. See [Transparent Data Encryption](/tde/latest) for more information. Optionally specify an AES key length. Valid values are 128 and 256. The default is 128. - -`--copy-key-from=` - - Copy the data encryption key from the given location. You can use this option to copy a key from an existing cluster when preparing a new cluster as a target for pg_upgrade. - -`--key-wrap-command=` - - Specify a command to wrap (encrypt) the generated data encryption key. The command must include a placeholder `%p` that specifies the file to write the wrapped key to. The unwrapped key is provided to the command on its standard input. If you don't specify this option, the environment variable `PGDATAKEYWRAPCMD` is used. - - Use the special value `-` if you don't want to apply any key wrapping command. - - You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores/) for more information. - -`--key-unwrap-command=` - - Specify a command to unwrap (decrypt) the data encryption key. The command must include a placeholder `%p` that specifies the file to read the wrapped key from. The command needs to write the unwrapped key to its standard output. If you don't specify this option, the environment variable `PGDATAKEYUNWRAPCMD` is used. - - Use the special value `-` if you don't want to apply any key unwrapping command. - - You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores/) for more information. - -`--no-key-wrap` - -Disable key wrapping. The data encryption key is instead stored in plaintext in the data directory. (This option is a shortcut for setting both the wrap and the unwrap command to the special value `-`.) - -!!!Note - Using this option isn't secure. Use it only for testing purposes. - -If you select data encryption and don't specify this option, then you must provide key wrap and unwrap commands. Otherwise, `initdb` terminates with an error. - -## Using environment variables - -To simplify operations, you can set the key wrap and unwrap commands in the environment variables. - -For example: - -```shell -PGDATAKEYWRAPCMD='openssl enc -e -aes128-wrap -pbkdf2 -out "%p"' -PGDATAKEYUNWRAPCMD='openssl enc -d -aes128-wrap -pbkdf2 -in "%p"' -export PGDATAKEYWRAPCMD PGDATAKEYUNWRAPCMD -``` - -## Setting the key parameter in postgresql.conf - -When you enable TDE for a cluster, the `initdb` command initializes the `data_encryption_key_unwrap_command` parameter in the `postgresql.conf` configuration file. The string specified in `data_encryption_key_unwrap_command` unwraps (decrypts) the data encryption key. - -The command must contain a placeholder `%p`, which is replaced with the name of the file containing the key to unwrap. The command must print the unwrapped (decrypted) key to its standard output. - -If you don't specify this parameter, the environment variable `PGDATAKEYUNWRAPCMD` is used. - -Use the special value `-` if you don't want to apply any key unwrapping command. - -You must specify this parameter or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores/) for more information. 
- -You can set this parameter only at server start. - -This parameter is normally initialized by `initdb`. Change it only if you change the key wrap method. - -For more information on the configuration files, see [PostgreSQL File Locations documentation](https://www.postgresql.org/docs/15/runtime-config-file-locations.html). - -## Example - -This example uses EDB Postgres Advanced Server 15 running on a Linux platform. It uses openssl to define the passkey to wrap and unwrap the generated data encryption key. - -1. Set the data encryption key (wrap) and decryption (unwrap) environment variables: - - ```shell - export PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pass pass:ok -out %p' - export PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pass pass:ok -in %p' - ``` - !!!note - If you are on Windows you don't need the single quotes around the variable value. - -1. Initialize the cluster using `initdb` with encryption enabled. This command sets the `data_encryption_key_unwrap_command` parameter in the postgresql.conf file. - - ```shell - /usr/edb/as15/bin/initdb --data-encryption -D /var/lib/edb/as15/data - ``` - -1. Start the cluster: - - ```shell - /usr/edb/as15/bin/pg_ctl -D /var/lib/edb/as15/data start - ``` - -1. Run grep on postgresql.conf to see the setting of `data_encryption_key_unwrap_command`: - - ```shell - grep data_encryption_key_unwrap_command /var/lib/edb/as15/data/postgresql.conf - __OUTPUT__ - data_encryption_key_unwrap_command = 'openssl enc -d -aes-128-cbc -pass pass:ok -in %p' - ``` - -## Checking for TDE presence using SQL - -You can find out whether TDE is present on a server by querying the `data_encryption_version` column of the `pg_control_init` table. - -A value of 0 means TDE isn't enabled. Any nonzero value reflects the version of TDE in use. Currently, when TDE is enabled, this value is 1. - -```sql -select data_encryption_version from pg_control_init(); -__OUTPUT__ - data_encryption_version -------------------------- - 1 -(1 row) -``` - diff --git a/product_docs/docs/tde/15/backups.mdx b/product_docs/docs/tde/15/encrypted_files/backup_files.mdx similarity index 85% rename from product_docs/docs/tde/15/backups.mdx rename to product_docs/docs/tde/15/encrypted_files/backup_files.mdx index 68b498193ed..7e2da824860 100644 --- a/product_docs/docs/tde/15/backups.mdx +++ b/product_docs/docs/tde/15/encrypted_files/backup_files.mdx @@ -1,8 +1,11 @@ --- title: "Working with encrypted backup files" navTitle: Backup files +redirects: + - /tde/latest/backups/ #generated for TDE/refresh --- +When TDE is enabled, backup files are encrypted. If you want to perform operations on the encrypted backup files, you need to allow the operations to decrypt the file. ## Verify a backup of a TDE system @@ -14,7 +17,7 @@ Specifies a command to unwrap (decrypt) the data encryption key. The command mus Use the special value `-` if you don't want to apply any key unwrapping command. -You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores) for more information. +You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](../secure_key/) for more information. ## Resynchronize timelines in a TDE system @@ -27,5 +30,5 @@ Specifies a command to unwrap (decrypt) the data encryption key. The command mus Use the special value `-` if you don't want to apply any key unwrapping command. 
-You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores/) for more information.
+You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](../secure_key/) for more information.
 
diff --git a/product_docs/docs/tde/15/encrypted_files/index.mdx b/product_docs/docs/tde/15/encrypted_files/index.mdx
new file mode 100644
index 00000000000..08650dca1b3
--- /dev/null
+++ b/product_docs/docs/tde/15/encrypted_files/index.mdx
@@ -0,0 +1,15 @@
+---
+title: "Working with encrypted files"
+description: How to work with files in an encrypted environment.
+---
+
+Certain Postgres utilities and operations require access to files to read data and deliver the required output.
+
+In a TDE-enabled database server, data is encrypted and therefore not accessible without a decryption mechanism. For these utilities to perform read operations on encrypted files, you must provide a decryption mechanism.
+
+- [Backup files](backup_files) provides guidance on how to work with and troubleshoot any issues with backup files.
+
+- [WAL files](wal_files) provides guidance on how to work with and troubleshoot any issues with WAL files.
+
+
+
diff --git a/product_docs/docs/tde/15/troubleshooting.mdx b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
similarity index 88%
rename from product_docs/docs/tde/15/troubleshooting.mdx
rename to product_docs/docs/tde/15/encrypted_files/wal_files.mdx
index 0b5e48adfa7..0a350983684 100644
--- a/product_docs/docs/tde/15/troubleshooting.mdx
+++ b/product_docs/docs/tde/15/encrypted_files/wal_files.mdx
@@ -1,7 +1,9 @@
 ---
-title: "Troubleshooting with encrypted WAL files"
+title: "Working with encrypted WAL files"
 navTitle: WAL files
 deepToC: true
+redirects:
+  - /tde/latest/troubleshooting/ #generated for TDE/refresh
 ---
 
 When TDE is enabled, WAL files are encrypted. If you want to perform operations on the encrypted WAL files, you need to allow the operations to decrypt the file.
 
@@ -20,7 +22,7 @@ Specify this option if the WAL files were encrypted by transparent data encrypti
 
 The `--data-encryption` or `-y` option ensures the command is aware of the encryption. Otherwise, `pg_waldump` can't detect whether WAL files are encrypted.
 
-Provide the same encryption configuration you used when initializing the TDE-enabled database cluster. For example, if you specified an AES key length during the cluster creation, you must specify it here as well. Otherwise, run the flag with no values. See [Using initdb TDE options](enabling_tde/#using-initdb-tde-options) for more information.
+Provide the same encryption configuration you used when initializing the TDE-enabled database cluster. For example, if you specified an AES key length during the cluster creation, you must specify it here as well. Otherwise, run the flag with no values. See [Using initdb TDE options](../initdb_tde_options/) for more information.
 
 ### `--key-file-name=`
 
 The command can then load the data encryption key from the provided location.
 
 ### `--key-unwrap-command=`
 
-For the `--key-unwrap-command=` option, provide the decryption command you specified to unwrap (decrypt) the data encryption key when initializing the TDE cluster. See [Using initdb TDE options](enabling_tde/#using-initdb-tde-options) for more information.
+For the `--key-unwrap-command=` option, provide the decryption command you specified to unwrap (decrypt) the data encryption key when initializing the TDE cluster. See [Using initdb TDE options](../initdb_tde_options/) for more information. -Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before running the `pg_waldump` command. If the `--key-unwrap-command=` option isn't specified,`pg_waldump` falls back on `PGDATAKEYUNWRAPCMD`. This [cluster initialization example](enabling_tde/#example) shows how to export an environment variable. +Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before running the `pg_waldump` command. If the `--key-unwrap-command=` option isn't specified,`pg_waldump` falls back on `PGDATAKEYUNWRAPCMD`. This [cluster initialization example](../enabling/enabling_tde/) shows how to export an environment variable. ### Example @@ -48,9 +50,9 @@ To reset a corrupt encrypted WAL file, you must ensure the [pg_resetwal](https:/ ### `--key-unwrap-command=` -For the `--key-unwrap-command=` option, provide the decryption command you specified to unwrap (decrypt) the data encryption key when initializing the TDE cluster. See [Using initdb TDE options](enabling_tde/#using-initdb-tde-options) for more information. +For the `--key-unwrap-command=` option, provide the decryption command you specified to unwrap (decrypt) the data encryption key when initializing the TDE cluster. See [Using initdb TDE options](../initdb_tde_options/) for more information. -Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before running the `pg_resetwal` command. If the `--key-unwrap-command=` option isn't specified, `pg_resetwal` falls back on `PGDATAKEYUNWRAPCMD`. This [cluster initialization example](enabling_tde/#example) shows how to export an environment variable. +Alternatively, you can set the `PGDATAKEYUNWRAPCMD` environment variable before running the `pg_resetwal` command. If the `--key-unwrap-command=` option isn't specified, `pg_resetwal` falls back on `PGDATAKEYUNWRAPCMD`. This [cluster initialization example](../enabling/enabling_tde/) shows how to export an environment variable. ### Example diff --git a/product_docs/docs/tde/15/index.mdx b/product_docs/docs/tde/15/index.mdx index 8766f4fa226..16d366fa521 100644 --- a/product_docs/docs/tde/15/index.mdx +++ b/product_docs/docs/tde/15/index.mdx @@ -2,22 +2,21 @@ title: "Transparent Data Encryption" hideVersion: true navigation: -- "#Enabling" -- key_stores -- enabling_tde -- enabling_tde_epas -- "#Differences from Postgres" -- limitations +- "#Overview" +- about +- support +- "#Administering" +- overview +- secure_key +- initdb_tde +- encrypted_files +- upgrading +- enabling +- "#Reference" - affected_commands -- "#Working with encrypted files" -- troubleshooting -- backups -- "#Upgrading" -- upgrade_use_cases +- initdb_tde_options - pg_upgrade_arguments -- "#Testing" -- single_user -- regress_run +- limitations --- @@ -28,90 +27,6 @@ It encrypts any user data stored in the database system. This encryption is tran ![Architecture diagram](images/tde1.png) -## What's encrypted with TDE? - -TDE encrypts: - -- The files underlying tables, sequences, indexes, including TOAST tables and system catalogs, and including all forks. These files are known as *data files*. - -- The write-ahead log (WAL). - -- Various temporary files that are used during query processing and database system operation. - -!!! 
Note Implications - - - Any WAL fetched from a server using TDE, including by streaming replication and archiving, is encrypted. - - - A physical replica is necessarily encrypted (or not encrypted) in the same way and using the same keys as its primary server. - - - If a server uses TDE, a base backup is automatically encrypted. - - -The following aren't encrypted or otherwise disguised by TDE: - -- Metadata internal to operating the database system that doesn't contain user data, such as the transaction status (for example, pg_subtrans and pg_xact). - -- The file names and file system structure in the data directory. That means that the overall size of the database system, the number of databases, the number of tables, their relative sizes, as well as file system metadata such as last access time are all visible without decryption. - -- Data in foreign tables. - -- The server diagnostics log. - -- Configuration files. - -!!! Note Implications - - Logical replication isn't affected by TDE. Publisher and subscriber can have different encryption settings. The payload of the logical replication protocol isn't encrypted. (You can use SSL.) - -### How does TDE affect performance? +## How does TDE affect performance? The performance impact of TDE is low. For details, see the [Transparent Data Encryption Impacts on EDB Postgres Advanced Server 15](https://www.enterprisedb.com/blog/TDE-Postgres-Advanced-Server-15-Launch) blog. - -## How does TDE work? - -TDE prevents unauthorized viewing of data in operating system files on the database server and on backup storage. Data becomes unintelligible for unauthorized users if it's stolen or misplaced. - -Data encryption and decryption is managed by the database and doesn't require application changes or updated client drivers. - -EDB Postgres Advanced Server and EDB Postgres Extended Server provide hooks to key management that's external to the database. These hooks allow for simple passphrase encrypt/decrypt or integration with enterprise key management solutions. See [Securing the data encryption key](./key_stores) for more information. - -### How does TDE encrypt data? - -EDB TDE uses [OpenSSL](https://openssl-library.org/) to encrypt data files with the AES encryption algorithm. In Windows systems, TDE uses [OpenSSL 3](https://docs.openssl.org/3.0/). In Linux systems, TDE uses the OpenSSL version installed in the host operating system. To check the installed version, run `openssl version`. For more information, see the [OpenSSL documentation](https://docs.openssl.org/master/). If you're using a custom build not provided by the OpenSSL community, consult your vendor's documentation. - -Starting with version 16, EDB TDE introduces the option to choose between AES-128 and AES-256 encryption algorithms during the initialization of the Postgres cluster. The choice between AES-128 and AES-256 hinges on balancing performance and security requirements. AES-128 is commonly advised for environments where performance efficiency and lower power consumption are pivotal, making it suitable for most applications. Conversely, AES-256 is recommended for scenarios demanding the highest level of security, often driven by regulatory mandates. - -TDE uses AES-128-XTS or AES-256-XTS algorithms for encrypting data files. XTS uses a second value, known as the *tweak value*, to enhance the encryption. The XTS tweak value with TDE uses the database OID, the relfilenode, and the block number. 
-
-For write-ahead log (WAL) files, TDE uses AES-128-CTR or AES-256-CTR, incorporating the WAL's log sequence number (LSN) as the counter component.
-
-Temporary files that are accessed by block are also encrypted using AES-128-XTS or AES-256-XTS. Other temporary files are encrypted using AES-128-CBC or AES-256-CBC.
-
-### How is data stored on disk with TDE?
-
-In this example, the data in the `tbfoo` table is encrypted. The `pg_relation_filepath` function locates the data file corresponding to the `tbfoo` table.
-
-```sql
-insert into tbfoo values ('abc','123');
-INSERT 0 1
-
-select pg_relation_filepath('tbfoo');
-
- pg_relation_filepath
-----------------------
- base/5/16416
-```
-
-Grepping the data looking for characters doesn't return anything. Viewing the last five lines returns the encrypted data:
-
-```shell
-$ hexdump -C 16416 | grep abc
-$
-
-$ hexdump -C 16416 | tail -5
-00001fc0  c8 0f 1d c8 9a 63 3d dc  7d 4e 68 98 b8 f2 5e 0a  |.....c=.}Nh...^.|
-00001fd0  9a eb 20 1d 59 ad be 94  6e fd d5 6e ed 0a 72 8c  |.. .Y...n..n..r.|
-00001fe0  7b 14 7f de 5b 63 e3 84  ba 6c e7 b0 a3 86 aa b9  |{...[c...l......|
-00001ff0  fe 4f 07 50 06 b7 ef 6a  cd f9 84 96 b2 4b 25 12  |.O.P...j.....K%.|
-00002000
-```
diff --git a/product_docs/docs/tde/15/initdb_tde.mdx b/product_docs/docs/tde/15/initdb_tde.mdx
new file mode 100644
index 00000000000..0a96aa3c614
--- /dev/null
+++ b/product_docs/docs/tde/15/initdb_tde.mdx
@@ -0,0 +1,24 @@
+---
+title: "Using TDE initialization options"
+description: Learn which initdb options to use to enable TDE.
+---
+
+Initializing a TDE-enabled server requires two mandatory settings: one enables TDE and the other protects the data encryption key.
+
+## To enable TDE
+
+To create a TDE-enabled database server, you must use the [`--data-encryption` option](initdb_tde_options/#option---data-encryption-or--y), which creates a data encryption key to encrypt your server.
+
+If you want to copy a key from an existing cluster when preparing a new cluster as a target for pg_upgrade, also use the [`--copy-key-from=` option](initdb_tde_options/#option---copy-key-fromfile).
+
+## To protect the data encryption key
+
+When creating a TDE-enabled database, TDE generates a data encryption key that's transparent to the user.
+
+An added protection mechanism in the form of a wrapping and an unwrapping command is required to wrap this key. You must make these commands available to the database server.
+
+See [Providing the wrapping and unwrapping commands to TDE](secure_key) for an overview of the available protection mechanisms and examples of how to provide this configuration to `initdb`.
+
+## Options reference
+
+See [initdb TDE options](initdb_tde_options) for an overview of all mandatory and optional options and their supported values.
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/initdb_tde_options.mdx b/product_docs/docs/tde/15/initdb_tde_options.mdx
new file mode 100644
index 00000000000..9b0387b0f1f
--- /dev/null
+++ b/product_docs/docs/tde/15/initdb_tde_options.mdx
@@ -0,0 +1,60 @@
+---
+title: "initdb TDE options"
+description: Learn about the initdb options required to initialize a TDE-encrypted database.
+---
+
+To enable encryption, use the following options with the `initdb` command.
+
+## Option: `--data-encryption` or `-y`
+
+Adds transparent data encryption when initializing a database server.
+
+### Supported values
+
+You can optionally specify an AES key length. Valid values are 128 and 256. The default is 128.
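+
+For instance, here's a minimal sketch that initializes an encrypted cluster with the default AES-128 key length, reusing the passphrase-based OpenSSL commands from [Securing the data encryption key](secure_key). The data directory path is illustrative:
+
+```shell
+# Initialize a TDE-enabled cluster. The generated data key is wrapped with
+# a passphrase-derived key; %p is replaced with the path of the key file.
+initdb -D datadir --data-encryption \
+  --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' \
+  --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
+```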
+ +## Option: `--key-wrap-command=` + +Provides the wrapping/encryption command to protect the data encryption key. + +### Supported values + +`` is customizable, but it must contain the placeholder `%p`. See [Wrapping commands](secure_key/#configuring-a-wrapping-and-unwrapping-command) for examples and information. See [Securing the data encryption key](secure_key) for an overview of available wrapping mechanisms. + +If you don't use this option, TDE falls back on the environment variable `PGDATAKEYWRAPCMD`. + +If you don't want to apply a wrapping mechanism, use `-`. + +## Option: `--key-unwrap-command=` + +Provides the unwrapping/decryption command to access the data encryption key. + +### Supported values + +`` is customizable, but it must contain the placeholder `%p`. See [Configuring wrapping commands](secure_key/#configuring-a-wrapping-and-unwrapping-command) for examples and information. + +If you don't use this option, TDE falls back on the environment variable `PGDATAKEYUNWRAPCMD`. + +If you didn't apply a wrapping mechanism, use `-` . + +## Option: `--no-key-wrap` + +Disables the key wrapping. The data encryption key is instead stored in plaintext in the data directory. (This option is a shortcut for setting both the wrap and the unwrap command to the special value `-`.) + +!!!Note + Using this option isn't secure. Only use it for testing purposes. +!!! + +If you select data encryption and don't specify this option, then you must provide a key wrap and unwrap command. Otherwise, `initdb` terminates with an error. + +### Supported values + +The `--no-key-wrap` option doesn't require specifying any values. + +## Option: `--copy-key-from=` + +Copies an existing data encryption key from the provided location, for example, when reusing a key from an existing server during an upgrade with pg_upgrade. + +### Supported values + +`` is the directory to the existing key. Normally, encryption keys are stored in `pg_encryption/key.bin`. diff --git a/product_docs/docs/tde/15/key_stores.mdx b/product_docs/docs/tde/15/key_stores.mdx deleted file mode 100644 index 3e9ab249215..00000000000 --- a/product_docs/docs/tde/15/key_stores.mdx +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Securing the data encryption key -description: Learn how to secure your data with an encryption key. -deepToC: true ---- - - -The key for transparent data encryption (the data key) is normally generated by `initdb` and stored in a file `pg_encryption/key.bin` under the data directory. This file actually contains several keys that are used for different purposes at run time. However, in terms of the data key, it contains a single sequence of random bytes. - -Without any further action, this file contains the key in plaintext, which isn't secure. Anyone with access to the encrypted data directory has access to the plaintext key, which defeats the purpose of encryption. Therefore, this setup is suitable only for testing purposes. - -To secure the data key properly, “wrap” it by encrypting it with another key. Broadly, you can use two approaches to arrange this: - -- Protect the data key with a passphrase. A wrapping key is derived from the passphrase and used to encrypt the data key. - -- The wrapping key is stored elsewhere, for example, in a key management system, also known as a key store. This second key is also called the *key-wrapping key* or *master key*. - -If you don't want key wrapping, for example for testing, then you must set the wrap and unwrap commands to the special value `-`. 
This setting specifies to use the key from the file without further processing. This approach differs from not setting a wrap or unwrap command at all, and from setting either/both to an empty string. Having no wrap or unwrap command set when transparent data encryption is used results in a fatal error when running an affected utility program. - -Postgres leaves this configuration up to the user, which allows tailoring the setup to local requirements and integrating with existing key management software or similar. To configure the data key protection, you must specify a pair of external commands that take care of the wrapping (encrypting) and unwrapping (decryption). - -## Using a passphrase - -You can protect the data key with a passphrase using the openssl command line utility. The following is an example that sets up this protection: - -```shell -initdb -D datadir -y --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' -``` - -This example wraps the randomly generated data key (done internally by initdb) by encrypting it with the AES-128-CBC (AESKW) algorithm. The encryption uses a key derived from a passphrase with the PBKDF2 key derivation function and a randomly generated salt. The terminal prompts for the passphrase. (See the openssl-enc manual page for details about these options. Available options vary across versions.) The initdb utility replaces `%p` with the name of the file that stores the wrapped key. - -The unwrap command performs the opposite operation. initdb doesn't need the unwrap operation. However, it stores it in the `postgresql.conf` file of the initialized cluster, which uses it when it starts up. - - -The key wrap command receives the plaintext key on standard input and needs to put the wrapped key at the file system location specified by the `%p` placeholder. The key unwrap command needs to read the wrapped key from the file system location specified by the `%p` placeholder and write the unwrapped key to the standard output. - -Utility programs like pg_rewind and pg_upgrade operate directly on the data directory or copies, such as backups. These programs also need to be told about the key unwrap command, depending on the circumstances. They each have command-line options for this purpose. - -To simplify operations, you can also set the key wrap and unwrap commands in environment variables. These are accepted by all affected applications if you don't provide the corresponding command line options. For example: - -```shell -PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' -PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' -export PGDATAKEYWRAPCMD PGDATAKEYUNWRAPCMD -``` - -Key unwrap commands that prompt for passwords on the terminal don't work when the server is started by pg_ctl or through service managers such as systemd. The server is detached from the terminal in those environments. If you want an interactive password prompt on server start, you need a more elaborate configuration that fetches the password using some indirect mechanism. 
- -For example, for systemd, you can use `systemd-ask-password`: - -``` -PGDATAKEYWRAPCMD="bash -c 'openssl enc -e -aes-128-cbc -pbkdf2 -out %p -pass file:<(sudo systemd-ask-password --no-tty)'" -PGDATAKEYUNWRAPCMD="bash -c 'openssl enc -d -aes-128-cbc -pbkdf2 -in %p -pass file:<(sudo systemd-ask-password --no-tty)'" -``` - -You also need an entry like in `/etc/sudoers`: - -``` -postgres ALL = NOPASSWD: /usr/bin/systemd-ask-password -``` - -## Using a key store -You can use the key store in an external key management system to manage the data encryption key. The tested and supported key stores are: - -- Amazon AWS Key Management Service (KMS) -- Microsoft Azure Key Vault -- Google Cloud - Cloud Key Management Service -- HashiCorp Vault (KMIP Secrets Engine and Transit Secrets Engine) -- Thales CipherTrust Manager -- Fortanix Data Security Manager - -To use one of the available key stores, see the configuration examples. - -### AWS Key Management Service example - -Create a key with [AWS Key Management Service](https://docs.aws.amazon.com/kms/): - -```shell -aws kms create-key -aws kms create-alias --alias-name alias/pg-tde-master-1 --target-key-id "..." -``` - -Use the `aws kms` command with the `alias/pg-tde-master-1` key to wrap and unwrap the data encryption key: - -```shell -PGDATAKEYWRAPCMD='aws kms encrypt --key-id alias/pg-tde-master-1 --plaintext fileb:///dev/stdin --output text --query CiphertextBlob | base64 -d > "%p"' -PGDATAKEYUNWRAPCMD='aws kms decrypt --key-id alias/pg-tde-master-1 --ciphertext-blob fileb://"%p" --output text --query Plaintext | base64 -d' -``` -!!! Note - Shell commands with pipes, as in this example, are problematic because the exit status of the pipe is that of the last command. A failure of the first, more interesting command isn't reported properly. Postgres handles this somewhat by recognizing whether the wrap or unwrap command wrote nothing. However, it's better to make this more robust. For example, use the `pipefail` option available in some shells or the `mispipe` command available on some operating systems. Put more complicated commands into an external shell script or other program instead of defining them inline. - -### Azure Key Vault example - -Create a key with [Azure Key Vault](https://learn.microsoft.com/en-us/azure/key-vault/): - -```shell -az keyvault key create --vault-name pg-tde --name pg-tde-master-1 -``` - -Use the `az keyvault key` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: - -```shell -PGDATAKEYWRAPCMD='az keyvault key encrypt --name pg-tde-master-1 --vault-name pg-tde --algorithm A256GCM --value @- --data-type plaintext --only-show-errors --output json | jq -r .result > "%p"' -PGDATAKEYUNWRAPCMD='az keyvault key decrypt --name pg-tde-master-1 --vault-name pg-tde --algorithm A256GCM --value @"%p" --data-type plaintext --only-show-errors --output json | jq -r .result' -``` -!!! Note - Shell commands with pipes, as in this example, are problematic because the exit status of the pipe is that of the last command. A failure of the first, more interesting command isn't reported properly. Postgres handles this somewhat by recognizing whether the wrap or unwrap command wrote nothing. However, it's better to make this more robust. For example, use the `pipefail` option available in some shells or the `mispipe` command available on some operating systems. Put more complicated commands into an external shell script or other program instead of defining them inline. 
- -### Google Cloud KMS example - -Create a key with [Google Cloud KMS](https://cloud.google.com/kms/docs): - -```shell -gcloud kms keys create pg-tde-master-1 --location=global --keyring=pg-tde --purpose=encryption -``` - -Use the `gcloud kms` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: - -```shell -PGDATAKEYWRAPCMD='gcloud kms encrypt --plaintext-file=- --ciphertext-file=%p --location=global --keyring=pg-tde --key=pg-tde-master-1' -PGDATAKEYUNWRAPCMD='gcloud kms decrypt --plaintext-file=- --ciphertext-file=%p --location=global --keyring=pg-tde --key=pg-tde-master-1' -``` - -### HashiCorp Vault Transit Secrets Engine example - -Enable transit with [HashiCorp Vault Transit Secrets Engine](https://developer.hashicorp.com/vault/docs): - -```shell -vault secrets enable transit -``` - -Create a key and give it a name: - -```shell -vault write -f transit/keys/pg-tde-master-1 -``` - -Use the `vault write` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: - -``` -PGDATAKEYWRAPCMD='base64 | vault write -field=ciphertext transit/encrypt/pg-tde-master-1 plaintext=- > %p' -PGDATAKEYUNWRAPCMD='vault write -field=plaintext transit/decrypt/pg-tde-master-1 ciphertext=- < %p | base64 -d' -``` - -### Fortanix Data Security Manager example - -See [Using Fortanix Data Security Manager with EDB Postgres for TDE](https://support.fortanix.com/docs/using-fortanix-data-security-manager-with-edb-postgres-for-tde) for a step-by-step configuration tutorial. - -## Key rotation - -To change the master key, manually run the unwrap command specifying the old key. Then feed the result into the wrap command specifying the new key. Equivalently, if the data key is protected by a passphrase, to change the passphrase, run the unwrap command using the old passphrase. Then feed the result into the wrap command using the new passphrase. You can perform these operations while the database server is running. The wrapped data key in the file is used only on startup. It isn't used while the server is running. - -Building on the example in [Using a passphrase](#using-a-passphrase), which uses openssl, to change the passphrase, you can: - -```shell -cd $PGDATA/pg_encryption/ -openssl enc -d -aes-128-cbc -pbkdf2 -in key.bin | openssl enc -e -aes-128-cbc -pbkdf2 -out key.bin.new -mv key.bin.new key.bin -``` -With this method, the decryption and the encryption commands ask for the passphrase on the terminal at the same time, which is awkward and confusing. An alternative is: - -```shell -cd $PGDATA/pg_encryption/ -openssl enc -d -aes-128-cbc -pbkdf2 -in key.bin -pass pass:ACTUALPASSPHRASE | openssl enc -e -aes-128-cbc -pbkdf2 -out key.bin.new -mv key.bin.new key.bin -``` -This technique leaks the old passphrase, which is being replaced anyway. openssl supports a number of other ways to supply the passphrases. - -When using a key management system, you can connect the unwrap and wrap commands similarly, for example: - -```shell -cd $PGDATA/pg_encryption/ -crypt decrypt aws --in key.bin --region us-east-1 | crypt encrypt aws --out key.bin.new --region us-east-1 --kms alias/pg-tde-master-2 -mv key.bin.new key.bin -``` - -!!! Note - You can't change the data key (the key wrapped by the master key) on an existing data directory. If you need to do that, you need to run the data directory through an upgrade process using pg_dump, pg_upgrade, or logical replication. 
diff --git a/product_docs/docs/tde/15/limitations.mdx b/product_docs/docs/tde/15/limitations.mdx index 4bf457ed03e..2e563cc6c5b 100644 --- a/product_docs/docs/tde/15/limitations.mdx +++ b/product_docs/docs/tde/15/limitations.mdx @@ -1,5 +1,6 @@ --- title: "Limitations" +description: Learn about TDE limitations. --- ## `FILE_COPY` diff --git a/product_docs/docs/tde/15/overview.mdx b/product_docs/docs/tde/15/overview.mdx new file mode 100644 index 00000000000..b8756512cab --- /dev/null +++ b/product_docs/docs/tde/15/overview.mdx @@ -0,0 +1,63 @@ +--- +title: Overview +description: Understand how to initialize a TDE-encrypted cluster. +deepToC: true +redirects: +- /tde/latest/enabling_tde/ +--- + +If you want to start using Transparent Data Encryption (TDE) on your database, you'll want to either create a TDE-enabled database server or migrate an existing database server to a TDE-enabled environment. It isn't possible to enable TDE on existing instances. + +Regardless of whether you're creating a database server from scratch or creating an instance to migrate an existing database server, you have to create a TDE-enabled database by initializing a database cluster using [initdb](https://www.postgresql.org/docs/15/app-initdb.html). + +## Before you begin + +- Choose a method to [secure the data encryption key](secure_key) generated by TDE. + + You can protect the key with a [passphrase](./secure_key/passphrase/) or a wrapping key from a [key store](secure_key/key_store/). Or, for testing purposes, you can choose to [not protect the key](secure_key/disabling_key/). + +- Review the [initdb TDE options](./initdb_tde_options/) to ensure you have all information required for initializing a TDE-enabled database cluster. + +- Review [Limitations and TDE-specific options](./limitations/) to understand limitations and changes in the handling of PostgreSQL utilities when you enable TDE. + +- If you plan on migrating data from an existing database server, ensure you perform a backup of the source database server. + +## Initializing a server + +1. Export the wrapping and unwrapping commands to secure the encryption key. Use the wrapping method you chose during the planning phase. + + Alternatively, you can provide the wrapping and unwrapping commands when initializing the server with the command line arguments. + + See [Providing the wrapping and unwrapping commands](secure_key/#providing-the-wrapping-and-unwrapping-commands-to-tde) for examples. + +1. Initialize a database server with `--data-encryption` enabled on the target directory. Include other [TDE options](initdb_tde_options) as required. + +1. Start the database cluster and [verify that TDE is enabled](./enabling/verifying_tde/). + +See [Tutorials](#tutorials) for detailed initialization examples. + +## Migrating data (for existing instances) + +If you want to migrate data and objects from an existing database server, use pg_upgrade to copy data from an existing instance. + +1. Stop both the source and new server. + +1. Use pg_upgrade with the `--copy-by-block` option to copy data from the source server to the new server. Include other [TDE pg_upgrade options](pg_upgrade_arguments) as required. + +1. Start the new encrypted database server. + +1. Connect to the encrypted database server and ensure the data was transferred. + +1. Perform any required cleanup operations. + +!!!note +See [TDE pg_upgrade use cases](upgrading) for an overview of the supported use cases for enabling and migrating. +!!! 
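+
+As a sketch, with both servers stopped, a block-by-block upgrade from an unencrypted source to a TDE-enabled target might look like the following. The binary and data directory paths are illustrative; substitute your own, as shown in the tutorials below.
+
+```shell
+# Copy data block by block so pg_upgrade can reencrypt it with the new
+# cluster's keys and settings. The unwrap command must match the one
+# used when the encrypted cluster was initialized.
+/usr/lib/edb-as/16/bin/pg_upgrade --copy-by-block \
+  --old-bindir /usr/lib/edb-as/16/bin \
+  --new-bindir /usr/lib/edb-as/16/bin \
+  --old-datadir /var/lib/edb-as/16/main \
+  --new-datadir /var/lib/edb-as/16/TDE \
+  --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
+```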
+ +See [Tutorials](#tutorials) for detailed migration examples. + +## Tutorials + +* [Creating a TDE-enabled database server](enabling/enabling_tde/) +* [Enabling TDE on an existing EDB Postgres Advanced Server database cluster](enabling/enabling_tde_epas/) +* [Upgrading a PostgreSQL database server to EDB Postgres Extended Server while enabling TDE](enabling/postgres_to_extended/) diff --git a/product_docs/docs/tde/15/pg_upgrade_arguments.mdx b/product_docs/docs/tde/15/pg_upgrade_arguments.mdx index 0aa60923fe6..0fb97ea75aa 100644 --- a/product_docs/docs/tde/15/pg_upgrade_arguments.mdx +++ b/product_docs/docs/tde/15/pg_upgrade_arguments.mdx @@ -1,26 +1,24 @@ --- -title: "TDE upgrade arguments" -navTitle: TDE upgrade arguments +title: "pg_upgrade TDE options" +navTitle: pg_upgrade TDE options --- These arguments to [pg_upgrade](https://www.postgresql.org/docs/current/pgupgrade.html) help with upgrading encrypted clusters. -## `--copy-by-block` +## Option: `--copy-by-block` -Copy files to the new cluster block by block instead of the default, which is to copy the whole file at once. This option is the same as the default mode but somewhat slower. It does, however, support upgrades between clusters with different encryption settings. +Copies files to the new cluster block by block instead of the default, which is to copy the whole file at once. This option is the same as the default mode but slower. It does, however, support upgrades between clusters with different encryption settings. You must use this option when upgrading between clusters with different encryption settings, that is, unencrypted to encrypted, encrypted to unencrypted, or both encrypted with different keys. While copying files to the new cluster, it decrypts them and reencrypts them with the keys and settings of the new cluster. -For added certainty, if the old cluster is encrypted and the new cluster was initialized as unencrypted, this option decrypts the data from the old cluster and copies it to the new cluster unencrypted. If the old cluster is unencrypted and the new cluster was initialized as encrypted, this option encrypts the data from the old cluster and places it into the new cluster encrypted. +If the old cluster is encrypted and the new cluster was initialized as unencrypted, this option decrypts the data from the old cluster and copies it to the new cluster unencrypted. If the old cluster is unencrypted and the new cluster was initialized as encrypted, this option encrypts the data from the old cluster and places it into the new cluster encrypted. -See the description of the [initdb --copy-key-from=<file> option](enabling_tde/#using-initdb-tde-options) for information on copying a key from an existing cluster when preparing a new cluster as a target for `pg_upgrade`. +See the description of the [initdb --copy-key-from=<file> option](initdb_tde_options) for information on copying a key from an existing cluster when preparing a new cluster as a target for `pg_upgrade`. -See [Tutorials](upgrade_use_cases/#tutorials) for `--copy-by-block` usage examples. +See [Tutorials](upgrading/#tutorials) for `--copy-by-block` usage examples. -## `--key-unwrap-command=` +## Option: `--key-unwrap-command=` -Specifies a command to unwrap (decrypt) the data encryption key. The command must include a placeholder `%p` that specifies the file to read the wrapped key from. The command needs to write the unwrapped key to its standard output. If you don't specify this option, the environment variable `PGDATAKEYUNWRAPCMD` is used. 
-
-Use the special value `-` if you don't want to apply any key unwrapping command.
-
-You must specify this option or the environment variable fallback if you're using data encryption. See [Securing the data encryption key](./key_stores/) for more information.
+Specifies the command to unwrap (decrypt) the data encryption key and access the files to copy. It must be the same unwrap command you specified during server initialization.
+
+If you don't specify this option, pg_upgrade reads the environment variable `PGDATAKEYUNWRAPCMD`.
diff --git a/product_docs/docs/tde/15/regress_run.mdx b/product_docs/docs/tde/15/regress_run.mdx
deleted file mode 100644
index 875d554a1d2..00000000000
--- a/product_docs/docs/tde/15/regress_run.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: "Testing a TDE configuration"
----
-
-
-To run the tests in [single-user mode](./single_user/) with transparent data encryption enabled, set the environment variable `PG_TEST_USE_DATA_ENCRYPTION`. For example:
-
-```shell
-make check PG_TEST_USE_DATA_ENCRYPTION=1
-```
-
-## See also
-- [Enabling TDE](./enabling_tde/)
-- [PostgreSQL postgres command documentation](https://www.postgresql.org/docs/15/app-postgres.html)
-- [PostgreSQL Running the Tests documentation](https://www.postgresql.org/docs/15/regress-run.html)
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/secure_key/disabling_key.mdx b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
new file mode 100644
index 00000000000..bdf8edb7e19
--- /dev/null
+++ b/product_docs/docs/tde/15/secure_key/disabling_key.mdx
@@ -0,0 +1,9 @@
+---
+title: Disabling the key wrapping
+description: Learn how to omit using a wrapping key.
+deepToC: true
+---
+
+If you don't want key wrapping, for example for testing, set both the wrap and unwrap commands to the special value `-`.
+
+This setting specifies to use the key from the file without further processing. This approach differs from not setting a wrap or unwrap command at all and from setting either or both to an empty string. Having no wrap or unwrap command set when TDE is used leaves your data encryption key unsecured and results in a fatal error when running an affected utility program.
diff --git a/product_docs/docs/tde/15/secure_key/index.mdx b/product_docs/docs/tde/15/secure_key/index.mdx
new file mode 100644
index 00000000000..45ef27d8b51
--- /dev/null
+++ b/product_docs/docs/tde/15/secure_key/index.mdx
@@ -0,0 +1,81 @@
+---
+title: Securing the data encryption key
+description: Learn how to secure your data with an encryption key.
+deepToC: true
+navigation:
+- passphrase
+- key_store
+- disabling_key
+- key_rotation
+redirects:
+- /tde/latest/key_stores/
+---
+
+## Data encryption key
+
+The key for transparent data encryption (the data key) is generated by `initdb` and stored in the file `pg_encryption/key.bin` under the data directory. This file contains several keys that are used for different purposes at runtime. The data key is a single sequence of random bytes in the file.
+
+If you don't perform any further action, this file contains the key in plaintext, which isn't secure. Anyone with access to the encrypted data directory has access to the plaintext key, which defeats the purpose of encryption.
+
+## Choosing a mechanism to protect the data encryption key
+
+To secure the data encryption key, you must specify a wrap and an unwrap command that provide TDE with a mechanism for protecting the data encryption key. The corresponding server parameter, `data_encryption_key_unwrap_command`, can be set only at server start.
+
+With the wrap and unwrap commands you can:
+
+- [Protect the data key with a passphrase](passphrase). A wrapping key is derived from the passphrase and used to encrypt the data key.
+
+- [Protect the data key with a wrapping key stored in a key management system](key_store) or key store. This second key is also called the *key-wrapping key* or *master key*.
+
+- [Disable the data encryption key wrapping](disabling_key). Do this only in test environments, because it leaves the TDE key unprotected.
+
+## Configuring a wrapping and unwrapping command
+
+After you choose a method to protect your key, you can create the wrapping and unwrapping commands. The configuration of these commands is left to you, which allows you to tailor the setup to local requirements and integrate with existing key management software.
+
+[Using a passphrase](passphrase) provides an example of wrapping and unwrapping commands that use OpenSSL and a passphrase to secure the TDE data key. [Using a key store](key_store) provides an example of wrapping and unwrapping commands that use an external key store key to protect the TDE data key.
+
+When you initialize a server with TDE, the `initdb` command adds the `data_encryption_key_unwrap_command` parameter to the `postgresql.conf` configuration file. The command specified in `data_encryption_key_unwrap_command` can then unwrap (decrypt) the data encryption key.
+
+Both commands must contain the placeholder `%p`, which is replaced with the name of the file that holds the wrapped key. The unwrap command must print the unwrapped (decrypted) key to its standard output.
+
+## Providing the wrapping and unwrapping commands to TDE
+
+You must make the commands available to the TDE database server so it can wrap and unwrap the data encryption key. You have the following options:
+
+- You can configure the wrapping and unwrapping commands as environment variables before creating the database, so TDE can fall back on those variables when initializing a server:
+
Example
+
+ ```shell
+ PGDATAKEYWRAPCMD='openssl enc -e -aes128-wrap -pbkdf2 -out "%p"'
+ PGDATAKEYUNWRAPCMD='openssl enc -d -aes128-wrap -pbkdf2 -in "%p"'
+ export PGDATAKEYWRAPCMD PGDATAKEYUNWRAPCMD
+ # After these variables are set, you can initialize the server:
+ initdb --data-encryption -D /var/lib/edb/as16/data
+ ```
+
+
+
+- You can provide the wrapping and unwrapping commands directly on the command line when initializing a server, using the `--key-wrap-command=` and `--key-unwrap-command=` options:
+
Example + + ```shell + initdb --data-encryption -D /var/lib/edb/as16/data --key-wrap-command='openssl enc -e -aes128-wrap -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes128-wrap -pbkdf2 -in "%p"' + ``` + +
+
+
+- You can disable the protection of your data encryption key with `--no-key-wrap`. Doing so leaves your key unprotected, and we recommend this practice only for testing purposes.
+
Example + + ```shell + initdb --data-encryption -D /var/lib/edb/as16/data --no-key-wrap + ``` + +
+
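+
+As a quick sanity check after initialization (a sketch; the data directory path is an example), you can confirm the unwrap command that `initdb` recorded for the server to use at startup:
+
+```shell
+grep data_encryption_key_unwrap_command /var/lib/edb/as16/data/postgresql.conf
+```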
diff --git a/product_docs/docs/tde/15/secure_key/key_rotation.mdx b/product_docs/docs/tde/15/secure_key/key_rotation.mdx new file mode 100644 index 00000000000..a24799b5d33 --- /dev/null +++ b/product_docs/docs/tde/15/secure_key/key_rotation.mdx @@ -0,0 +1,42 @@ +--- +title: Rotating the data encryption key +description: Learn how to rotate your data encryption key. +deepToc: true +--- + +To change the master key, manually run the unwrap command, specifying the old key. Then feed the result into the wrap command, specifying the new key. + +If the data key is protected by a passphrase, to change the passphrase, run the unwrap command using the old passphrase. Then feed the result into the wrap command using the new passphrase. + +You can perform these operations while the database server is running. The wrapped data key in the file is used only on startup. It isn't used while the server is running. + +## Rotating the passphrase + +Building on the example in [Using a passphrase](passphrase), which uses OpenSSL, to change the passphrase, you can use this approach: + +```shell +cd $PGDATA/pg_encryption/ +openssl enc -d -aes-128-cbc -pbkdf2 -in key.bin | openssl enc -e -aes-128-cbc -pbkdf2 -out key.bin.new +mv key.bin.new key.bin +``` +With this method, the decryption and the encryption commands ask for the passphrase on the terminal at the same time, which is awkward and confusing. An alternative is: + +```shell +cd $PGDATA/pg_encryption/ +openssl enc -d -aes-128-cbc -pbkdf2 -in key.bin -pass pass:ACTUALPASSPHRASE | openssl enc -e -aes-128-cbc -pbkdf2 -out key.bin.new +mv key.bin.new key.bin +``` +This technique leaks the old passphrase, which is being replaced anyway. OpenSSL supports a number of other ways to supply the passphrases. + +## Rotating the key store wrapping key + +When using a [key store](key_store), you can connect the unwrap and wrap commands similarly. For example: + +```shell +cd $PGDATA/pg_encryption/ +crypt decrypt aws --in key.bin --region us-east-1 | crypt encrypt aws --out key.bin.new --region us-east-1 --kms alias/pg-tde-master-2 +mv key.bin.new key.bin +``` + +!!! Note + You can't change the data key (the key wrapped by the master key) on an existing data directory. If you need to do that, you need to run the data directory through an upgrade process using pg_dump, pg_upgrade, or logical replication. diff --git a/product_docs/docs/tde/15/secure_key/key_store.mdx b/product_docs/docs/tde/15/secure_key/key_store.mdx new file mode 100644 index 00000000000..55396044979 --- /dev/null +++ b/product_docs/docs/tde/15/secure_key/key_store.mdx @@ -0,0 +1,100 @@ +--- +title: Using a key store +description: Learn how to secure your encryption key with a KMS key. +deepToC: true +--- + +You can use the key store in an external key management system to manage the data encryption key. The tested and supported key stores are: + +- Amazon AWS Key Management Service (KMS) +- Microsoft Azure Key Vault +- Google Cloud - Cloud Key Management Service +- HashiCorp Vault (KMIP Secrets Engine and Transit Secrets Engine) +- Thales CipherTrust Manager +- Fortanix Data Security Manager +- Entrust KeyControl + +To use one of the available key stores, see the configuration examples. + +## AWS Key Management Service example + +Create a key with [AWS Key Management Service](https://docs.aws.amazon.com/kms/): + +```shell +aws kms create-key +aws kms create-alias --alias-name alias/pg-tde-master-1 --target-key-id "..." 
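+# The "..." placeholder above stands for the KeyId value returned by
+# `aws kms create-key`; the alias lets the wrap and unwrap commands
+# refer to that key by name.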
+``` + +Use the `aws kms` command with the `alias/pg-tde-master-1` key to wrap and unwrap the data encryption key: + +```shell +PGDATAKEYWRAPCMD='aws kms encrypt --key-id alias/pg-tde-master-1 --plaintext fileb:///dev/stdin --output text --query CiphertextBlob | base64 -d > "%p"' +PGDATAKEYUNWRAPCMD='aws kms decrypt --key-id alias/pg-tde-master-1 --ciphertext-blob fileb://"%p" --output text --query Plaintext | base64 -d' +``` +!!! Note + Shell commands with pipes, as in this example, are problematic because the exit status of the pipe is that of the last command. A failure of the first, more interesting command isn't reported properly. Postgres handles this somewhat by recognizing whether the wrap or unwrap command wrote nothing. However, it's better to make this command more robust. For example, use the `pipefail` option available in some shells or the `mispipe` command available on some operating systems. Put more complicated commands into an external shell script or other program instead of defining them inline. + +## Azure Key Vault example + +Create a key with [Azure Key Vault](https://learn.microsoft.com/en-us/azure/key-vault/): + +```shell +az keyvault key create --vault-name pg-tde --name pg-tde-master-1 +``` + +Use the `az keyvault key` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: + +```shell +PGDATAKEYWRAPCMD='az keyvault key encrypt --name pg-tde-master-1 --vault-name pg-tde --algorithm A256GCM --value @- --data-type plaintext --only-show-errors --output json | jq -r .result > "%p"' +PGDATAKEYUNWRAPCMD='az keyvault key decrypt --name pg-tde-master-1 --vault-name pg-tde --algorithm A256GCM --value @"%p" --data-type plaintext --only-show-errors --output json | jq -r .result' +``` +!!! Note + Shell commands with pipes, as in this example, are problematic because the exit status of the pipe is that of the last command. A failure of the first, more interesting command isn't reported properly. Postgres handles this somewhat by recognizing whether the wrap or unwrap command wrote nothing. However, it's better to make this command more robust. For example, use the `pipefail` option available in some shells or the `mispipe` command available on some operating systems. Put more complicated commands into an external shell script or other program instead of defining them inline. 
+ +## Google Cloud KMS example + +Create a key with [Google Cloud KMS](https://cloud.google.com/kms/docs): + +```shell +gcloud kms keys create pg-tde-master-1 --location=global --keyring=pg-tde --purpose=encryption +``` + +Use the `gcloud kms` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: + +```shell +PGDATAKEYWRAPCMD='gcloud kms encrypt --plaintext-file=- --ciphertext-file=%p --location=global --keyring=pg-tde --key=pg-tde-master-1' +PGDATAKEYUNWRAPCMD='gcloud kms decrypt --plaintext-file=- --ciphertext-file=%p --location=global --keyring=pg-tde --key=pg-tde-master-1' +``` + +## HashiCorp Vault Transit Secrets Engine example + +Enable transit with [HashiCorp Vault Transit Secrets Engine](https://developer.hashicorp.com/vault/docs): + +```shell +vault secrets enable transit +``` + +Create a key and give it a name: + +```shell +vault write -f transit/keys/pg-tde-master-1 +``` + +Use the `vault write` command with the `pg-tde-master-1` key to wrap and unwrap the data encryption key: + +``` +PGDATAKEYWRAPCMD='base64 | vault write -field=ciphertext transit/encrypt/pg-tde-master-1 plaintext=- > %p' +PGDATAKEYUNWRAPCMD='vault write -field=plaintext transit/decrypt/pg-tde-master-1 ciphertext=- < %p | base64 -d' +``` + +## Thales CipherTrust Manager example + +See [Using](/partner_docs/ThalesCipherTrustManager/05-UsingThalesCipherTrustManager/) in the [Implementing Thales CipherTrust Manager](/partner_docs/ThalesCipherTrustManager/) documentation for instructions on how to wrap the data encryption key with a key from the Thales key store. + +## Fortanix Data Security Manager example + +See [Using Fortanix Data Security Manager with EDB Postgres for TDE](https://support.fortanix.com/docs/using-fortanix-data-security-manager-with-edb-postgres-for-tde) for a step-by-step configuration tutorial. + +## Entrust KeyControl integration guide + +See the [EDB Postgres and Entrust KeyControl](https://www.entrust.com/sites/default/files/2024-03/edb-postgres-and-entrust-keycontrol-ig.pdf) integration guide for installation, configuration and usage instructions (including key rotation). \ No newline at end of file diff --git a/product_docs/docs/tde/15/secure_key/passphrase.mdx b/product_docs/docs/tde/15/secure_key/passphrase.mdx new file mode 100644 index 00000000000..f09ac41f70f --- /dev/null +++ b/product_docs/docs/tde/15/secure_key/passphrase.mdx @@ -0,0 +1,41 @@ +--- +title: Using a passphrase +description: Learn how to secure your encryption key with a passphrase. +--- + +You can protect the data key with a passphrase using the OpenSSL command line utility. The following is an example that sets up this protection: + +```shell +initdb -D datadir -y --key-wrap-command='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"' --key-unwrap-command='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"' +``` + +This example wraps the randomly generated data key (done internally by initdb) by encrypting it with the AES-128-CBC (AESKW) algorithm. The encryption uses a key derived from a passphrase with the PBKDF2 key derivation function and a randomly generated salt. The terminal prompts for the passphrase. (See the openssl-enc manual page for details about these options. Available options vary across versions.) The initdb utility replaces `%p` with the name of the file that stores the wrapped key. + +The unwrap command performs the opposite operation. initdb doesn't need the unwrap operation. 
However, initdb stores the unwrap command in the `postgresql.conf` file
+of the initialized cluster, and the server uses it when it starts up.
+
+The key wrap command receives the plaintext key on standard input and needs to put the wrapped key at the file system location specified by the `%p` placeholder. The key unwrap command needs to read the wrapped key from the file system location specified by the `%p` placeholder and write the unwrapped key to its standard output.
+
+Utility programs like pg_rewind and pg_upgrade operate directly on the data directory or copies, such as backups. These programs also need to be told about the key unwrap command, depending on the circumstances. They each have command line options for this purpose.
+
+To simplify operations, you can also set the key wrap and unwrap commands in environment variables. These are accepted by all affected applications if you don't provide the corresponding command line options. For example:
+
+```shell
+PGDATAKEYWRAPCMD='openssl enc -e -aes-128-cbc -pbkdf2 -out "%p"'
+PGDATAKEYUNWRAPCMD='openssl enc -d -aes-128-cbc -pbkdf2 -in "%p"'
+export PGDATAKEYWRAPCMD PGDATAKEYUNWRAPCMD
+```
+
+Key unwrap commands that prompt for passwords on the terminal don't work when the server is started by pg_ctl or through service managers such as systemd. The server is detached from the terminal in those environments. If you want an interactive password prompt on server start, you need a more elaborate configuration that fetches the password using some indirect mechanism.
+
+For example, for systemd, you can use `systemd-ask-password`:
+
+```
+PGDATAKEYWRAPCMD="bash -c 'openssl enc -e -aes-128-cbc -pbkdf2 -out %p -pass file:<(sudo systemd-ask-password --no-tty)'"
+PGDATAKEYUNWRAPCMD="bash -c 'openssl enc -d -aes-128-cbc -pbkdf2 -in %p -pass file:<(sudo systemd-ask-password --no-tty)'"
+```
+
+You also need an entry like the following in `/etc/sudoers`:
+
+```
+postgres ALL = NOPASSWD: /usr/bin/systemd-ask-password
+```
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/single_user.mdx b/product_docs/docs/tde/15/single_user.mdx
deleted file mode 100644
index fbe45678778..00000000000
--- a/product_docs/docs/tde/15/single_user.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: "Single-user mode"
----
-
-If you invoke [postgres](https://www.postgresql.org/docs/15/app-postgres.html) in single-user mode with TDE enabled, the `postgres` command reads either:
-
-- The `PGDATAKEYUNWRAPCMD` environment variable, if set
-- The `data_encryption_key_unwrap_command` value in the `postgresql.conf` file
\ No newline at end of file
diff --git a/product_docs/docs/tde/15/support.mdx b/product_docs/docs/tde/15/support.mdx
new file mode 100644
index 00000000000..7df4d89e03e
--- /dev/null
+++ b/product_docs/docs/tde/15/support.mdx
@@ -0,0 +1,26 @@
+---
+title: TDE compatibility
+description: You can deploy TDE-enabled servers on several supported database distributions and technologies.
+---
+
+You can create TDE-enabled database servers on several database distributions and technologies.
+
+## Supported database distributions
+
+You can create TDE-enabled databases on:
+
+- EDB Postgres Advanced Server 15 and later versions.
+- EDB Postgres Extended Server 15 and later versions.
+
+!!!note
+    EDB doesn't support creating TDE-enabled servers on PostgreSQL. TDE relies on a mechanism developed by EDB for EDB Postgres Advanced and Extended Servers.
+!!!
+
+## Supported technologies
+
+You can create TDE-enabled servers in the following environments:
+
+- [EDB Postgres Distributed (PGD)](/pgd/latest/)
+- [EDB Postgres Distributed for Kubernetes](/postgres_distributed_for_kubernetes/latest/)
+- [EDB Postgres for Kubernetes](/postgres_for_kubernetes/latest/)
+- [EDB Postgres AI Cloud Service](/edb-postgres-ai/console/using/projects/settings/security/)
diff --git a/product_docs/docs/tde/15/upgrade_use_cases/index.mdx b/product_docs/docs/tde/15/upgrading.mdx
similarity index 61%
rename from product_docs/docs/tde/15/upgrade_use_cases/index.mdx
rename to product_docs/docs/tde/15/upgrading.mdx
index 4d7ec8bc033..4dff1169f92 100644
--- a/product_docs/docs/tde/15/upgrade_use_cases/index.mdx
+++ b/product_docs/docs/tde/15/upgrading.mdx
@@ -1,9 +1,13 @@
 ---
-title: "TDE pg_upgrade use cases"
-navTitle: TDE pg_upgrade use cases
+title: "Upgrading a TDE-enabled database"
+navTitle: Upgrading
+description: Learn how you can use pg_upgrade to upgrade or migrate TDE databases.
+redirects:
+  - /tde/latest/upgrade_use_cases/ #generated for TDE/refresh
 ---

-EDB supports using [pg_upgrade](https://www.postgresql.org/docs/current/pgupgrade.html) with additional [EDB upgrade arguments](../pg_upgrade_arguments) to add encryption to unencrypted systems.
+You can use [pg_upgrade](https://www.postgresql.org/docs/current/pgupgrade.html) with additional TDE arguments to upgrade or migrate TDE-enabled clusters.
+
 This table provides an overview of supported use cases.

 | Use case | Source unencrypted server | Target encrypted server |
@@ -14,18 +18,8 @@ This table provides an overview of supported use cases.
 | Maintain the Postgres distribution and rotate encryption keys | Encrypted EDB Postgres Advanced Server 15 | Encrypted EDB Postgres Advanced Server 15 with new encryption keys |

 !!! Important
-    Both source and target servers must be in the same Postgres major version. `pg_upgrade` only supports upgrades between minor versions.
-
-## Overview
-
-To enable encryption:
-
-1. Perform a backup of your system.
-1. Install the target Postgres version.
-1. Initialize a new server with TDE enabled.
-1. Use `pg_upgrade` with the `--copy-by-block` option to upgrade to a TDE system.
+    Both source and target servers must be in the same Postgres major version. pg_upgrade only supports upgrades between minor versions.

 ## Tutorials

-* [Enable TDE on an existing EDB Postgres Advanced Server database cluster](../enabling_tde_epas).
-* [Upgrade a PostgreSQL database server to EDB Postgres Extended Server while enabling TDE](postgres_to_extended).
\ No newline at end of file
+See [Tutorials](enabling) for a list of step-by-step tutorials that highlight different migration scenarios.
\ No newline at end of file
diff --git a/product_docs/docs/tpa/23/architecture-M1.mdx b/product_docs/docs/tpa/23/architecture-M1.mdx
index 64718467585..848001483ce 100644
--- a/product_docs/docs/tpa/23/architecture-M1.mdx
+++ b/product_docs/docs/tpa/23/architecture-M1.mdx
@@ -5,11 +5,8 @@

 ---

-A Postgres cluster with one or more active locations, each with the same
-number of Postgres nodes and an extra Barman node. Optionally, there can
-also be a location containing only a witness node, or a location
-containing only a single node, even if the active locations have more
-than one.
+A Postgres cluster with a single primary node and physical replication
+to a number of standby nodes, with backup and failover management included.
This architecture is suitable for production and is also suited to
testing, demonstrating and learning due to its simplicity and ability to

@@ -19,25 +16,53 @@
If you select subscription-only EDB software with this architecture
it will be sourced from EDB Repos 2.0 and you will need to
[provide a token](reference/edb_repositories/).

-## Application and backup failover
-
-The M1 architecture implements failover management in that it ensures
-that a replica will be promoted to take the place of the primary should
-the primary become unavailable. However it *does not provide any
-automatic facility to reroute application traffic to the primary*. If
-you require, automatic failover of application traffic you will need to
-configure this at the application itself (for example using multi-host
-connections) or by using an appropriate proxy or load balancer and the
-facilities offered by your selected failover manager.
-
-The above is also true of the connection between the backup node and the
-primary created by TPA. The backup will not be automatically adjusted to
-target the new primary in the event of failover, instead it will remain
-connected to the original primary. If you are performing a manual
-failover and wish to connect the backup to the new primary, you may
-simply re-run `tpaexec deploy`. If you wish to automatically change the
-backup source, you should implement this using your selected failover
-manager as noted above.
+## Failover management
+
+The M1 architecture always includes a failover manager. Supported
+options are repmgr, EDB Failover Manager (EFM) and Patroni. In all
+cases, the failover manager will be configured by default to ensure that
+a replica will be promoted to take the place of the primary should the
+primary become unavailable.
+
+### Application failover
+
+The M1 architecture does not generally provide an automatic facility to
+reroute application traffic to the primary. There are several ways you
+can add this capability to your cluster.
+
+In TPA:
+
+- If you choose repmgr as the failover manager and enable PgBouncer, you
+  can include the `repmgr_redirect_pgbouncer: true` hash under
+  `cluster_vars` in `config.yml`. This causes repmgr to automatically
+  reconfigure PgBouncer to route traffic to the new primary on failover
+  (see the example config.yml sketch below).
+
+- If you choose Patroni as the failover manager and enable PgBouncer,
+  Patroni will automatically reconfigure PgBouncer to route traffic to
+  the new primary on failover.
+
+- If you choose EFM as the failover manager, you can use the
+  `efm_conf_settings` hash under `cluster_vars` in `config.yml` to
+  [configure EFM to use a virtual IP address
+  (VIP)](/efm/latest/04_configuring_efm/05_using_vip_addresses/). This
+  is an additional IP address which will always route to the primary
+  node.
+
+- Place an appropriate proxy or load balancer between the cluster and
+  your application, and use a [TPA hook](tpaexec-hooks/) to configure
+  your selected failover manager to update it with the route to the new
+  primary on failover.
+
+- Handle failover at the application itself, for example by using
+  multi-host connection strings.
+
+### Backup failover
+
+TPA does not configure any kind of 'backup failover'. If the Postgres
+node from which you are backing up is down, backups will simply halt
+until the node is back online. To manually connect the backup to the new
+primary, edit `config.yml` to add the `backup` hash to the new primary
+instance and re-run `tpaexec deploy`.
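+
+For example, a minimal `config.yml` sketch (abridged; it assumes repmgr as the failover manager and PgBouncer enabled) of the repmgr/PgBouncer redirection option described above:
+
+```yaml
+cluster_vars:
+  failover_manager: repmgr
+  repmgr_redirect_pgbouncer: true
+```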
## Cluster configuration

@@ -78,18 +103,18 @@ More detail on the options is provided in the following section.

 #### Additional Options

-| Parameter                   | Description | Behaviour if omitted |
-| --------------------------- | ----------- | -------------------- |
-| `--platform`                | One of `aws`, `docker`, `bare`. | Defaults to `aws`. |
-| `--location-names`          | A space-separated list of location names. The number of active locations is equal to the number of names supplied, minus one for each of the witness-only location and the single-node location if they are requested. | A single location called "main" is used. |
-| `--primary-location`        | The location where the primary server will be. Must be a member of `location-names`. | The first listed location is used. |
-| `--data-nodes-per-location` | A number from 1 upwards. In each location, one node will be configured to stream directly from the cluster's primary node, and the other nodes, if present, will stream from that one. | Defaults to 2. |
-| `--witness-only-location`   | A location name, must be a member of `location-names`. | No witness-only location is added. |
-| `--single-node-location`    | A location name, must be a member of `location-names`. | No single-node location is added. |
-| `--enable-haproxy`          | 2 additional nodes will be added as a load balancer layer.<br/>Only supported with Patroni as the failover manager. | HAproxy nodes will not be added to the cluster. |
-| `--enable-pgbouncer`        | PgBouncer will be configured in the Postgres nodes to pool connections for the primary. | PgBouncer will not be configured in the cluster. |
-| `--patroni-dcs`             | Select the Distributed Configuration Store backend for patroni.<br/>Only option is `etcd` at this time.<br/>Only supported with Patroni as the failover manager. | Defaults to `etcd`. |
-| `--efm-bind-by-hostname`    | Enable efm to use hostnames instead of IP addresses to configure the cluster `bind.address`. | Defaults to use IP addresses |
+| Parameter                   | Description | Behaviour if omitted |
+| --------------------------- | ----------- | -------------------- |
+| `--platform`                | One of `aws`, `docker`, `bare`. | Defaults to `aws`. |
+| `--location-names`          | A space-separated list of location names. The number of locations is equal to the number of names supplied. | A single location called "main" is used. |
+| `--primary-location`        | The location where the primary server will be. Must be a member of `location-names`. | The first listed location is used. |
+| `--data-nodes-per-location` | A number from 1 upwards. In each location, one node will be configured to stream directly from the cluster's primary node, and the other nodes, if present, will stream from that one. | Defaults to 2. |
+| `--witness-only-location`   | A location name, must be a member of `location-names`. This location will be populated with a single witness node only. | No witness-only location is added. |
+| `--single-node-location`    | A location name, must be a member of `location-names`. This location will be populated with a single data node only. | No single-node location is added. |
+| `--enable-haproxy`          | Two additional nodes will be added as a load balancer layer.<br/>Only supported with Patroni as the failover manager. | HAproxy nodes will not be added to the cluster. |
+| `--enable-pgbouncer`        | PgBouncer will be configured in the Postgres nodes to pool connections for the primary. | PgBouncer will not be configured in the cluster. |
+| `--patroni-dcs`             | Select the Distributed Configuration Store backend for patroni.<br/>Only option is `etcd` at this time.<br/>Only supported with Patroni as the failover manager. | Defaults to `etcd`. |
+| `--efm-bind-by-hostname`    | Enable efm to use hostnames instead of IP addresses to configure the cluster `bind.address`. | Defaults to use IP addresses |
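+
+For illustration, a hypothetical `tpaexec configure` invocation combining several of the options above (the cluster directory, platform, and location names are examples only):
+
+```bash
+tpaexec configure ~/clusters/m1 \
+  --architecture M1 \
+  --postgresql 15 \
+  --platform docker \
+  --location-names main dr \
+  --data-nodes-per-location 2 \
+  --witness-only-location dr
+```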

diff --git a/product_docs/docs/tpa/23/architecture-PGD-Always-ON.mdx b/product_docs/docs/tpa/23/architecture-PGD-Always-ON.mdx
index 948cf5d1aba..4dd318ac627 100644
--- a/product_docs/docs/tpa/23/architecture-PGD-Always-ON.mdx
+++ b/product_docs/docs/tpa/23/architecture-PGD-Always-ON.mdx
@@ -5,10 +5,10 @@ originalFilePath: architecture-PGD-Always-ON.md

 ---

-!!! Note
+!!!Note
+
     This architecture is for Postgres Distributed 5 only.
     If you require PGD 4 or 3.7 please use [BDR-Always-ON](architecture-BDR-Always-ON/).
-!!!

 EDB Postgres Distributed 5 in an Always-ON configuration,
 suitable for use in test and production.

@@ -85,9 +85,9 @@ data centre that provides a level of redundancy, in whatever way
 this definition makes sense to your use case. For example, AWS
 regions, your own data centres, or any other designation to identify
 where your servers are hosted.
+!!!

-
-!!! Note Note for AWS users
+!!! Note for AWS users

     If you are using TPA to provision an AWS cluster, the locations will
     be mapped to separate availability zones within the `--region` you
diff --git a/product_docs/docs/tpa/23/reference/architecture-PGD-Lightweight.mdx b/product_docs/docs/tpa/23/reference/architecture-PGD-Lightweight.mdx
new file mode 100644
index 00000000000..a9b2f88d5d4
--- /dev/null
+++ b/product_docs/docs/tpa/23/reference/architecture-PGD-Lightweight.mdx
@@ -0,0 +1,103 @@
+---
+description: Configuring a PGD Lightweight cluster with TPA.
+title: PGD Lightweight
+originalFilePath: architecture-PGD-Lightweight.md
+
+---
+
+!!! Note
+
+    This architecture is for Postgres Distributed 5 only.
+    If you require PGD 4 or 3.7 please use [BDR-Always-ON](../architecture-BDR-Always-ON/).
+
+EDB Postgres Distributed 5 in a Lightweight configuration,
+suitable for use in test and production.
+
+This architecture requires an EDB subscription.
+All software will be sourced from [EDB Repos 2.0](edb_repositories/).
+
+## Cluster configuration
+
+### Overview of configuration options
+
+An example invocation of `tpaexec configure` for this architecture
+is shown below.
+
+```bash
+ tpaexec configure ~/clusters/pgd-lw \
+  --architecture Lightweight \
+  --edb-postgres-extended 15 \
+  --platform aws --instance-type t3.micro \
+  --distribution Debian \
+  --location-names main dr
+```
+
+You can list all available options using the help command.
+
+```bash
+tpaexec configure --architecture Lightweight --help
+```
+
+The table below describes the mandatory options for PGD Lightweight
+and additional important options.
+More detail on the options is provided in the following section.
+
+#### Mandatory Options
+
+| Options                                                | Description                                                                                   |
+| ------------------------------------------------------ | --------------------------------------------------------------------------------------------- |
+| `--architecture` (`-a`)                                 | Must be set to `Lightweight`                                                                   |
+| Postgres flavour and version (e.g. `--postgresql 15`)   | A valid [flavour and version specifier](../tpaexec-configure/#postgres-flavour-and-version).  |

+
+#### Additional Options

+| Options                          | Description                                                                                                   | Behaviour if omitted                                         |
+| -------------------------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------ |
+| `--platform`                     | One of `aws`, `docker`, `bare`.                                                                                  | Defaults to `aws`.                                            |
+| `--location-names`               | A space-separated list of location names. The number of locations is equal to the number of names supplied.     | TPA will configure a single location with three data nodes.  |
+| `--add-proxy-nodes-per-location` | The number of proxy nodes in each location.                                                                      | PGD-proxy will be installed on each data node.                |
+| `--bdr-database`                 | The name of the database to be used for replication.                                                             | Defaults to `bdrdb`.                                          |
+| `--enable-pgd-probes`            | Enable http(s) api endpoints for pgd-proxy such as `health/is-ready` to allow probing the proxy's health.        | Disabled by default.                                          |
+| `--proxy-listen-port`            | The port on which proxy nodes will route traffic to the write leader.                                            | Defaults to 6432                                              |
+| `--proxy-read-only-port`         | The port on which proxy nodes will route read-only traffic to shadow nodes.                                      | Defaults to 6433                                              |
+
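+For example (a sketch; the host and port are placeholders, since the probe port depends on your configuration), with probes enabled you can check a proxy's health over plain HTTP:
+
+```bash
+curl http://<proxy-host>:<probe-port>/health/is-ready
+```
+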

+
+### More detail about Lightweight configuration
+
+A PGD Lightweight cluster comprises two locations, with an active primary location containing two nodes and a disaster recovery (dr) location with a single node.
+
+Location names for the cluster are specified as
+`--location-names primary dr`. A location represents an independent
+data centre that provides a level of redundancy, in whatever way
+this definition makes sense to your use case. For example, AWS
+regions, your own data centres, or any other designation to identify
+where your servers are hosted.
+
+!!! Note for AWS users
+
+    If you are using TPA to provision an AWS cluster, the locations will
+    be mapped to separate availability zones within the `--region` you
+    specify.
+    You may specify multiple `--regions`, but TPA does not currently set
+    up VPC peering to allow instances in different regions to
+    communicate with each other. For a multi-region cluster, you will
+    need to set up VPC peering yourself.
+
+By default, every data node (in every location) will also run PGD-Proxy
+for connection routing. To create separate PGD-Proxy instances instead,
+use `--add-proxy-nodes-per-location 3` (or however many proxies you want
+to add).
+
+Global routing will make every proxy route to a single write leader, elected amongst all available data nodes across all locations.
+
+You may optionally specify `--bdr-database dbname` to set the name of
+the database with BDR enabled (default: bdrdb).
+
+You may optionally specify `--enable-pgd-probes [{http, https}]` to
+enable http(s) api endpoints that let you easily probe the proxy's health.
+
+You may also specify any of the options described by
+[`tpaexec help configure-options`](../tpaexec-configure/).
diff --git a/product_docs/docs/tpa/23/reference/barman.mdx b/product_docs/docs/tpa/23/reference/barman.mdx
index 80f7546e683..d9c8c10d1e4 100644
--- a/product_docs/docs/tpa/23/reference/barman.mdx
+++ b/product_docs/docs/tpa/23/reference/barman.mdx
@@ -90,3 +90,121 @@ them to each other's authorized_keys file.
 The postgres user must be able to ssh to the barman server in order to
 archive WAL segments (if configured), and the barman user must be able
 to ssh to the Postgres instance to take or restore backups.
+
+## `barman` and `barman_role` Postgres users
+
+TPA will create two Postgres users, `barman` and `barman_role`.
+
+TPA versions `<23.35` created the `barman` Postgres user as a `superuser`.
+
+Beginning with `23.35` the `barman` user is created with `NOSUPERUSER`,
+so any re-deploys on existing clusters will remove the `superuser` attribute
+from the `barman` Postgres user. Instead, the `barman_role` is granted the
+required set of privileges and the `barman` user is granted `barman_role` membership.
+
+This avoids granting the `superuser` attribute to the `barman` user;
+instead, `barman_role` carries the set of privileges listed in the
+[Barman Manual](https://docs.pgbarman.org/release/latest/#postgresql-connection).
+
+## Shared Barman server
+
+!!! Note
+
+    To use the shared Barman functionality with clusters created using a
+    TPA version earlier than 23.35, you must:
+
+    1. Upgrade to a version of TPA that supports creating shared Barman
+       instances.
+    2. After upgrading TPA, run deploy on $first-cluster so TPA can make
+       the necessary config changes for subsequent clusters to run
+       smoothly against the shared Barman node.
+
+Some deployments may want to share a single Barman server for multiple
+clusters.
Shared Barman server deployment is supported in
+tpaexec via the `barman_shared` setting, which you can set via
+`vars:` under the Barman server instance in the config of any cluster
+that plans to use an existing Barman server. `barman_shared` is a
+boolean variable, so the possible values are `true` and `false` (the
+default). When making any changes to the Barman config in a shared
+scenario, you must ensure that configurations across multiple clusters
+remain in sync, so as to avoid a scenario where one cluster adds a
+specific configuration and a second cluster overrides it.
+
+A typical workflow for using a shared Barman server across multiple
+clusters is described below.
+
+1. Create a TPA cluster with an instance that has the `barman` role
+   (call it 'first-cluster' for this example).
+
+2. In the second cluster (second-cluster for example), reference this
+   particular Barman instance from $clusters/first-cluster as a shared
+   Barman server instance and use `bare` as the platform so TPA doesn't
+   try to create a new Barman instance when running provision. Also
+   specify the IP address of the Barman instance that this cluster can
+   use to access it.
+
+   ```yml
+   - Name: myBarman
+     node: 5
+     role:
+     - barman
+     platform: bare
+     ip_address: x.x.x.x
+     vars:
+       barman_shared: true
+   ```
+
+3. Once the second-cluster is provisioned but before running deploy,
+   make sure that it can access the Barman server instance via ssh. You
+   can allow this access by copying the second-cluster's public key to
+   the Barman server instance via `ssh-copy-id` and then opening an ssh
+   session to make sure you can log in without having to specify the
+   password.
+
+   ```bash
+   # add first-cluster's key to the ssh-agent
+   $ cd $clusters/first-cluster
+   $ ssh-add id_first-cluster
+   $ cd $clusters/second-cluster
+   $ ssh-keyscan -t rsa,ecdsa -4 $barman-server-ip >> tpa_known_hosts
+   $ ssh-copy-id -i id_second-cluster.pub -o 'UserKnownHostsFile=tpa_known_hosts' $user@$barman-server-ip
+   $ ssh -F ssh_config $barman-server
+   ```

+4. Copy the Barman user's keys from first-cluster to second-cluster:
+   ```bash
+   $ mkdir $clusters/second-cluster/keys
+   $ cp $clusters/first-cluster/keys/id_barman* clusters/second-cluster/keys
+   ```
+
+5. Run `tpaexec deploy $clusters/second-cluster`.
+
+!!! Note
+
+    You must use caution when setting up clusters that share a Barman
+    server instance. There are a number of important aspects you must
+    consider before attempting such a setup. For example:
+
+    1. Making sure that no two instances in any of the clusters sharing a
+       Barman server use the same name.
+    2. Barman configuration and settings otherwise should remain in sync in
+       all the clusters using a common Barman server, to avoid a scenario
+       where one cluster sets up a specific configuration and the others do
+       not, either because the configuration is missing or uses a different
+       value.
+    3. The version of Postgres on the instances being backed up across
+       different clusters needs to be the same.
+    4. Different clusters using a common Barman server cannot specify
+       different versions of Barman packages when attempting to override
+       the default.
+
+Some of these may be addressed in a future release as we continue to
+improve the shared Barman server support.
+
+!!!Warning
+
+Be extremely careful when deprovisioning clusters sharing a common
+Barman node, especially when the first cluster that deployed Barman
+uses a non-bare platform. Deprovisioning the first cluster that
+originally provisioned and deployed Barman will effectively leave
+other clusters sharing the Barman node in an inconsistent state
+because the Barman node will already have been deprovisioned by the
+first cluster and it won't exist anymore.
+!!!
diff --git a/product_docs/docs/tpa/23/reference/compliance.mdx b/product_docs/docs/tpa/23/reference/compliance.mdx
new file mode 100644
index 00000000000..54b6d864555
--- /dev/null
+++ b/product_docs/docs/tpa/23/reference/compliance.mdx
@@ -0,0 +1,166 @@
+---
+description: Generating standards-compliant clusters with TPA
+title: Compliance
+originalFilePath: compliance.md
+
+---
+
+TPA can generate configurations designed to make it easy for a
+cluster to comply with the STIG or CIS standards. If you pass
+`--compliance stig` or `--compliance cis` to `tpaexec configure`,
+TPA will:
+
+- Check that other options are compatible with the appropriate
+  standard.
+- Add various entries to the generated `config.yml`, including
+  marking that this is a cluster meant to comply with a particular
+  standard and setting Postgres configuration as required by
+  the standard.
+- Adjust some deployment tasks to enforce compliance.
+- Run checks at the end of deployment.
+
+The deploy-time checks can
+be skipped by giving the option `--excluded_tasks=compliance` to `tpaexec
+deploy`. This feature is intended for testing only, when using a test
+system on which full compliance is impossible (for example,
+because SSL certificates are not available).
+
+There are some situations in which TPA will intentionally fail to
+comply with the selected standard; these are documented under Exceptions
+below.
+
+## STIG
+
+STIG compliance is indicated by the `--compliance stig` option to
+`tpaexec configure`.
+
+### Option compatibility
+
+STIG compliance requires the `bare` platform and the `epas` flavour.
+It requires RedHat OS version 8 or 9.
+
+### Settings in config.yml
+
+The following entry is added to `cluster_vars` to use the SQL/Protect
+feature of EDB Postgres Advanced Server:
+
+```
+  extra_postgres_extensions: [ 'sql_protect' ]
+```
+
+The following entries are added to `cluster_vars` to force clients
+to use SSL authentication:
+
+```
+  hba_force_hostssl: True
+  hba_force_certificate_auth: True
+  hba_cert_authentication_map: sslmap
+```
+
+The following entries are added to `cluster_vars` to set GUCs in
+postgresql.conf:
+
+```
+  tcp_keepalives_idle: 10
+  tcp_keepalives_interval: 10
+  tcp_keepalives_count: 10
+  log_destination: "stderr"
+  postgres_log_file_mode: "0600"
+```
+
+The following entries are added to `postgres_conf_settings` in
+`cluster_vars` to set GUCs in postgresql.conf:
+
+```
+  edb_audit: "xml"
+  edb_audit_statement: "all"
+  edb_audit_connect: "all"
+  edb_audit_disconnect: "all"
+  statement_timeout: 1000
+  client_min_messages: "ERROR"
+```
+
+### Deployment differences
+
+During deployment, TPA will set connection limits for the database users
+it creates, corresponding to the number of connections that are needed
+for normal operation. As each user is set up, it will also check that
+an SSL client certificate has been provided for it.
+
+### Providing client ssl certificates
+
+STIG requires DOD-approved ssl certificates for client connections.
+These certificates can't be generated by TPA and therefore must be
+supplied. When setting up authentication for a user from a
+node in the cluster, TPA will look for a certificate/key pair on the
+node.
The certificate and key should be in files called `<username>.crt`
+and `<username>.key` in the directory given by the `ssl_client_cert_dir`
+setting. The default for this setting is `/`, so the files would be,
+for example, `/barman.crt` and `/barman.key` when the `barman` user is
+being set up.
+
+### Final checks
+
+At the end of deployment, TPA will check that the server has FIPS
+enabled.
+
+### Exceptions
+
+If you select EFM as the failover manager, TPA will configure password
+authentication for the EFM user. This goes against the STIG requirement
+that all TCP connections use certificate authentication. The reason for
+this exception is that EFM does not support certificate authentication.
+
+## CIS
+
+CIS compliance is indicated by the `--compliance cis` option to `tpaexec
+configure`.
+
+### Settings in config.yml
+
+The following entries are added to `cluster_vars` to set GUCs in
+postgresql.conf:
+
+```
+  log_connections: "on"
+  log_disconnections: "on"
+```
+
+The following entry is added to `cluster_vars` to enable required
+extensions:
+
+```
+  extra_postgres_extensions: ["passwordcheck", "pgaudit"]
+```
+
+The following entry is added to `cluster_vars` to set the umask for
+the postgres OS user:
+
+```
+  extra_bash_rc_lines: "umask 0077"
+```
+
+The following entries are added to `postgres_conf_settings` in
+`cluster_vars` to set GUCs in postgresql.conf:
+
+```
+  log_error_verbosity: "verbose"
+  log_line_prefix: "'%m [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h '"
+  log_replication_commands: "on"
+  temp_file_limit: "1GB"
+```
+
+### Final checks
+
+At the end of deployment, TPA will check that the server has FIPS
+enabled.
+
+### Exceptions
+
+TPA does not support pgBackRest as mentioned in the CIS specification.
+Instead, TPA installs Barman.
+
+TPA does not install and configure `set_user` as required by the CIS
+specification. This is because preventing logon by the Postgres user
+would leave TPA unable to connect to, and configure, the database.
diff --git a/product_docs/docs/tpa/23/reference/efm.mdx b/product_docs/docs/tpa/23/reference/efm.mdx
index 6f262752296..ef4eaf194f7 100644
--- a/product_docs/docs/tpa/23/reference/efm.mdx
+++ b/product_docs/docs/tpa/23/reference/efm.mdx
@@ -24,6 +24,18 @@ to the cluster.
 See the [EFM documentation](https://www.enterprisedb.com/docs/efm/latest/)
 for more details on EFM configuration.

+## efm_user_password_encryption
+
+Must be either `scram-sha-256` or `md5`.
+
+Set `efm_user_password_encryption` to control the `auth-method` for the
+`efm` Postgres user in `pg_hba.conf`, as well as the algorithm
+used when generating its encrypted password.
+
+```yaml
+efm_user_password_encryption: 'scram-sha-256' # or can be set to `md5`
+```
+
 ## efm_conf_settings

 You can use `efm_conf_settings` to set any parameters, whether recognised
diff --git a/product_docs/docs/tpa/23/reference/harp.mdx b/product_docs/docs/tpa/23/reference/harp.mdx
index 281f9e5515a..27dfdc5a9ed 100644
--- a/product_docs/docs/tpa/23/reference/harp.mdx
+++ b/product_docs/docs/tpa/23/reference/harp.mdx
@@ -13,7 +13,7 @@ to `harp`, which is the default for BDR-Always-ON clusters.
 You must provide the `harp-manager` and `harp-proxy` packages. Please
 contact EDB to obtain access to these packages.

-## Configuring HARP
+## Variables for HARP configuration

 See the [HARP documentation](https://www.enterprisedb.com/docs/pgd/4/harp/04_configuration/)
 for more details on HARP configuration.

@@ -41,6 +41,7 @@
| `harp_proxy_max_client_conn` | `75` | Maximum number of client connections accepted by harp-proxy (`max_client_conn`) |
 | `harp_ssl_password_command` | None | a custom command that should receive the obfuscated sslpassword in the stdin and provide the handled sslpassword via stdout. |
 | `harp_db_request_timeout` | `10s` | similar to dcs -> request_timeout, but for connection to the database itself. |
+| `harp_local_etcd_only` | None | limit the harp-manager endpoints list to only the local etcd node instead of all etcd nodes |

 You can use the
 [harp-config hook](../tpaexec-hooks/#harp-config)
@@ -114,7 +115,7 @@ provide api endpoints to monitor service's health.

 The variable can contain these keys:

-```
+```yaml
 enable: false
 secure: false
 cert_file: "/etc/tpa/harp_proxy/harp_proxy.crt"
diff --git a/product_docs/docs/tpa/23/reference/pgbouncer.mdx b/product_docs/docs/tpa/23/reference/pgbouncer.mdx
index 567d840caac..8e36b4528d6 100644
--- a/product_docs/docs/tpa/23/reference/pgbouncer.mdx
+++ b/product_docs/docs/tpa/23/reference/pgbouncer.mdx
@@ -1,16 +1,28 @@
 ---
-description: Adding pgbouncer to your Postgres cluster.
-title: Configuring pgbouncer
+description: Adding PgBouncer to your Postgres cluster.
+title: Configuring PgBouncer
 originalFilePath: pgbouncer.md

 ---

-TPA will install and configure pgbouncer on instances whose `role`
+TPA will install and configure PgBouncer on instances whose `role`
 contains `pgbouncer`.

-By default, pgbouncer listens for connections on port 6432 and forwards
-connections to `127.0.0.1:5432` (which may be either Postgres or
-[haproxy](haproxy/), depending on the architecture).
+By default, PgBouncer listens for connections on port 6432 and, if no
+`pgbouncer_backend` is specified, forwards connections to
+`127.0.0.1:5432` (which may be either Postgres or [haproxy](haproxy/),
+depending on the architecture).
+
+!!!Note Using PgBouncer to route traffic to the primary
+
+If you are using the M1 architecture with repmgr, you can set the
+`repmgr_redirect_pgbouncer: true` hash under `cluster_vars` to have
+PgBouncer connections directed to the primary. PgBouncer will be
+updated automatically on failover to route to the new primary. You
+should use this option in combination with setting `pgbouncer_backend`
+to the primary instance name, to ensure that the cluster is initially
+deployed with PgBouncer routing to the primary.
+!!!

 You can set the following variables on any `pgbouncer` instance.
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
index 801d59d2ece..08c3fb4b241 100644
--- a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
+++ b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
@@ -24,7 +24,7 @@ are supported.
 container of the target operating system and uses that system's package
 manager to resolve dependencies and download all necessary packages. The
 required Docker setup for download-packages is the same as that for
-[using Docker as a deployment platform](../platform-docker).
+[using Docker as a deployment platform](../platform-docker/).
## Usage diff --git a/product_docs/docs/tpa/23/rel_notes/index.mdx b/product_docs/docs/tpa/23/rel_notes/index.mdx index 9f2811615ed..2eddffa4239 100644 --- a/product_docs/docs/tpa/23/rel_notes/index.mdx +++ b/product_docs/docs/tpa/23/rel_notes/index.mdx @@ -1,61 +1,65 @@ --- title: Trusted Postgres Architect release notes -navTitle: "Release notes" +navTitle: Release notes +description: Release notes for Trusted Postgres Architect and later navigation: - - tpa_23.34.1_rel_notes - - tpa_23.34_rel_notes - - tpa_23.33_rel_notes - - tpa_23.32_rel_notes - - tpa_23.31_rel_notes - - tpa_23.30_rel_notes - - tpa_23.29_rel_notes - - tpa_23.28_rel_notes - - tpa_23.27_rel_notes - - tpa_23.26_rel_notes - - tpa_23.25_rel_notes - - tpa_23.24_rel_notes - - tpa_23.23_rel_notes - - tpa_23.22_rel_notes - - tpa_23.21_rel_notes - - tpa_23.20_rel_notes - - tpa_23.19_rel_notes - - tpa_23.18_rel_notes - - tpa_23.17_rel_notes - - tpa_23.16_rel_notes - - tpa_23.15_rel_notes - - tpa_23.14_rel_notes - - tpa_23.13_rel_notes - - tpa_23.12_rel_notes - - tpa_23.1-11_rel_notes + - tpa_23.35.0_rel_notes + - tpa_23.34.1_rel_notes + - tpa_23.34_rel_notes + - tpa_23.33_rel_notes + - tpa_23.32_rel_notes + - tpa_23.31_rel_notes + - tpa_23.30_rel_notes + - tpa_23.29_rel_notes + - tpa_23.28_rel_notes + - tpa_23.27_rel_notes + - tpa_23.26_rel_notes + - tpa_23.25_rel_notes + - tpa_23.24_rel_notes + - tpa_23.23_rel_notes + - tpa_23.22_rel_notes + - tpa_23.21_rel_notes + - tpa_23.20_rel_notes + - tpa_23.19_rel_notes + - tpa_23.18_rel_notes + - tpa_23.17_rel_notes + - tpa_23.16_rel_notes + - tpa_23.15_rel_notes + - tpa_23.14_rel_notes + - tpa_23.13_rel_notes + - tpa_23.12_rel_notes + - tpa_23.1-11_rel_notes --- -The Trusted Postgres Architect documentation describes the latest version of Trusted Postgres Architect 23. -| Version | Release date | -| ---------------------------- | ------------ | -| [23.34.1](tpa_23.34.1_rel_notes) | 09 Sep 2024 | -| [23.34](tpa_23.34_rel_notes) | 22 Aug 2024 | -| [23.33](tpa_23.33_rel_notes) | 24 Jun 2024 | -| [23.32](tpa_23.32_rel_notes) | 15 May 2024 | -| [23.31](tpa_23.31_rel_notes) | 19 Mar 2024 | -| [23.30](tpa_23.30_rel_notes) | 19 Mar 2024 | -| [23.29](tpa_23.29_rel_notes) | 15 Feb 2024 | -| [23.28](tpa_23.28_rel_notes) | 23 Jan 2024 | -| [23.27](tpa_23.27_rel_notes) | 19 Dec 2023 | -| [23.26](tpa_23.26_rel_notes) | 30 Nov 2023 | -| [23.25](tpa_23.25_rel_notes) | 14 Nov 2023 | -| [23.24](tpa_23.24_rel_notes) | 17 Oct 2023 | -| [23.23](tpa_23.23_rel_notes) | 21 Sep 2023 | -| [23.22](tpa_23.22_rel_notes) | 06 Sep 2023 | -| [23.21](tpa_23.21_rel_notes) | 05 Sep 2023 | -| [23.20](tpa_23.20_rel_notes) | 01 Aug 2023 | -| [23.19](tpa_23.19_rel_notes) | 12 Jul 2023 | -| [23.18](tpa_23.18_rel_notes) | 23 May 2023 | -| [23.17](tpa_23.17_rel_notes) | 10 May 2023 | -| [23.16](tpa_23.16_rel_notes) | 21 Mar 2023 | -| [23.15](tpa_23.15_rel_notes) | 15 Mar 2023 | -| [23.14](tpa_23.14_rel_notes) | 23 Feb 2023 | -| [23.13](tpa_23.13_rel_notes) | 22 Feb 2023 | -| [23.12](tpa_23.12_rel_notes) | 21 Feb 2023 | -| [23.1-11](tpa_23.1-11_rel_notes)| - | +The Trusted Postgres Architect documentation describes the latest version of Trusted Postgres Architect 23. 
+ +| Trusted Postgres Architect version | Release Date | +|---|---| +| [23.35.0](./tpa_23.35.0_rel_notes) | 25 Nov 2024 | +| [23.34.1](./tpa_23.34.1_rel_notes) | 09 Sep 2024 | +| [23.34](./tpa_23.34_rel_notes) | 22 Aug 2024 | +| [23.33](./tpa_23.33_rel_notes) | 24 Jun 2024 | +| [23.32](./tpa_23.32_rel_notes) | 15 May 2024 | +| [23.31](./tpa_23.31_rel_notes) | 19 Mar 2024 | +| [23.30](./tpa_23.30_rel_notes) | 19 Mar 2024 | +| [23.29](./tpa_23.29_rel_notes) | 15 Feb 2024 | +| [23.28](./tpa_23.28_rel_notes) | 23 Jan 2024 | +| [23.27](./tpa_23.27_rel_notes) | 19 Dec 2023 | +| [23.26](./tpa_23.26_rel_notes) | 30 Nov 2023 | +| [23.25](./tpa_23.25_rel_notes) | 14 Nov 2023 | +| [23.24](./tpa_23.24_rel_notes) | 17 Oct 2023 | +| [23.23](./tpa_23.23_rel_notes) | 21 Sep 2023 | +| [23.22](./tpa_23.22_rel_notes) | 06 Sep 2023 | +| [23.21](./tpa_23.21_rel_notes) | 05 Sep 2023 | +| [23.20](./tpa_23.20_rel_notes) | 01 Aug 2023 | +| [23.19](./tpa_23.19_rel_notes) | 12 Jul 2023 | +| [23.18](./tpa_23.18_rel_notes) | 23 May 2023 | +| [23.17](./tpa_23.17_rel_notes) | 10 May 2023 | +| [23.16](./tpa_23.16_rel_notes) | 21 Mar 2023 | +| [23.15](./tpa_23.15_rel_notes) | 15 Mar 2023 | +| [23.14](./tpa_23.14_rel_notes) | 23 Feb 2023 | +| [23.13](./tpa_23.13_rel_notes) | 22 Feb 2023 | +| [23.12](./tpa_23.12_rel_notes) | 21 Feb 2023 | +| [23.1-11](./tpa_23.1-11_rel_notes) | 21 Jun 2023 to 31 Jan 2023 | diff --git a/product_docs/docs/tpa/23/rel_notes/src/meta.yml b/product_docs/docs/tpa/23/rel_notes/src/meta.yml new file mode 100644 index 00000000000..6a6b069cdad --- /dev/null +++ b/product_docs/docs/tpa/23/rel_notes/src/meta.yml @@ -0,0 +1,65 @@ +product: Trusted Postgres Architect +shortname: tpa +title: Trusted Postgres Architect release notes +description: Release notes for Trusted Postgres Architect and later +columns: +- 0: + label: "Trusted Postgres Architect version" + key: version-link +- 1: + label: Release Date + key: shortdate +intro: | + The Trusted Postgres Architect documentation describes the latest version of Trusted Postgres Architect 23. 
+precursor: +- version: "23.34.1" + date: 09 Sep 2024 +- version: "23.34" + date: 22 Aug 2024 +- version: "23.33" + date: 24 Jun 2024 +- version: "23.32" + date: 15 May 2024 +- version: "23.31" + date: 19 Mar 2024 +- version: "23.30" + date: 19 Mar 2024 +- version: "23.29" + date: 15 Feb 2024 +- version: "23.28" + date: 23 Jan 2024 +- version: "23.27" + date: 19 Dec 2023 +- version: "23.26" + date: 30 Nov 2023 +- version: "23.25" + date: 14 Nov 2023 +- version: "23.24" + date: 17 Oct 2023 +- version: "23.23" + date: 21 Sep 2023 +- version: "23.22" + date: 06 Sep 2023 +- version: "23.21" + date: 05 Sep 2023 +- version: "23.20" + date: 01 Aug 2023 +- version: "23.19" + date: 12 Jul 2023 +- version: "23.18" + date: 23 May 2023 +- version: "23.17" + date: 10 May 2023 +- version: "23.16" + date: 21 Mar 2023 +- version: "23.15" + date: 15 Mar 2023 +- version: "23.14" + date: 23 Feb 2023 +- version: "23.13" + date: 22 Feb 2023 +- version: "23.12" + date: 21 Feb 2023 +- version: "23.1-11" + date: 21 Jun 2023 to 31 Jan 2023 + diff --git a/product_docs/docs/tpa/23/rel_notes/src/tpa_23.35.0_rel_notes.yml b/product_docs/docs/tpa/23/rel_notes/src/tpa_23.35.0_rel_notes.yml new file mode 100644 index 00000000000..ceef28c7ea8 --- /dev/null +++ b/product_docs/docs/tpa/23/rel_notes/src/tpa_23.35.0_rel_notes.yml @@ -0,0 +1,283 @@ +product: Trusted Postgres Architect +version: 23.35.0 +date: 25 November 2024 +intro: | + New features, enhancements, bug fixes, and other changes in Trusted Postgres Architect 23.35.0 include the following: +highlights: | + - Options for STIG/CIS compliance. + - Support for PGD Lightweight architecture + - Postgis is now a recognized extension. + - Docker `configure` creates named networks with static IP addresses. + - Support for RedHat Enterprise Linux 9 for ARM architectures. + - Support for PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced Server 17. +relnotes: + - details: >- + `PermissionsStartOnly` has been deprecated and is now achieved via + `ExecStartPost=+/bin/bash...` syntax + impact: low + jira: + - TPA-762 + relnote: Remove deprecated `PermissionStartOnly` in postgres.service.j2 template + type: Change + - details: >- + Fixed a bug whereby the test that ensures the current pgd-proxy + configuration matches the expected configuration would fail for version < + 5.5.0. This fix ensures that TPA won't try to query configuration keys + added in version 5.5.0. + impact: low + jira: + - TPA-819 + relnote: Fix tpaexec test for pgd-proxy config verification + type: Bug Fix + - details: >- + The PostGIS package will automatically be added when a user specifies + `postgis` as an entry in either `postgres_extensions` or the list of + extensions named under `postgres_databases`. Also enables the CRB (Code + Ready Builder) repository for RHEL-compatible distributions so PostGIS + dependencies can be installed. + impact: low + jira: + - TPA-771 + relnote: Add `postgis` to list of recognized extensions + type: Enhancement + - details: >- + Certain required privileges are granted to Postgres role, `barman_role`, + which is then granted to the `barman` Postgres user. This avoids creating + the `barman` user as a superuser. This role can also be granted to other + Postgres users by adding it to their `granted_roles` list using + `postgres/createuser`. The `barman_role` is created as part of the Barman + tasks; if Barman is not used, this role will not be created. 
Therefore,
+      the task that grants privileges to this role is only executed if the
+      `barman_role` username is in the list of Postgres users that are created.
+      The 'barman' user now has `NOSUPERUSER` explicitly specified as a role
+      attribute. If a cluster was deployed with a previous TPA version (which
+      created the 'barman' user as a superuser), deploying with this version
+      will remove the `superuser` role attribute from the `barman` user.
+    impact: low
+    jira:
+      - TPA-148
+      - TPA-818
+    relnote: The `barman` Postgres user is no longer a superuser
+    type: Change
+  - details: >-
+      Add a new optional var, `harp_local_etcd_only`, available when using etcd
+      with HARP. This option tells the HARP manager to connect only to the
+      local etcd node. This recommendation follows the best practice learnt
+      from doing the same when `bdr` is used as the consensus protocol. The
+      default mode of adding multiple endpoints can lead to performance issues
+      in some cases; this option gives more control to the user.
+    impact: low
+    jira:
+      - TPA-821
+    relnote: Add new option `harp_local_etcd_only` when using etcd with HARP
+    type: Change
+  - details: >-
+      A `primary_slot_name` is configured on the primary node to ensure the old
+      primary uses a physical slot for replication during an EFM switchover.
+      However, `bdr_init_physical` attempts to use it for node initialisation
+      and hangs indefinitely, since the slot does not exist in a PGD
+      installation. This `primary_slot_name` is now set explicitly only when
+      the `failover_manager` is EFM, to avoid setting it unnecessarily.
+    impact: low
+    jira:
+      - TPA-712
+    relnote: >-
+      Fix case where `primary_slot_name` added for EFM compatibility interferes
+      with `bdr_init_physical`
+    type: Bug Fix
+  - details: >-
+      If the `pgdcli_package_version` is specified in `config.yml`, the
+      `bash-completion` package is incorrectly named, because the `packages_for`
+      filter erroneously appends the `pgdcli_package_version` to the package
+      name. This results in an attempt to download a nonexistent package. The
+      `bash-completion` package is now appended to the list after the
+      `packages_for` filter, since its version is independent of the
+      `pgdcli_package_version`.
+    impact: low
+    jira:
+      - TPA-794
+    relnote: Download correct `bash-completion` package version
+    type: Bug Fix
+  - details: >-
+      TPA is now able to generate a PGD Lightweight architecture comprising
+      three nodes in two locations (two nodes in the primary location and one
+      in disaster recovery), designed to ease migrations from physical
+      replication. Users can now run `tpaexec configure lw -a Lightweight
+      --postgresql 15`.
+    impact: medium
+    jira:
+      - TPA-838
+    relnote: Add support for PGD Lightweight architecture
+    type: Enhancement
+  - details: >-
+      TPA now clears the error message stack after each task to ensure messages
+      are not spuriously repeated.
+    impact: low
+    jira:
+      - TPA-812
+    relnote: >-
+      Fix an issue whereby in some cases error messages would be repeated even
+      after successful tasks.
+    type: Bug Fix
+  - details: >-
+      Improve the postgres-monitor script to better manage recoverable errors
+      and add retries on network errors, to ensure that it won't return failure
+      when it just didn't allow enough time for the postgres service to be
+      fully started.
+    impact: low
+    jira:
+      - TPA-796
+    relnote: Improve postgres-monitor script
+    type: Change
+  - details: >-
+      Fixed an issue whereby new replicas in Patroni clusters would fail with
+      errors related to replication slots.
+ impact: low + jira: + - TPA-792 + - TPA-781 + relnote: Fix issue that prevented the addition of replicas to Patroni clusters + type: Bug Fix + - details: >- + Previously the `pemserver` and `barman` nodes were added to the `Allowed + node host list` in EFM when they were not relevant to EFM functions. + Refactored the task that writes the `efm.node` configuration to only + include those nodes that have `efm` in their list of roles. + impact: low + jira: + - TPA-817 + relnote: Only add nodes with `efm` role to cluster `efm.nodes` file + type: Change + - details: >- + If `--enable-pem` and `--enable-pg-backup-api` are passed to `tpaexec + configure`, `pem-agent` is added twice to the `barman` node if it is also + a `witness`. Fixed by consolidating both `if` statements together to only + evaluate the conditions once. + impact: low + jira: + - TPA-793 + relnote: Add `pem-agent` role on barman nodes at most once for M1 architecture + type: Bug Fix + - details: >- + Fixed a bug whereby if the user excluded the `pkg` selector, later + PEM-related tasks would fail because the `pem_python_executable` fact had + not been set. + impact: low + jira: + - TPA-814 + relnote: Set `pem_python_executable` outside of the `pkg` role + type: Bug Fix + - details: >- + The `--efm-install-path` and `--efm-cluster-name` flags are set when a PEM + server is registered on an EFM node. The `Streaming Replication`, + `Failover Manager Node Status` and `Failover Manager Cluster Info` probes + are enabled when a PEM agent is registered on an EFM node. + impact: low + jira: + - TPA-586 + relnote: Enable EFM probes when a PEM agent is registered on an EFM node + type: Enhancement + - details: >- + TPA now supports command-line options to create a cluster configured to + conform to many of the requirements of the STIG and CIS security + standards. These options cause TPA to set postgresql.conf settings as + defined in the relevant standards, to install required extensions, to + configure other aspects of system behaviour such as filesystem permissions + and user connection limits, and to check for other requirements such as + FIPS crypto standards which TPA can't directly impose. The clusters thus + generated are not certified by TPA to conform to the standards, but much + of the groundwork of creating a conforming cluster is now automated. + impact: high + jira: + - TPA-366 + - TPA-836 + - TPA-837 + relnote: Support STIG/CIS compliance + type: Enhancement + - details: >- + The configure command will now automatically add a named network and + static IP addresses to config.yml when Docker is the selected platform. + The network name is the same as the cluster name and the address range + follows the existing semantics of the --network option with the exception + that only one subnet is used for the whole cluster rather than one per + location. If a subnet prefix is not specified by the user, TPA will + attempt to select a prefix which results in a subnet large enough to fit + the whole cluster. The key `ip_address` may now be used to specify a + static IP for a Docker instance as long as a named network is specified in + the config.yml. + impact: medium + jira: + - TPA-261 + - TPA-407 + - TPA-434 + relnote: Have `configure` create a user-defined network on Docker + type: Enhancement + - details: >- + Packages are now published targeting RHEL 9 ARM64, and TPA supports + deployments using this architecture and OS. Also updated the list of + supported AWS images to include the RedHat 9 ARM64 AMI provided by Amazon. 
+      The default `instance_type` for ARM64 EC2 instances has been updated from
+      `a1` to `t4g`, which is the current-generation processor available for
+      burstable general-purpose workloads.
+    impact: low
+    jira:
+      - TPA-780
+    relnote: Support RedHat Enterprise Linux 9 for ARM architectures
+    type: Enhancement
+  - details: >-
+      Clusters can be configured to use PostgreSQL, EDB Postgres Extended, and
+      EDB Postgres Advanced Server version 17. Barman no longer needs to install
+      the postgres server package to get the `pg_receivewal` binary when using
+      EDB Postgres Advanced Server 17 or EDB Postgres Extended 17, since the
+      binary has been added to the client package for these versions. TPA raises
+      an architecture error when a cluster is configured with `repmgr` as the
+      failover_manager, since repmgr is not available for Postgres 17. Updated
+      documentation to reflect supported versions.
+    impact: low
+    jira:
+      - TPA-803
+    relnote: >-
+      Support PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced
+      Server 17
+    type: Enhancement
+  - details: >-
+      When using an existing Barman node as a backup node in a new cluster,
+      users can set `barman_shared: true` in the Barman instance's vars, with
+      the platform set to `bare` and other information supplied as usual for
+      bare instances. This change allows TPA to skip some configuration steps
+      that would otherwise fail due to usermod issues, as the Barman user
+      already has running processes from previous deployments. The shared
+      Barman instance is treated as a bare instance, so the required access,
+      including the Barman user's access to the target PostgreSQL instances,
+      must already be in place. Copying the Barman user's keys from the original
+      cluster to the new cluster is one way to achieve this; see the Barman
+      section of the TPA documentation for detailed information.
+    impact: medium
+    jira:
+      - TPA-777
+    relnote: >-
+      Added experimental support for using an existing Barman node as backup
+      node in new cluster
+    type: Enhancement
+  - details: >-
+      Expose a configurable `efm_user_password_encryption` variable, which
+      should be set to either `'md5'` or `'scram-sha-256'` depending on user
+      requirements. This controls the `auth-method` for the `efm` Postgres user
+      in `pg_hba.conf` and the algorithm used for generating its encrypted
+      password. In clusters deployed with `compliance` configured to `stig`, the
+      `efm` Postgres user's `auth-method` in `pg_hba.conf` will be set to
+      `scram-sha-256`, since FIPS-enabled operating systems do not allow `md5`
+      to be used.
+    impact: low
+    jira:
+      - TPA-832
+      - TPA-836
+    relnote: Make `password_encryption` algorithm for `efm` Postgres user configurable.
+    type: Enhancement
+  - details: >-
+      When using the `--hostnames-from` option to `tpaexec configure`, you can
+      now include two IP addresses on each line, which will be included in the
+      generated config.yml as public_ip and private_ip.
+ impact: low + jira: + - TPA-841 + relnote: Allow multiple addresses to be supplied with hostnames + type: Enhancement \ No newline at end of file diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.35.0_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.35.0_rel_notes.mdx new file mode 100644 index 00000000000..161be7b308e --- /dev/null +++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.35.0_rel_notes.mdx @@ -0,0 +1,58 @@ +--- +title: Trusted Postgres Architect 23.35.0 release notes +navTitle: Version 23.35.0 +--- + +Released: 25 November 2024 + +New features, enhancements, bug fixes, and other changes in Trusted Postgres Architect 23.35.0 include the following: + +## Highlights + +- Options for STIG/CIS compliance. +- Support for PGD Lightweight architecture +- Postgis is now a recognized extension. +- Docker `configure` creates named networks with static IP addresses. +- Support for RedHat Enterprise Linux 9 for ARM architectures. +- Support for PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced Server 17. + +## Enhancements + + + + + + + + + + + + +
| Description | Addresses |
|-------------|-----------|
| **Support STIG/CIS compliance** <br/> TPA now supports command-line options to create a cluster configured to conform to many of the requirements of the STIG and CIS security standards. These options cause TPA to set postgresql.conf settings as defined in the relevant standards, to install required extensions, to configure other aspects of system behaviour such as filesystem permissions and user connection limits, and to check for other requirements, such as FIPS crypto standards, which TPA can't directly impose. The clusters thus generated are not certified by TPA to conform to the standards, but much of the groundwork of creating a conforming cluster is now automated. | TPA-366, TPA-836, TPA-837 |
| **Add support for PGD Lightweight architecture** <br/> TPA is now able to generate a PGD Lightweight architecture comprising three nodes in two locations (two nodes in the primary location and one in disaster recovery), designed to ease migrations from physical replication. Users can now run `tpaexec configure lw -a Lightweight --postgresql 15`. | TPA-838 |
| **Have `configure` create a user-defined network on Docker** <br/> The configure command now automatically adds a named network and static IP addresses to config.yml when Docker is the selected platform. The network name is the same as the cluster name, and the address range follows the existing semantics of the `--network` option, with the exception that only one subnet is used for the whole cluster rather than one per location. If a subnet prefix is not specified by the user, TPA attempts to select a prefix that results in a subnet large enough to fit the whole cluster. The key `ip_address` may now be used to specify a static IP for a Docker instance, as long as a named network is specified in the config.yml. | TPA-261, TPA-407, TPA-434 |
| **Added experimental support for using an existing Barman node as backup node in new cluster** <br/> When using an existing Barman node as a backup node in a new cluster, users can set `barman_shared: true` in the Barman instance's vars, with the platform set to `bare` and other information supplied as usual for bare instances. This change allows TPA to skip some configuration steps that would otherwise fail due to usermod issues, as the Barman user already has running processes from previous deployments. The shared Barman instance is treated as a bare instance, so the required access, including the Barman user's access to the target PostgreSQL instances, must already be in place. Copying the Barman user's keys from the original cluster to the new cluster is one way to achieve this; see the Barman section of the TPA documentation for detailed information. | TPA-777 |
| **Add `postgis` to list of recognized extensions** <br/> The PostGIS package is automatically added when a user specifies `postgis` as an entry in either `postgres_extensions` or the list of extensions named under `postgres_databases`. This also enables the CRB (Code Ready Builder) repository for RHEL-compatible distributions so PostGIS dependencies can be installed. | TPA-771 |
| **Enable EFM probes when a PEM agent is registered on an EFM node** <br/> The `--efm-install-path` and `--efm-cluster-name` flags are set when a PEM server is registered on an EFM node. The Streaming Replication, Failover Manager Node Status, and Failover Manager Cluster Info probes are enabled when a PEM agent is registered on an EFM node. | TPA-586 |
| **Support RedHat Enterprise Linux 9 for ARM architectures** <br/> Packages are now published targeting RHEL 9 ARM64, and TPA supports deployments using this architecture and OS. The list of supported AWS images now includes the RedHat 9 ARM64 AMI provided by Amazon. The default `instance_type` for ARM64 EC2 instances has been updated from `a1` to `t4g`, which is the current-generation processor available for burstable general-purpose workloads. | TPA-780 |
| **Support PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced Server 17** <br/> Clusters can be configured to use PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced Server version 17. Barman no longer needs to install the postgres server package to get the `pg_receivewal` binary when using EDB Postgres Advanced Server 17 or EDB Postgres Extended 17, since the binary has been added to the client package for these versions. TPA raises an architecture error when a cluster is configured with `repmgr` as the failover_manager, since repmgr is not available for Postgres 17. Documentation has been updated to reflect supported versions. | TPA-803 |
| **Make `password_encryption` algorithm for `efm` Postgres user configurable.** <br/> Expose a configurable `efm_user_password_encryption` variable, which should be set to either `'md5'` or `'scram-sha-256'` depending on user requirements. This controls the auth-method for the `efm` Postgres user in pg_hba.conf and the algorithm used for generating its encrypted password. In clusters deployed with `compliance` configured to `stig`, the `efm` Postgres user's auth-method in pg_hba.conf is set to `scram-sha-256`, since FIPS-enabled operating systems do not allow `md5` to be used. | TPA-832, TPA-836 |
| **Allow multiple addresses to be supplied with hostnames** <br/> When using the `--hostnames-from` option to `tpaexec configure`, you can now include two IP addresses on each line, which will be included in the generated config.yml as `public_ip` and `private_ip` (see the sketch after this table). | TPA-841 |
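The two-address form of `--hostnames-from` is easiest to see in a small sketch. The file name, host names, and addresses below are hypothetical; the `public_ip` and `private_ip` keys are the ones named in the release note, and `Name`/`node` follow the usual config.yml instance layout:

```yaml
# hostnames.txt contains one line per instance: name, public IP, private IP:
#
#   klink 203.0.113.10 10.33.0.10
#   klunk 203.0.113.11 10.33.0.11
#
# tpaexec configure ~/clusters/demo -a M1 --postgresql 16 \
#   --hostnames-from hostnames.txt
#
# Each generated instance entry in config.yml then carries both addresses:
instances:
- Name: klink
  node: 1
  public_ip: 203.0.113.10
  private_ip: 10.33.0.10
- Name: klunk
  node: 2
  public_ip: 203.0.113.11
  private_ip: 10.33.0.11
```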
## Changes

| Description | Addresses |
|-------------|-----------|
| **Remove deprecated `PermissionsStartOnly` in postgres.service.j2 template** <br/> `PermissionsStartOnly` has been deprecated; the same behavior is now achieved via the `ExecStartPost=+/bin/bash...` syntax. | TPA-762 |
| **The `barman` Postgres user is no longer a superuser** <br/> Certain required privileges are granted to the Postgres role `barman_role`, which is then granted to the `barman` Postgres user. This avoids creating the `barman` user as a superuser. This role can also be granted to other Postgres users by adding it to their `granted_roles` list using postgres/createuser. The `barman_role` is created as part of the Barman tasks; if Barman is not used, this role is not created. Therefore, the task that grants privileges to this role is only executed if the `barman_role` username is in the list of Postgres users that are created. The `barman` user now has `NOSUPERUSER` explicitly specified as a role attribute. If a cluster was deployed with a previous TPA version (which created the `barman` user as a superuser), deploying with this version removes the superuser role attribute from the `barman` user. | TPA-148, TPA-818 |
| **Add new option `harp_local_etcd_only` when using etcd with HARP** <br/> Add a new optional var, `harp_local_etcd_only`, available when using etcd with HARP. This option tells the HARP manager to connect only to the local etcd node. This recommendation follows the best practice learnt from doing the same when `bdr` is used as the consensus protocol. The default mode of adding multiple endpoints can lead to performance issues in some cases; this option gives more control to the user (see the sketch after this table). | TPA-821 |
| **Improve postgres-monitor script** <br/> Improve the postgres-monitor script to better manage recoverable errors and add retries on network errors, to ensure that it won't return failure when it just didn't allow enough time for the postgres service to be fully started. | TPA-796 |
| **Only add nodes with `efm` role to cluster `efm.nodes` file** <br/> Previously, the pemserver and barman nodes were added to the Allowed node host list in EFM even though they were not relevant to EFM functions. The task that writes the `efm.nodes` configuration has been refactored to only include those nodes that have `efm` in their list of roles. | TPA-817 |
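A minimal sketch of the new HARP option follows, assuming a cluster that already uses etcd as the HARP consensus layer; the `cluster_vars` placement and the `failover_manager`/`harp_consensus_protocol` settings shown are the usual TPA conventions rather than part of this release note:

```yaml
cluster_vars:
  failover_manager: harp
  harp_consensus_protocol: etcd
  # New in 23.35.0 (placement assumed): have each HARP manager connect
  # only to its local etcd node instead of listing every etcd endpoint,
  # which can perform better in some cases.
  harp_local_etcd_only: true
```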
## Bug Fixes

| Description | Addresses |
|-------------|-----------|
| **Fix tpaexec test for pgd-proxy config verification** <br/> Fixed a bug whereby the test that ensures the current pgd-proxy configuration matches the expected configuration would fail for versions earlier than 5.5.0. This fix ensures that TPA won't try to query configuration keys added in version 5.5.0. | TPA-819 |
| **Fix case where `primary_slot_name` added for EFM compatibility interferes with `bdr_init_physical`** <br/> A `primary_slot_name` is configured on the primary node to ensure the old primary uses a physical slot for replication during an EFM switchover. However, `bdr_init_physical` attempts to use it for node initialisation and hangs indefinitely, since the slot does not exist in a PGD installation. This `primary_slot_name` is now set explicitly only when the `failover_manager` is EFM, to avoid setting it unnecessarily. | TPA-712 |
| **Download correct `bash-completion` package version** <br/> If the `pgdcli_package_version` is specified in config.yml, the `bash-completion` package is incorrectly named, because the `packages_for` filter erroneously appends the `pgdcli_package_version` to the package name. This results in an attempt to download a nonexistent package. The `bash-completion` package is now appended to the list after the `packages_for` filter, since its version is independent of the `pgdcli_package_version`. | TPA-794 |
| **Fix an issue whereby in some cases error messages would be repeated even after successful tasks.** <br/> TPA now clears the error message stack after each task to ensure messages are not spuriously repeated. | TPA-812 |
| **Fix issue that prevented the addition of replicas to Patroni clusters** <br/> Fixed an issue whereby new replicas in Patroni clusters would fail with errors related to replication slots. | TPA-792, TPA-781 |
| **Add `pem-agent` role on barman nodes at most once for M1 architecture** <br/> If `--enable-pem` and `--enable-pg-backup-api` are passed to `tpaexec configure`, `pem-agent` was added twice to the barman node if it was also a witness. Fixed by consolidating both `if` statements so the conditions are evaluated only once. | TPA-793 |
| **Set `pem_python_executable` outside of the `pkg` role** <br/> Fixed a bug whereby, if the user excluded the `pkg` selector, later PEM-related tasks would fail because the `pem_python_executable` fact had not been set. | TPA-814 |

diff --git a/product_docs/docs/tpa/23/tpaexec-configure.mdx b/product_docs/docs/tpa/23/tpaexec-configure.mdx
index a7988cc550c..98374c8dee3 100644
--- a/product_docs/docs/tpa/23/tpaexec-configure.mdx
+++ b/product_docs/docs/tpa/23/tpaexec-configure.mdx
@@ -118,18 +118,34 @@ configuration. You must edit config.yml to specify multiple regions.
 ### Network configuration
 
+!!!Note
+
+These options are not meaningful for the "bare" platform, where
+TPA will not alter the network configuration of existing servers.
+!!!
+
 By default, each cluster will be configured with a number of randomly
 selected `/28` subnets from the CIDR range `10.33.0.0/16`, depending
 on the selected architecture.
 
 Specify `--network 192.168.0.0/16` to assign subnets from a different
 network.
-
-**Note:** On AWS clusters, this corresponds to the VPC CIDR.
+On AWS clusters, this corresponds to the VPC CIDR.
 See [aws](platform-aws/#vpc-required) documentation for details.
 
 Specify `--subnet-prefix 26` to assign subnets of a different size,
 /26 instead of /28 in this case.
 
+!!!Note
+
+When the "docker" platform is selected, TPA will always place the
+entire cluster in a single subnet regardless of the architecture. This
+subnet is generated according to the logic described here, with the
+exception that if `--subnet-prefix` is not specified, TPA will
+automatically select a subnet size large enough to accommodate the
+number of instances in `config.yml`.
+!!!
+
 Specify `--no-shuffle-subnets` to allocate subnets from the start of
 the network CIDR range, without randomisation, e.g. `10.33.0.0/28`,
 then `10.33.0.16/28` and so on.
@@ -138,9 +154,6 @@ Specify `--exclude-subnets-from ` to exclude subnets that are
 already used in existing cluster config.yml files. You can specify
 this argument multiple times for each directory.
 
-**Note:** These options are not meaningful for the "bare" platform, where
-TPA will not alter the network configuration of existing servers.
-
 ### Instance type
 
 Specify `--instance-type ` to select an instance type.
@@ -173,6 +186,8 @@ with one name per line. The file must contain at least as many
 valid hostnames as there are instances in your cluster. Each line may contain
 an optional IP address after the name; if present, this address will be set as
 the `ip_address` for the corresponding instance in `config.yml`.
+If two IP addresses are present, the first will be set as `public_ip`
+and the second as `private_ip`.
 
 Use `--hostnames-pattern '…pattern…'` to limit the selection to
 lines matching an egrep pattern.
@@ -247,7 +262,7 @@ details.
 #### Postgres flavour and version
 
 TPA supports PostgreSQL, EDB Postgres Extended, and EDB Postgres
-Advanced Server (EPAS) versions 11 through 16.
+Advanced Server (EPAS) versions 11 through 17.
 
 You must specify both the flavour (or distribution) and major version of
 Postgres to install, for example:
@@ -437,6 +452,15 @@ cluster folder to a different tpa host, ensure that you export the
 associated vault password on the new machine's system keyring.
 vault password can be displayed via `tpaexec show-vault `.
 
+## Security standards compliance
+
+Use the `--compliance stig` or `--compliance cis` options to generate
+a cluster with configuration suitable for complying with the STIG or CIS
+standard. See [Compliance](reference/compliance/) for details. Note that these
+options do not guarantee that the cluster fulfills the relevant
+standard; they only cause TPA to generate a configuration designed to
+comply with those aspects of the standard that can be controlled by TPA.
+A brief sketch follows.
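As a rough illustration, here is a hypothetical invocation and the kind of setting it records in the generated config.yml. The `compliance` key is the one referred to by the `efm_user_password_encryption` release note; its exact placement is assumed here, so treat this as a sketch rather than the authoritative format:

```yaml
# tpaexec configure ~/clusters/stig-demo -a M1 --postgresql 16 --compliance stig
#
# The generated config.yml then records the chosen standard, which the
# deploy phase uses to apply the STIG-related settings TPA can control:
cluster_vars:
  compliance: stig
```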
+ ## Examples Let's see what happens when we run the following command: diff --git a/product_docs/docs/tpa/23/tpaexec-hooks.mdx b/product_docs/docs/tpa/23/tpaexec-hooks.mdx index 5fccd1f9597..d9df23785c8 100644 --- a/product_docs/docs/tpa/23/tpaexec-hooks.mdx +++ b/product_docs/docs/tpa/23/tpaexec-hooks.mdx @@ -1,6 +1,6 @@ --- navTitle: Deployment hooks -description: Extending TPA with hooks to execute arbitrary Anisible tasks. +description: Extending TPA with hooks to execute arbitrary Ansible tasks. title: TPA hooks originalFilePath: tpaexec-hooks.md diff --git a/scripts/pdf/cleanup_combined_markdown.mjs b/scripts/pdf/cleanup_combined_markdown.mjs index 5b20087a43d..8573949eca3 100644 --- a/scripts/pdf/cleanup_combined_markdown.mjs +++ b/scripts/pdf/cleanup_combined_markdown.mjs @@ -233,7 +233,7 @@ function cleanup() { node.value = code; const siblings = ancestors[ancestors.length - 1].children; const idx = siblings.indexOf(node); - siblings.splice(idx + 1, 0, { type: "code", value: output }); + siblings.splice(idx + 1, 0, { type: "code", lang: "output", value: output }); return idx + 2; } } diff --git a/scripts/pdf/generate_pdf.py b/scripts/pdf/generate_pdf.py index 15dd15bf2b8..8ca46c028f4 100755 --- a/scripts/pdf/generate_pdf.py +++ b/scripts/pdf/generate_pdf.py @@ -49,6 +49,7 @@ def main(args): "--from=gfm", "--self-contained", "--highlight-style=tango", + f"--include-in-header={BASE_DIR / 'pdf-script.html'}", f"--css={BASE_DIR / 'pdf-styles.css'}", f"--resource-path={':'.join((str(p) for p in resource_search_paths))}", f"--output={html_file}", diff --git a/scripts/pdf/pdf-script.html b/scripts/pdf/pdf-script.html new file mode 100644 index 00000000000..ae0e78699d7 --- /dev/null +++ b/scripts/pdf/pdf-script.html @@ -0,0 +1,10 @@ + + + diff --git a/scripts/pdf/pdf-styles.css b/scripts/pdf/pdf-styles.css index bcd89935b6e..77b25620939 100644 --- a/scripts/pdf/pdf-styles.css +++ b/scripts/pdf/pdf-styles.css @@ -116,10 +116,32 @@ pre code { background-color: #f8f8f8; /* match SourceCode pandoc highlighting colors */ } +div.sourceCode { margin-bottom: 2rem; } + @media screen { div.sourceCode { overflow: visible; } } +pre.output { + margin-top: -2rem; + border-top: 1px dotted black; +} + +pre.output, pre.output code { + background-color: black; + color: white; +} + +pre.output::before { + content: "output"; + display: block; + margin: -0.2rem -0.1rem 0.3rem; + border-radius: 0.2rem; + background-color: #f8f8f8; + color: black; + text-align: center; +} + /* Tables */ table { @@ -148,3 +170,46 @@ td { padding: 0.3rem 0; padding-right: 1.5rem; } + +/* + * details / summary + */ + +details hr { + display: none; +} + +summary { + display: block; +} + +summary::-webkit-details-marker { display: none } + +/* + * page break hints + * wkhtmltopdf doesn't support page-break-after: avoid, so using this hack: https://stackoverflow.com/questions/9238868/how-do-i-avoid-a-page-break-immediately-after-a-heading/53742871#53742871 + */ + +h1, +h2, +h3, +h4, +h5, +h6 { + page-break-inside: avoid; +} + +h1::after, h2::after, h3::after, h4::after, h5::after, h6::after { + content: ""; + display: block; + height: 8em; + margin-bottom: -8em; +} + +h2 { + page-break-before: always; +} + +p, tr, li, blockquote, pre { + page-break-inside: avoid; +} diff --git a/scripts/source/process-tpa-docs-auto.sh b/scripts/source/process-tpa-docs-auto.sh index cb853508d1b..4af57a39a13 100755 --- a/scripts/source/process-tpa-docs-auto.sh +++ b/scripts/source/process-tpa-docs-auto.sh @@ -44,7 +44,7 @@ node 
$DESTINATION_CHECKOUT/scripts/source/merge-indexes.mjs \ "$SOURCE_CHECKOUT/docs/src/reference/index.mdx" \ >> $SOURCE_CHECKOUT/files-to-ignore.txt -rsync -av --delete --exclude="*.md" --exclude="architectures" --exclude="templates" --exclude-from=$SOURCE_CHECKOUT/files-to-ignore.txt src/ $DESTINATION_CHECKOUT/product_docs/docs/tpa/$TPAVERSION/ +rsync -av --delete --exclude="*.md" --exclude="architectures" --exclude="templates" --exclude="rel_notes" --exclude-from=$SOURCE_CHECKOUT/files-to-ignore.txt src/ $DESTINATION_CHECKOUT/product_docs/docs/tpa/$TPAVERSION/ # Tidy up rm -rf $SOURCE_CHECKOUT diff --git a/src/components/icon/iconNames.js b/src/components/icon/iconNames.js index 0626110cef1..03065363fb3 100644 --- a/src/components/icon/iconNames.js +++ b/src/components/icon/iconNames.js @@ -101,7 +101,6 @@ const iconNames = { INSTANCES: "Instances", INTEGRATION: "Integration", KNIGHT: "Knight", - KUBERNETES: "Kubernetes", LAPTOP_CONFIG: "LaptopConfig", LEADER: "Leader", LEARNING: "Learning", @@ -129,7 +128,6 @@ const iconNames = { PLANNER: "Planner", PLAY_CIRCLE: "PlayCircle", PLUS: "Plus", - POSTGRESQL: "Postgresql", PREFERENCES: "Preferences", PRESENTATION: "Presentation", PROCESS: "Process", diff --git a/src/components/icon/index.js b/src/components/icon/index.js index c971507e7bf..d9334a242d9 100644 --- a/src/components/icon/index.js +++ b/src/components/icon/index.js @@ -7,13 +7,18 @@ import * as logosIcons from "@enterprisedb/icons/logos"; import * as ebd_postgres_aiIcons from "@enterprisedb/icons/edb_logos"; function IconContainer({ - circle, - circleClassName, - circleDiameter, - circleAutoMargin, + circle = false, + circleClassName = "", + circleDiameter = 100, + circleAutoMargin = true, iconName: name = "dottedbox", ...props }) { + props = Object.assign( + {}, + { className: "dottedbox", width: 100, height: 100 }, + props, + ); const iconNameParts = name.split("/"); const iconCategory = iconNameParts.length === 1 ? "" : iconNameParts[0]; const iconName = iconNameParts.length === 1 ? name : iconNameParts[1]; @@ -52,16 +57,6 @@ const Icon = ({ category, name, ...props }) => { return ; }; -IconContainer.defaultProps = { - className: "dottedbox", - circleClassName: "", - circleDiameter: 100, - circleAutoMargin: true, - circle: false, - width: 100, - height: 100, -}; - export { iconNames }; export default IconContainer; diff --git a/src/components/index.js b/src/components/index.js index c42a9e30b77..7ff8a74169c 100644 --- a/src/components/index.js +++ b/src/components/index.js @@ -20,6 +20,7 @@ import Logo from "./logo"; import MainContent from "./main-content"; import PdfDownload from "./pdf-download.js"; import PrevNext from "./prev-next"; +import PurlAnchor from "./purl-anchor"; import SearchNavigationLinks from "./search-navigation-links"; import SearchNavigation from "./search-navigation"; import SideNavigation from "./side-navigation"; @@ -55,6 +56,7 @@ export { MainContent, PdfDownload, PrevNext, + PurlAnchor, SearchNavigationLinks, SearchNavigation, SideNavigation, diff --git a/src/components/layout.js b/src/components/layout.js index c39446b5e30..a0c97a624d1 100644 --- a/src/components/layout.js +++ b/src/components/layout.js @@ -12,6 +12,7 @@ import { Link, StubCards, IconList, + PurlAnchor, } from "../components"; import { MDXProvider } from "@mdx-js/react"; import Icon from "../components/icon/"; @@ -111,9 +112,8 @@ const Layout = ({ h3: ( props, // eslint-disable-next-line jsx-a11y/heading-has-content ) =>

, - img: ( - props, // eslint-disable-next-line jsx-a11y/alt-text - ) => ( + img: (props) => ( + // eslint-disable-next-line jsx-a11y/alt-text { let upLink = prevNext.up; return ( -
+
{prevLink && ( // +// ...where will be a short, stable name for the product (e.g. "pgd", "upm", "epas"), +// and will be a meaningful description of the expected content being linked to. +// Both parameters must be composed of characters valid in URL fragments; invalid characters will *not* be escaped! +// I recommend you limit these parameters to words containing alphanumeric characters, separated by dashes. +// +// This component is intentionally bare-bones right now; it's a component for only two reasons +// 1. it performs transformation and some limited validation of the id (ensure that it fits with the scheme expected by the matching redirect in static/_redirects) +// 2. it makes the purpose of the anchor more obvious when scanning the source Markdown +// (and hopefully when reorganizing content) +// +// Note that you *must* list the path passed to this function in the frontmatter +// redirects section of the file where it appears; I may automate that or at least +// validate it at some point in the future if this sees enough use to make that useful. +// +const PurlAnchor = ({ urlPath }) => { + if (!urlPath?.replace) { + console.error("PurlAnchor requires a urlPath property"); + return; + } + + let hash = urlPath + .replace(/^\/?purl\/?/, "") + .split("/") + .filter((s) => s) + .join("_"); + + if (!/^\/purl\/[^/]+\/[^/]+/.test(urlPath)) + console.error( + `PurlAnchor given a badly-formatted URL path: ${urlPath}; format must be /purl// - anchor id will be ${hash}`, + ); + + // h/t https://stackoverflow.com/questions/26088849/url-fragment-allowed-characters/26119120#26119120 + if (!/^([-?/:@._~!$&'()*+,;=a-zA-Z0-9]|%[0-9a-fA-F]{2})*$/.test(hash)) + console.error( + `PurlAnchor given a badly-formatted URL path: ${urlPath}; this results in an anchor id of ${hash}, which is invalid as a URL fragment`, + ); + + return ; +}; + +function snapToNearestSection(node) { + if ( + !node || + decodeURIComponent(window?.location?.hash?.substring(1)) !== node.id + ) + return; + + // walk backwards and find nearest previous sibling which is a header with an id, then use *that* id + // this is a "nice to have", as it makes it more likely that both the URL and section + // are those defined by the document structure (sections) vs. arbitrary ids. + for (let next = node.previousSibling; next; next = next.previousSibling) { + const prevId = next.getAttribute && next.getAttribute("id"); + if (prevId && /H\d/.test(next.nodeName)) { + window.history.replaceState(null, null, "#" + encodeURIComponent(prevId)); + break; + } + } +} + +export default PurlAnchor; diff --git a/src/pages/index.js b/src/pages/index.js index abd5f6d3829..3195d64c595 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -139,21 +139,21 @@ const BannerCardLink = ({ to, className, children }) => ( ); const BannerIconDivider = ({ iconName, headingText }) => ( - +
  {headingText} - +
); const BannerDivider = ({ headingText }) => ( - +
{headingText} - +
); const Page = () => { @@ -173,7 +173,7 @@ const Page = () => {
{updates.slice(0, 2).map((update) => ( -
+
{ )}

-
+
{editTarget !== "none" && ( { ) : null} - + {body} {(!indexCards || indexCards === TileModes.None) && sections && ( @@ -289,7 +295,7 @@ const DocTemplate = ({ data, pageContext }) => { {showToc && ( - + )} diff --git a/src/templates/learn-doc.js b/src/templates/learn-doc.js index 1d2d3d19ccf..91132384f08 100644 --- a/src/templates/learn-doc.js +++ b/src/templates/learn-doc.js @@ -133,7 +133,7 @@ const LearnDocTemplate = ({ data, pageContext }) => { isIndexPage: isPathAnIndexPage(mdx.fileAbsolutePath), productVersions, }; - const { path, depth } = fields; + const { path } = fields; const showToc = !!mdx.tableOfContents.items && !frontmatter.hideToC; const showInteractiveBadge = @@ -212,17 +212,29 @@ const LearnDocTemplate = ({ data, pageContext }) => { )}

{title}

- {editOrFeedbackButton} +
{editOrFeedbackButton}
+ {frontmatter.displayBanner ? ( + + ) : null} + - + {mdx.body} {showToc && ( - + )} diff --git a/static/_redirects b/static/_redirects index b642d33aa87..540a03bffff 100644 --- a/static/_redirects +++ b/static/_redirects @@ -121,6 +121,7 @@ /docs/jdbc_connector/42.5.1.1/* /docs/jdbc_connector/latest/:splat 301 /docs/jdbc_connector/42.5.1.2/* /docs/jdbc_connector/latest/:splat 301 /docs/jdbc_connector/42.5.4.1/* /docs/jdbc_connector/latest/:splat 301 +/docs/jdbc_connector/42.7.3.1/* /docs/jdbc_connector/latest/:splat 301 # ODBC Connector # collapsed versions @@ -195,7 +196,6 @@ # BigAnimal /docs/edbcloud/* /docs/biganimal/:splat 301 /docs/biganimal/release/* /docs/biganimal/latest/:splat 302 # allow fine-grained redirects a shot at resolution first -/docs/biganimal/latest/* /docs/edb-postgres-ai/cloud-service/ 301 # catch-all # Language Pack breakout from EPAS /docs/epas/latest/language_pack/* /docs/language_pack/2/:splat 301 diff --git a/tools/automation/generators/advisoryindex/advisoryindex.js b/tools/automation/generators/advisoryindex/advisoryindex.js index c17b69b94be..e55f1b77d38 100755 --- a/tools/automation/generators/advisoryindex/advisoryindex.js +++ b/tools/automation/generators/advisoryindex/advisoryindex.js @@ -160,7 +160,8 @@ cvelist.forEach((cve) => { const assfiles = fs .readdirSync(assessmentsDir) .filter((fn) => fn.startsWith("cve") && fn.endsWith("mdx")); -assfiles.sort().reverse(); +// assfiles.sort();.reverse(); +assfiles.sort(); const asslist = assfiles.map((file) => { return file.replace(/\.[^/.]+$/, ""); }); diff --git a/tools/automation/generators/relgen/package-lock.json b/tools/automation/generators/relgen/package-lock.json new file mode 100644 index 00000000000..31b0b625fbd --- /dev/null +++ b/tools/automation/generators/relgen/package-lock.json @@ -0,0 +1,878 @@ +{ + "name": "relgen", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "relgen", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "fast-glob": "^3.3.2", + "js-yaml": "^4.1.0", + "micromark": "^4.0.0", + "yargs": "^17.7.2" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": 
"https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + 
"type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + 
"engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", + "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" 
+ } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": "sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": "sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": "sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": "sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", + "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + 
"node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + } + } +} diff --git a/tools/automation/generators/relgen/package.json b/tools/automation/generators/relgen/package.json new file mode 100644 index 00000000000..caa63900e82 --- /dev/null +++ b/tools/automation/generators/relgen/package.json @@ -0,0 +1,22 @@ +{ + "name": "relgen", + "version": "1.0.0", + "description": "Release Notes Generator Generic", + "main": "relgen.js", + "type": "module", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [ + "release", + "notes" + ], + "author": "Dj", + "license": "ISC", + "dependencies": { + "fast-glob": "^3.3.2", + "js-yaml": "^4.1.0", + "micromark": "^4.0.0", + "yargs": "^17.7.2" + } +} diff --git a/tools/automation/generators/relgen/relgen.js b/tools/automation/generators/relgen/relgen.js new file mode 100755 index 00000000000..b31e8e0648f --- /dev/null +++ b/tools/automation/generators/relgen/relgen.js @@ -0,0 +1,419 @@ +#! 
/usr/bin/env node + +// Parse the yaml in the relnotetester.yml file and return the data + +import { + readFileSync, + readdirSync, + writeFileSync, + appendFileSync, + existsSync, +} from "fs"; +import { load } from "js-yaml"; +import yargs from "yargs"; +import { hideBin } from "yargs/helpers"; +import path from "path"; +import { micromark } from "micromark"; + +let argv = yargs(hideBin(process.argv)) + .usage("Usage: $0 -p ") + .option("p", { + alias: "path", + describe: + "The path to the relnotes directory (should contain a src directory - see schema-readme.md)", + type: "string", + demandOption: true, + default: ".", + }) + .check((argv) => { + const metapath = path.resolve(path.join(argv.path, "src", "meta.yml")); + if (!existsSync(metapath)) throw new Error("Can't find " + metapath); + return true; + }) + .parse(); + +function converter(markdown) { + return micromark(markdown); +} + +function normalizeType(type) { + switch (type) { + case "Feature": + case "Features": + return "Feature"; + case "Enhancement": + case "Enhancements": + return "Enhancement"; + case "Security": + case "Security Fixes": + return "Security"; + case "Bug Fix": + case "Bug Fixes": + case "Bug-fix": + case "Bug-fixes": + case "Bug fix": + return "Bug Fix"; + case "Change": + return "Change"; + case "Deprecation": + case "Obsolete": + return "Deprecation"; + case "Other": + default: + return "Other"; + } +} + +function titles(type) { + switch (type) { + case "Feature": + return "Features"; + case "Enhancement": + return "Enhancements"; + case "Security": + return "Security Fixes"; + case "Change": + return "Changes"; + case "Bug Fix": + return "Bug Fixes"; + case "Deprecation": + return "Deprecations"; + case "Other": + return "Other"; + } +} + +let basedir = false; +let basepath = argv.path; + +// Open the src/meta.yml file and parse it. 
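As a quick illustration of the two mapping helpers above (the inputs and outputs come straight from the switch statements):

```javascript
console.log(normalizeType("Bug fix"));  // "Bug Fix"
console.log(normalizeType("Obsolete")); // "Deprecation"
console.log(normalizeType("Patch"));    // "Other" (unrecognized types fall through)
console.log(titles("Bug Fix"));         // "Bug Fixes" — the section heading
```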
+// Open the src/meta.yml file and parse it
+let meta = load(readFileSync(path.join(basepath, "src/meta.yml"), "utf8"));
+
+// Now we scan the other files in src, on this pass acquiring the metadata
+let files = readdirSync(path.join(basepath, "src"), { withFileTypes: true })
+  .filter((dirent) => {
+    return (
+      dirent.isFile() &&
+      dirent.name !== "meta.yml" &&
+      dirent.name.endsWith(".yml")
+    );
+  })
+  .map((dirent) => dirent.name);
+
+let relnotes = new Map();
+
+for (let i = 0; i < files.length; i++) {
+  let file = files[i];
+  let relnote = load(readFileSync(path.join(basepath, "src", file), "utf8"));
+  relnotes.set(file, relnote);
+}
+
+// Compare two "DD MonthName YYYY" date strings: -1 earlier, 1 later, 0 equal
+function compareDates(dateStr1, dateStr2) {
+  const months = {
+    January: 0,
+    February: 1,
+    March: 2,
+    April: 3,
+    May: 4,
+    June: 5,
+    July: 6,
+    August: 7,
+    September: 8,
+    October: 9,
+    November: 10,
+    December: 11,
+  };
+
+  const parseDate = (dateStr) => {
+    const [day, month, year] = dateStr.split(" ");
+    return new Date(year, months[month], day);
+  };
+
+  const date1 = parseDate(dateStr1);
+  const date2 = parseDate(dateStr2);
+
+  if (date1 < date2) {
+    return -1; // date1 is earlier
+  } else if (date1 > date2) {
+    return 1; // date1 is later
+  } else {
+    return 0; // dates are the same
+  }
+}
+
+// Iterate over the relnotes in descending date order
+relnotes[Symbol.iterator] = function* () {
+  yield* [...relnotes.entries()].sort((a, b) =>
+    compareDates(b[1].date, a[1].date),
+  );
+};
+
+function makeRelnotefilename(shortname, version) {
+  return `${shortname}_${version}_rel_notes`;
+}
+
+function makeShortDate(date) {
+  const months = {
+    January: "Jan",
+    February: "Feb",
+    March: "Mar",
+    April: "Apr",
+    May: "May",
+    June: "Jun",
+    July: "Jul",
+    August: "Aug",
+    September: "Sep",
+    October: "Oct",
+    November: "Nov",
+    December: "Dec",
+  };
+  let [day, month, year] = date.split(" ");
+  let shortmonth = months[month];
+  return `${day} ${shortmonth} ${year}`;
+}
+
+let relindexfilename = path.join(basepath, "index.mdx"); // Use this to write the file
+// writeFileSync() throws on failure rather than returning an error
+try {
+  writeFileSync(relindexfilename, "");
+} catch (err) {
+  console.error(err);
+  process.exit(1);
+}
+
+console.log(`Writing ${relindexfilename}`);
+
+appendFileSync(relindexfilename, "---\n");
+appendFileSync(relindexfilename, `title: ${meta.title}\n`);
+appendFileSync(relindexfilename, `navTitle: Release notes\n`);
+appendFileSync(relindexfilename, `description: ${meta.description}\n`);
+appendFileSync(relindexfilename, `navigation:\n`);
+
+for (let [file, relnote] of relnotes) {
+  appendFileSync(
+    relindexfilename,
+    `  - ${makeRelnotefilename(meta.shortname, relnote.version)}\n`,
+  );
+}
+if (meta.precursor !== undefined) {
+  for (let prec of meta.precursor) {
+    appendFileSync(
+      relindexfilename,
+      `  - ${makeRelnotefilename(meta.shortname, prec.version)}\n`,
+    );
+  }
+}
+
+appendFileSync(relindexfilename, "---\n");
+appendFileSync(relindexfilename, "\n\n");
+
+appendFileSync(relindexfilename, `${meta.intro}`);
+appendFileSync(relindexfilename, "\n\n");
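A note on the iterator override above: reassigning `relnotes[Symbol.iterator]` changes what `for...of` yields without copying the Map. A minimal sketch of the same idea, reusing `compareDates()` from the script and the same "DD MonthName YYYY" date strings the release-note YAML uses:

```javascript
// Sketch only (not part of relgen.js): make a Map iterate newest-first.
const notes = new Map([
  ["relnote_5.6.0.yml", { date: "15 October 2024" }],
  ["relnote_5.6.1.yml", { date: "17 November 2024" }],
]);
notes[Symbol.iterator] = function* () {
  // b-before-a gives descending (newest-first) order
  yield* [...notes.entries()].sort((a, b) => compareDates(b[1].date, a[1].date));
};
for (const [file, note] of notes) console.log(file, note.date);
// relnote_5.6.1.yml 17 November 2024
// relnote_5.6.0.yml 15 October 2024
```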
+// Before we process the table, is there a column definition? If not, we'll use a default
+if (meta.columns == undefined) {
+  meta.columns = [
+    { label: "Version", key: "version-link" },
+    { label: "Release Date", key: "shortdate" },
+  ];
+}
+
+let headers = "|";
+let headers2 = "|";
+for (let i = 0; i < meta.columns.length; i++) {
+  headers += ` ${meta.columns[i].label} |`;
+  headers2 += `---|`;
+}
+
+appendFileSync(relindexfilename, headers + "\n");
+appendFileSync(relindexfilename, headers2 + "\n");
+
+for (let [file, relnote] of relnotes) {
+  let line = "|";
+  for (let i = 0; i < meta.columns.length; i++) {
+    let col = meta.columns[i];
+    switch (col.key) {
+      case "version-link":
+        line += ` [${relnote.version}](./${makeRelnotefilename(meta.shortname, relnote.version)}) |`;
+        break;
+      case "shortdate":
+        line += ` ${makeShortDate(relnote.date)} |`;
+        break;
+      default:
+        if (col.key.startsWith("$")) {
+          let key = col.key.replace("$", "");
+          line += ` ${relnote.meta[key]} |`;
+        } else {
+          console.error(`Unknown column key: ${col.key}`);
+        }
+        break;
+    }
+  }
+  appendFileSync(relindexfilename, line + "\n");
+}
+
+// We aren't done yet, check for a precursor
+if (meta.precursor !== undefined) {
+  for (let prec of meta.precursor) {
+    let line = "|";
+    for (let i = 0; i < meta.columns.length; i++) {
+      let col = meta.columns[i];
+      switch (col.key) {
+        case "version-link":
+          line += ` [${prec.version}](./${makeRelnotefilename(meta.shortname, prec.version)}) |`;
+          break;
+        case "shortdate": // This is a precursor, so we need to get the date from the precursor
+          line += ` ${prec.date} |`;
+          break;
+        default:
+          if (col.key.startsWith("$")) {
+            let key = col.key.replace("$", "");
+            line += ` ${prec.meta[key]} |`;
+          } else {
+            console.error(`Unknown column key: ${col.key}`);
+          }
+          break;
+      }
+    }
+    appendFileSync(relindexfilename, line + `\n`);
+  }
+}
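For clarity, this is how a `$`-prefixed column key from meta.yml resolves to a cell value; a hedged sketch mirroring the `default:` branch above (the object literals are illustrative only):

```javascript
// "$pgdcli" means: read relnote.meta.pgdcli from the release-note YAML.
const col = { label: "PGD CLI", key: "$pgdcli" };
const relnote = { version: "5.6.0", meta: { pgdcli: "5.6.0" } };

let cell;
if (col.key.startsWith("$")) {
  const key = col.key.replace("$", ""); // same first-match replace as above
  cell = relnote.meta[key]; // -> "5.6.0"
}
console.log(`| ${cell} |`);
```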
+// Now let's make some release notes...
+for (let [file, relnote] of relnotes) {
+  prepareRelnote(meta, file, relnote);
+}
+
+function prepareRelnote(meta, file, note) {
+  let relnotefilename = makeRelnotefilename(meta.shortname, note.version); // Use this to write the file
+  let alltypes = note.relnotes.map((note) => normalizeType(note.type));
+  let types = [...new Set(alltypes)];
+
+  let rlout = path.join(basepath, relnotefilename + ".mdx");
+  // writeFileSync() throws on failure rather than returning an error
+  try {
+    writeFileSync(rlout, "");
+  } catch (err) {
+    console.error(err);
+    process.exit(1);
+  }
+
+  types = types.sort((a, b) => {
+    const order = [
+      "Feature",
+      "Enhancement",
+      "Security",
+      "Change",
+      "Bug Fix",
+      "Deprecation",
+      "Other",
+    ];
+    return order.indexOf(a) - order.indexOf(b);
+  });
+
+  let rnotes = {};
+
+  // Highest, High, Medium, Low, Lowest - in that order
+  function impactSort(a, b) {
+    const order = ["highest", "high", "medium", "low", "lowest"];
+    return (
+      order.indexOf(a.impact.toLowerCase()) -
+      order.indexOf(b.impact.toLowerCase())
+    );
+  }
+
+  // BDR, Proxy, CLI - in that order
+  function componentSort(a, b) {
+    if (meta.components === undefined) {
+      return 0;
+    }
+    const order = meta.components;
+    return order.indexOf(a.component) - order.indexOf(b.component);
+  }
+
+  for (let type of types) {
+    let filterednotes = note.relnotes.filter(
+      (note) => normalizeType(note.type) === type,
+    );
+    rnotes[type] = filterednotes;
+  }
+
+  console.log(`Writing ${rlout}`);
+
+  appendFileSync(rlout, `---\n`);
+  appendFileSync(
+    rlout,
+    `title: ${note.product} ${note.version} release notes\n`,
+  );
+  appendFileSync(rlout, `navTitle: Version ${note.version}\n`);
+  appendFileSync(rlout, `---\n`);
+  appendFileSync(rlout, "\n");
+  appendFileSync(rlout, `Released: ${note.date}\n`);
+  appendFileSync(rlout, "\n");
+
+  if (note.updated !== undefined) {
+    appendFileSync(rlout, `Updated: ${note.updated}\n`);
+    appendFileSync(rlout, "\n");
+  }
+
+  appendFileSync(rlout, `${note.intro}`);
+  appendFileSync(rlout, "\n");
+
+  if (note.highlights !== undefined) {
+    appendFileSync(rlout, `## Highlights`);
+    appendFileSync(rlout, "\n\n");
+    appendFileSync(rlout, `${note.highlights}`);
+    appendFileSync(rlout, "\n");
+  }
+
+  for (let type of types) {
+    appendFileSync(rlout, `## ${titles(type)}`);
+    appendFileSync(rlout, "\n\n");
+    if (meta.components !== undefined) {
+      appendFileSync(
+        rlout,
+        `<table><thead><tr><th>Component</th><th>Version</th><th>Description</th><th>Addresses</th></tr></thead><tbody>\n`,
+      );
+    } else {
+      appendFileSync(
+        rlout,
+        `<table><thead><tr><th>Description</th><th>Addresses</th></tr></thead><tbody>\n`,
+      );
+    }
+    // TODO: Depending on type, we should sort the notes
+    let sortednotes = rnotes[type].sort(impactSort);
+
+    //rnotes[type].sort(componentSort);
+
+    for (let linenote of sortednotes) {
+      let composednote = "";
+
+      if (linenote.details === undefined) {
+        composednote = linenote.relnote;
+      } else {
+        const predetails = linenote.details; // Preprocess here
+        const compactdetailshtml = converter(predetails);
+        const preheading = linenote.relnote; // Preprocess here
+        const headinghtml = converter(preheading)
+          .replace(/<p>/g, "")
+          .replace(/<\/p>/g, "");
+        composednote = `<details><summary>${headinghtml}</summary>${compactdetailshtml}</details>`;
+      }
+
+      if (linenote.addresses === undefined || linenote.addresses === null) {
+        linenote.addresses = "";
+      }
+
+      if (meta.components !== undefined) {
+        appendFileSync(
+          rlout,
+          `<tr><td>${linenote.component}</td><td>${linenote.component_version}</td><td>${composednote}</td><td>${linenote.addresses}</td></tr>\n`,
+        );
+      } else {
+        appendFileSync(
+          rlout,
+          `<tr><td>${composednote}</td><td>${linenote.addresses}</td></tr>\n`,
+        );
+      }
+    }
+    appendFileSync(rlout, "</tbody></table>\n");
+    appendFileSync(rlout, "\n\n");
+  }
+}
diff --git a/tools/automation/generators/relgen/schema-readme.md b/tools/automation/generators/relgen/schema-readme.md
new file mode 100644
index 00000000000..697c380b5ed
--- /dev/null
+++ b/tools/automation/generators/relgen/schema-readme.md
@@ -0,0 +1,68 @@
+# A guide to the schemas of relgen.js
+
+The relgen.js script uses a set of schemas to generate the docs release notes from yaml release notes and meta files. This document provides a guide to the schemas used by the script.
+
+All files should reside in the src directory of the release notes in the docs. The resulting docs will be generated in the same directory as the src directory.
+
+## Per release note: (any file name allowed in src directory)
+
+product: The product name in full - required.
+version: The version number of the product - required.
+date: The release date of the product - required.
+updated: The date the release notes were last updated - optional.
+meta: # Meta key values - required if the index uses them (see meta.yml)
+  metakey: metavalue # keyvalue pairs for meta.yml
+intro: |
+  Multi-line string that provides a brief introduction to the release notes. Supports Markdown.
+highlights: |
+  Short description of the highlights of the release. Supports Markdown. Can use lists for effect.
+relnotes:
+- relnote: Short text for the release note entry - required
+  component: Component name - required if components is specified in meta.yml
+  component_version: Component version - required if components is specified in meta.yml
+  details: |
+    Optional multi-line string that provides more details about the release note. Supports Markdown.
+  jira: Jira number for tracking - not required but recommended - we can make this required
+  addresses: String with the issue numbers that this release note addresses. May move to markdown if requested - required as a field, not required to have content.
+  type: Type of release note - required. Options are Feature, Enhancement, Change, Bug Fix, Deprecation, Security, or Other.
+  impact: Impact level - required. Determines sort order within a section. Options are Lowest, Low, Medium, High, Highest.
+- relnote: ...
+
+## meta.yml
+
+product: Product name in full - required.
+shortname: Short name for the product - required (used in file name generation)
+title: Title for index page - required
+description: Description for the index page
+columns: # defines the index page columns by 0 based column number
+- 0: # First column
+    label: Release Date # Column heading
+    key: shortdate # key to use when generating the column data - shortdate is the date in short format
+- 1: # Second column
+    label: "EDB Postgres Distributed" # Column heading
+    key: version-link # version-link is the version number with link to the releasenote page
+- 2: # Third column
+    label: "BDR extension" # Column heading
+    key: $bdrextension # Taken from the meta.bdrextension value in the release note yaml
+- 3: # Fourth column
+    label: "PGD CLI" # Column heading
+    key: $pgdcli # Taken from the meta.pgdcli value in the release note yaml
+- 4: # Fifth column
+    label: "PGD Proxy" # Column heading
+    key: $pgdproxy # Taken from the meta.pgdproxy value in the release note yaml
+components: [ "BDR", "PGD CLI", "PGD Proxy", "Utilities" ] # List of valid components for product
+intro: |
+  Introduction to the release notes. Supports Markdown over multiple lines.
+precursor: # An optional list of preceding releases which already have release notes. Required if there are preceding releases to be included. Will be appended to the table and navigation. If meta fields are in use, they are required in the precursor list under meta, too.
+- version: "23.34.1"
+  date: 09 Sep 2024
+- version: "23.34"
+  date: 22 Aug 2024
+- version: "23.33"
+  date: 24 Jun 2024
+- version: "23.32"
+  date: 15 May 2024
diff --git a/tools/automation/generators/relgen/test/index.mdx b/tools/automation/generators/relgen/test/index.mdx
new file mode 100644
index 00000000000..829d4a65456
--- /dev/null
+++ b/tools/automation/generators/relgen/test/index.mdx
@@ -0,0 +1,15 @@
+---
+title: EDB Postgres Distributed 5.6+ release notes
+description: Release notes for EDB Postgres Distributed 5.6 and later
+navigation:
+  - pgd_5.6.1_rel_notes
+  - pgd_5.6.0_rel_notes
+---
+
+The EDB Postgres Distributed documentation describes the latest version of EDB Postgres Distributed 5, including minor releases and patches. The release notes provide information on what was new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature.
+
+
+| Release Date | EDB Postgres Distributed | BDR extension | PGD CLI | PGD Proxy |
+|---|---|---|---|---|
+| 17 Nov 2024 | [5.6.1](./pgd_5.6.1_rel_notes.md) | 5.6.1 | 5.6.1 | 5.6.1 |
+| 15 Oct 2024 | [5.6.0](./pgd_5.6.0_rel_notes.md) | 5.6.0 | 5.6.0 | 5.6.0 |
diff --git a/tools/automation/generators/relgen/test/pgd_5.6.0_rel_notes.mdx b/tools/automation/generators/relgen/test/pgd_5.6.0_rel_notes.mdx
new file mode 100644
index 00000000000..5ae66218926
--- /dev/null
+++ b/tools/automation/generators/relgen/test/pgd_5.6.0_rel_notes.mdx
@@ -0,0 +1,167 @@
+---
+title: EDB Postgres Distributed 5.6.0 release notes
+navTitle: Version 5.6.0
+---
+
+
+Released: 15 October 2024
+
+
+EDB Postgres Distributed 5.6.0 includes a number of enhancements and bug fixes.
+
+
+## Highlights
+- Improved observability with new monitoring functions and SQL views.
+- Improvements to commit scopes including:
+    - GROUP COMMIT and SYNCHRONOUS COMMIT support graceful degrading using DEGRADE ON.
+    - ORIGIN_GROUP support and commit scope inheritance simplify commit scope creation.
+    - Improved synchronous commit behavior around deadlocks.
+    - Metrics for commit scope performance and state.
+- Optimized Topology support for Subscriber-only groups and nodes. (preview)
+- Improved Postgres compliance with support for:
+    - Exclusion Constraints
+    - REINDEX replications
+    - createrole_self_grant
+    - column reference in DEFAULT expressions
+    - CREATE SCHEMA AUTHORIZATION
+- Streaming Transaction support with Decoding Worker.
+
+
+## Enhancements
+
+<table><thead><tr><th>Component</th><th>Version</th><th>Release Note</th><th>Addresses</th></tr></thead><tbody>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Decoding Worker supports Streaming Transactions</summary><p>One of the main advantages of streaming is that the WAL sender sends the partial transaction before it commits, which reduces replication lag. Now, with streaming support, the WAL decoder does the same thing, but it streams to the LCRs segments. Eventually, the WAL sender will read the LCRs and mimic the same behavior of streaming large transactions before they commit. This provides the benefits of decoding worker, such as reduced CPU and disk space, as well as the benefits of streaming, such as reduced lag and disk space, since ".spill" files are not generated. The WAL decoder always streams the transaction to LCRs, but based on downstream requests, the WAL sender either streams the transaction or just mimics the normal BEGIN..COMMIT scenario. In addition to the normal LCRs segment files, we create streaming files with the starting names <code>TR_TXN_&lt;file-name-format&gt;</code> and <code>CAS_TXN_&lt;file-name-format&gt;</code> for each streamed transaction.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Introduce several new monitoring views</summary><p>There are several views providing new information as well as making some existing information easier to discover:</p><ul><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope"><code>bdr.stat_commit_scope</code></a> : Cumulative statistics for commit scopes.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope_state"><code>bdr.stat_commit_scope_state</code></a> : Information about current use of commit scopes by backends.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_receiver"><code>bdr.stat_receiver</code></a> : Per subscription receiver statistics.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_writer"><code>bdr.stat_writer</code></a> : Per writer statistics. There can be multiple writers for each subscription. This also includes additional information about the currently applied transaction.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_raft_state"><code>bdr.stat_raft_state</code></a> : The state of the Raft consensus on the local node.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_raft_followers_state"><code>bdr.stat_raft_followers_state</code></a> : The state of the followers on the Raft leader node (empty on other nodes), also includes approximate clock drift between nodes.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_worker"><code>bdr.stat_worker</code></a> : Detailed information about PGD workers, including what the operation manager worker is currently doing.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_routing_state"><code>bdr.stat_routing_state</code></a> : The state of the connection routing which PGD Proxy uses to route the connections.</li><li><a href="/pgd/5.6/reference/catalogs-visible#bdrstat_routing_candidate_state"><code>bdr.stat_routing_candidate_state</code></a> : Information about routing candidate nodes on the Raft leader node (empty on other nodes).</li></ul></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Support conflict detection for exclusion constraints</summary><p>This allows defining <code>EXCLUDE</code> constraint on table replicated by PGD either with <code>CREATE TABLE</code> or with <code>ALTER TABLE</code> and uses similar conflict detection to resolve conflicts as for <code>UNIQUE</code> constraints.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Detect and resolve deadlocks between synchronous replication wait-for-disconnected sessions and replication writer.</summary><p>This will cancel synchronous replication wait on disconnected sessions if it deadlocks against replication, preventing deadlocks on failovers when using synchronous replication. This only affects commit scopes, not synchronous replication configured via <code>synchronous_standby_names</code>.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Add bdr.bdr_show_all_file_settings() and bdr.bdr_file_settings view</summary><p>Fix: Correct privileges for bdr_superuser. Creating wrapper SECURITY DEFINER functions in the bdr schema and granting access to bdr_superuser to use those:</p><ul><li>bdr.bdr_show_all_file_settings</li><li>bdr.bdr_file_settings</li></ul></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Add create/drop_commit_scope functions</summary><p>Add functions for creating and dropping commit scopes that will eventually deprecate the non-standard functions for adding and removing commit scopes. Notify the user that these will be deprecated in a future version, suggesting the use of the new versions.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Grant additional object permissions to role "bdr_monitor".</summary><p>Permissions for the following objects have been updated to include SELECT permissions for role "bdr_monitor": bdr.node_config</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Add bdr.raft_vacuum_interval and bdr.raft_vacuum_full_interval GUCs to control frequency of automatic Raft catalog vacuuming.</summary><p>This update introduces GUCs to regulate the frequency of automatic vacuuming on the specified catalogs. The GUC <code>bdr.raft_vacuum_interval</code> determines the frequency at which tables are examined for VACUUM and ANALYZE. Autovacuum GUCs and table reloptions are utilized to ascertain the necessity of VACUUM/ANALYZE. The <code>bdr.raft_vacuum_full_interval</code> initiates VACUUM FULL on the tables. Users have the ability to deactivate VACUUM FULL if regular VACUUM suffices to manage bloat.</p></details></td><td>40412</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Add "node_name" to "bdr.node_config_summary"</summary><p>Add "node_name" to the view "bdr.node_config_summary". This makes it consistent with other summary views, which report the name of the object (node, group, etc.) for which the summary is being generated.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>bdr_init_physical: improve local node connection failure logging</summary><p>Ensure that bdr_init_physical emits details about connection failure if the "--local-dsn" parameter is syntactically correct but invalid, e.g., due to an incorrect host or port setting.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>bdr_config: add PG_FLAVOR output</summary><p><code>bdr_config</code> now shows the PostgreSQL "flavor" which BDR was built against, one of:</p><ul><li>COMMUNITY</li><li>EPAS</li><li>EXTENDED</li><li>BDRPG</li></ul></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Enhance warning messages</summary><p>Enhance messages issued during DML and DDL lock acquisition.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Do not send Raft snapshot very aggressively</summary><p>Avoid sending Raft snapshots too frequently as it can slow down follower nodes. Limit the snapshot rate to once in every election timeout, unless there is no other communication between the nodes, in which case send a snapshot every 1/3rd of the election timeout. This will help all nodes keep pace with the leader and improve CPU utilization.</p></details></td><td>37725</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Group-Specific Configuration Options</summary><p>It is now possible to set all top-level and subgroup level options. The following options are available for both top-level and subgroups:</p><ul><li>check_constraints</li><li>enable_wal_decoder</li><li>num_writers</li><li>streaming_mode</li><li>enable_raft</li></ul><p>Subgroups inherit settings from their parent group, but can override them if set in the subgroup.</p></details></td><td>37725</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Subscriber-only node groups have a leader</summary><p>Subscriber-only node groups have a leader elected by top-level Raft. There is now a bdr.leader catalog that tracks leadership of subgroups and subscriber-only nodes. If the node that is the leader of a subscriber-only node group goes down or becomes unreachable, a new leader is elected from that group.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Optimized topology for subscriber-only nodes via the leader of the subscriber-only node group</summary><p>Subscriber-only nodes earlier used to have subscriptions to each data node. Now if optimized topology is enabled, only the leaders of subscriber-only node groups have subscriptions to routing leaders of data node subgroups. The subscriber-only nodegroup leaders route data to other nodes of that subscriber-only nodegroup. This reduces the load on all data nodes so they do not have to send data to all subscriber-only nodes. The GUC <code>bdr.force_full_mesh=false</code> enables this optimized topology. It is off by default.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Introduce new subscription types to support optimized topology</summary><p>New subscription types that forward data from all nodes of the subgroup via a routing leader (mode: l), and those that forward data from the entire cluster via a subscriber-only group leader (mode: w) are introduced.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Introduce version number and timestamp for write leader</summary><p>A write leader has a version. Every time a new leader is elected, the version is incremented and timestamp noted via Raft. This is to build a foundation for better conflict resolution.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Allow use of column reference in DEFAULT expressions</summary><p>Using column references in default expressions is now supported; this is particularly useful with generated columns, for example: <code>ALTER TABLE gtest_tableoid ADD COLUMN c regclass GENERATED ALWAYS AS (tableoid) STORED;</code></p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Support replication of REINDEX</summary><p>Both REINDEX and REINDEX CONCURRENTLY are now replicated commands.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Fix receiver worker being stuck when exiting</summary><p>Receiver worker could get stuck when exiting, waiting for a writer that never actually started. This could on rare occasions break replication after configuration changes until Postgres was restarted.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Reduce performance impact of PGD specific configuration parameters that are sent to client</summary><p>Changes to values of variables <code>bdr.last_committed_lsn</code>, <code>transaction_id</code> and <code>bdr.local_node_id</code> are automatically reported to clients when using CAMO or GROUP COMMIT. This has now been optimized to use less resources.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Allow use of commit scopes defined in parent groups</summary><p>When there is a commit scope defined for top-level group, it can be used by any node in a subgroup and does not need to be redefined for every subgroup anymore. This is particularly useful when combined with <code>ORIGIN\_GROUP</code> keyword to reduce the complexity of commit scope setup.</p></details></td><td></td></tr>
+<tr><td>CLI</td><td>5.6.0</td><td><details><summary>Use bdr.bdr_file_settings view in verify-settings</summary><p>Use bdr.bdr_file_settings view to get the current settings for the proxy.</p></details></td><td></td></tr>
+</tbody></table>
+
+
+## Bug Fixes
+
+<table><thead><tr><th>Component</th><th>Version</th><th>Release Note</th><th>Addresses</th></tr></thead><tbody>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Fixed buffer overrun in the writer</summary><p>Include an extra zero byte at the end of a column value allocation in shared memory queue insert/update/delete messages.</p></details></td><td>98966</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td>Fixes for some race conditions to prevent node sync from entering a hung state with the main subscription disabled.</td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Do not accidentally drop the autopartition rule when a column of the autopartitioned table is dropped.</summary><p>When ALTER TABLE .. DROP COLUMN is used, the object_access_hook is fired with classId set to RelationRelationId, but the subId is set to the attribute number to differentiate it from the DROP TABLE command.</p><p>Therefore, we need to check the subId field to make sure that we are not performing actions that should only be triggered when a table is dropped.</p></details></td><td>40258</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Adjust bdr.alter_table_conflict_detection() to propagate correctly to all nodes</summary><p>Ensure that the propagation of <code>bdr.alter_table_conflict_detection()</code> (as well as the related, deprecated <code>bdr.column_timestamps_(en|dis)able()</code> functions) is carried out correctly to all logical standbys. Previously, this propagation did not occur if the logical standby was not directly attached to the node on which the functions were executed.</p></details></td><td>40258</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Prevent a node group from being created with a duplicate name</summary><p>Ensure that a nodegroup is not inadvertently created with the same name as an existing nodegroup. Failure to do so may result in a complete shutdown of the top-level Raft on all nodes, with no possibility of recovery.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Prevent spurious "local info ... not found" errors when parting nodes</summary><p>Handle the absence of the expected node record gracefully: when a node is being removed, the local node record might have already been deleted, but an attempt could be made to update it anyway. This resulted in harmless "BDR node local info for node ... not found" errors.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Prevent a corner-case situation from being misdiagnosed as a PGD version problem</summary><p>Improve Raft error messages to handle cases where nodes may not be correctly participating in Raft.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Handling duplicate requests in RAFT preventing protocol breakage</summary><p>When processing RAFT entries, it's crucial to handle duplicate requests properly to prevent Raft protocol issues. Duplicate requests can occur when a client retries a request that has already been accepted and applied by the Raft leader. The problem arose when the leader failed to detect the duplicate request due to historical evidence being pruned.</p></details></td><td>37725</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Handling Raft Snapshots: Consensus Log</summary><p>When installing or importing a Raft snapshot, discard the consensus log unless it contains an entry matching the snapshot's last included entry and term.</p></details></td><td>37725</td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Be more restrictive about which index to use during replication for REPLICA IDENTITY FULL tables</summary><p>This fixes various index related errors during replication like: 'could not lookup equality operator for type, optype in opfamily' or 'function "amgettuple" is not defined for index "brinidx"'</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Support createrole_self_grant</summary><p>The createrole_self_grant configuration option affects inherited grants by newly created roles. In previous versions CREATE ROLE/CREATE USER replication would not take this into consideration, resulting in different role privileges on different nodes.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Allow CREATE SCHEMA AUTHORIZATION ... combined with other create operations</summary><p>Previously, this would throw "cannot change current role within security-restricted operation" error.</p></details></td><td></td></tr>
+<tr><td>BDR</td><td>5.6.0</td><td><details><summary>Use base type instead of domain type while casting values</summary><p>This prevents errors when replicating UPDATEs for domains defined as NOT VALID where tables contain data which would not be allowed by current definition of such domain.</p></details></td><td></td></tr>
+<tr><td>Utilities</td><td>5.6.0</td><td>bdr_pg_upgrade - Create logical slot with twophase set to true for PG 14+</td><td></td></tr>
+</tbody></table>
+
+
diff --git a/tools/automation/generators/relgen/test/pgd_5.6.1_rel_notes.mdx b/tools/automation/generators/relgen/test/pgd_5.6.1_rel_notes.mdx
new file mode 100644
index 00000000000..eb2037e4c20
--- /dev/null
+++ b/tools/automation/generators/relgen/test/pgd_5.6.1_rel_notes.mdx
@@ -0,0 +1,24 @@
+---
+title: EDB Postgres Distributed 5.6.1 release notes
+navTitle: Version 5.6.1
+---
+
+
+Released: 17 November 2024
+
+
+EDB Postgres Distributed 5.6.1 includes a number of enhancements and bug fixes.
+
+
+## Highlights
+- Super new feature
+
+
+## Enhancements
+
+<table><thead><tr><th>Component</th><th>Version</th><th>Release Note</th><th>Addresses</th></tr></thead><tbody>
+<tr><td>BDR</td><td>5.6.1</td><td><details><summary>Super new feature</summary><p>This feature rocks so hard</p></details></td><td></td></tr>
+</tbody></table>
+ + diff --git a/tools/automation/generators/relgen/test/src/meta.yml b/tools/automation/generators/relgen/test/src/meta.yml new file mode 100644 index 00000000000..4cf60b70d86 --- /dev/null +++ b/tools/automation/generators/relgen/test/src/meta.yml @@ -0,0 +1,24 @@ +product: EDB Postgres Distributed +shortname: pgd +title: EDB Postgres Distributed 5.6+ release notes +description: Release notes for EDB Postgres Distributed 5.6 and later +columns: +- 0: + label: Release Date + key: shortdate +- 1: + label: "EDB Postgres Distributed" + key: version-link +- 2: + label: "BDR extension" + key: $bdrextension +- 3: + label: "PGD CLI" + key: $pgdcli +- 4: + label: "PGD Proxy" + key: $pgdproxy +components: [ "BDR", "PGD CLI", "PGD Proxy", "Utilities" ] +intro: | + The EDB Postgres Distributed documentation describes the latest version of EDB Postgres Distributed 5, including minor releases and patches. The release notes provide information on what was new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature. + diff --git a/tools/automation/generators/relgen/test/src/relnote_5.6.0.yml b/tools/automation/generators/relgen/test/src/relnote_5.6.0.yml new file mode 100644 index 00000000000..a256d3420ae --- /dev/null +++ b/tools/automation/generators/relgen/test/src/relnote_5.6.0.yml @@ -0,0 +1,429 @@ +product: EDB Postgres Distributed +version: 5.6.0 +date: 15 October 2024 +meta: + bdrextension: 5.6.0 + pgdcli: 5.6.0 + pgdproxy: 5.6.0 +intro: | + EDB Postgres Distributed 5.6.0 includes a number of enhancements and bug fixes. +highlights: | + - Improved observability with new monitoring functions and SQL views. + - Improvements to commit scopes including: + - GROUP COMMIT and SYNCHRONOUS COMMIT support graceful degrading using DEGRADE ON. + - ORIGIN_GROUP support and commit scope inheritance simplify commit scope creation. + - Improved synchronous commit behavior around deadlocks. + - Metrics for commit scope performance and state. + - Optimized Topology support for Subscriber-only groups and nodes. (preview) + - Improved Postgres compliance with support for: + - Exclusion Constraints + - REINDEX replications + - createrole_self_grant + - column reference in DEFAULT expressions + - CREATE SCHEMA AUTHORIZATION + - Streaming Transaction support with Decoding Worker. +relnotes: +- relnote: Decoding Worker supports Streaming Transactions + component: BDR + component_version: 5.6.0 + details: | + One of the main advantages of streaming is that the WAL sender sends the partial transaction before it commits, which reduces replication lag. Now, with streaming support, the WAL decoder does the same thing, but it streams to the LCRs segments. Eventually, the WAL sender will read the LCRs and mimic the same behavior of streaming large transactions before they commit. This provides the benefits of decoding worker, such as reduced CPU and disk space, as well as the benefits of streaming, such as reduced lag and disk space, since ".spill" files are not generated. + The WAL decoder always streams the transaction to LCRs, but based on downstream requests, the WAL sender either streams the transaction or just mimics the normal BEGIN..COMMIT scenario. + In addition to the normal LCRs segment files, we create streaming files with the starting names `TR_TXN_` and `CAS_TXN_` for each streamed transaction. 
+  jira: BDR-5123
+  addresses: ""
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Introduce several new monitoring views
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    There are several views providing new information as well as making some
+    existing information easier to discover:
+    - [`bdr.stat_commit_scope`](/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope) : Cumulative statistics for commit scopes.
+    - [`bdr.stat_commit_scope_state`](/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope_state) : Information about current use of commit scopes by backends.
+    - [`bdr.stat_receiver`](/pgd/5.6/reference/catalogs-visible#bdrstat_receiver) : Per subscription receiver statistics.
+    - [`bdr.stat_writer`](/pgd/5.6/reference/catalogs-visible#bdrstat_writer) : Per writer statistics. There can be multiple writers for each subscription. This also includes additional information about the currently applied transaction.
+    - [`bdr.stat_raft_state`](/pgd/5.6/reference/catalogs-visible#bdrstat_raft_state) : The state of the Raft consensus on the local node.
+    - [`bdr.stat_raft_followers_state`](/pgd/5.6/reference/catalogs-visible#bdrstat_raft_followers_state) : The state of the followers on the Raft leader node (empty on other nodes), also includes approximate clock drift between nodes.
+    - [`bdr.stat_worker`](/pgd/5.6/reference/catalogs-visible#bdrstat_worker) : Detailed information about PGD workers, including what the operation manager worker is currently doing.
+    - [`bdr.stat_routing_state`](/pgd/5.6/reference/catalogs-visible#bdrstat_routing_state) : The state of the connection routing which PGD Proxy uses to route the connections.
+    - [`bdr.stat_routing_candidate_state`](/pgd/5.6/reference/catalogs-visible#bdrstat_routing_candidate_state) : Information about routing candidate nodes on the Raft leader node (empty on other nodes).
+  jira: BDR-5316
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Support conflict detection for exclusion constraints
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    This allows defining `EXCLUDE` constraint on table replicated by PGD either with
+    `CREATE TABLE` or with `ALTER TABLE` and uses similar conflict detection to resolve
+    conflicts as for `UNIQUE` constraints.
+  jira: BDR-4851
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Fixed buffer overrun in the writer
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    Include an extra zero byte at the end of a column value allocation in shared memory queue insert/update/delete messages.
+  jira: BDR-5188
+  addresses: 98966
+  type: Bug Fix
+  severity: High
+  impact: High
+- relnote: Fixes for some race conditions to prevent node sync from entering a hung state with the main subscription disabled.
+  component: BDR
+  component_version: 5.6.0
+  jira: BDR-5041
+  addresses: ""
+  type: Bug Fix
+  severity: High
+  impact: High
+- relnote: Detect and resolve deadlocks between synchronous replication wait-for-disconnected sessions and replication writer.
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    This will cancel synchronous replication wait on disconnected sessions if it deadlocks against replication, preventing deadlocks on failovers when using synchronous replication. This only affects commit scopes, not synchronous replication configured via `synchronous_standby_names`.
+ jira: BDR-5445, BDR-5445, BDR-4104 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: Do not accidentally drop the autopartition rule when a column of the autopartitioned table is dropped. + component: BDR + component_version: 5.6.0 + details: | + When ALTER TABLE .. DROP COLUMN is used, the object_access_hook is fired with `classId` set to RelationRelationId, but the `subId` is set to the attribute number to differentiate it from the DROP TABLE command. + + Therefore, we need to check the subId field to make sure that we are not performing actions that should only be triggered when a table is dropped. + jira: BDR-5418 + addresses: 40258 + type: Bug Fix + severity: High + impact: High +- relnote: Adjust `bdr.alter_table_conflict_detection()` to propagate correctly to all nodes + component: BDR + component_version: 5.6.0 + details: | + Ensure that the propagation of `bdr.alter_table_conflict_detection()` (as well as the related, deprecated `bdr.column_timestamps_(en|dis)able()` functions) is carried out correctly to all logical standbys. Previously, this propagation did not occur if the logical standby was not directly attached to the node on which the functions were executed. + jira: BDR-3850 + addresses: 40258 + type: Bug Fix + severity: High + impact: High +- relnote: Prevent a node group from being created with a duplicate name + component: BDR + component_version: 5.6.0 + details: | + Ensure that a nodegroup is not inadvertently created with the same name as an existing nodegroup. Failure to do so may result in a complete shutdown of the top-level Raft on all nodes, with no possibility of recovery. + jira: BDR-5355 + addresses: "" + type: Bug Fix + severity: High + impact: High +- relnote: Add bdr.bdr_show_all_file_settings() and bdr.bdr_file_settings view + component: BDR + component_version: 5.6.0 + details: | + Fix: Correct privileges for bdr_superuser. Creating wrapper SECURITY DEFINER functions in the bdr schema and granting access to bdr_superuser to use those: + - bdr.bdr_show_all_file_settings + - bdr.bdr_file_settings + jira: BDR-5070 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: Add create/drop_commit_scope functions + component: BDR + component_version: 5.6.0 + details: | + Add functions for creating and dropping commit scopes that will eventually deprecate the non-standard functions for adding and removing commit scopes. Notify the user that these will be deprecated in a future version, suggesting the use of the new versions. + jira: BDR-4073 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: Grant additional object permissions to role "bdr_monitor". + component: BDR + component_version: 5.6.0 + details: | + Permissions for the following objects have been updated to include SELECT permissions for role "bdr_monitor": bdr.node_config + jira: BDR-4885, BDR-5354 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: Add `bdr.raft_vacuum_interval` and `bdr.raft_vacuum_full_interval` GUCs to control frequency of automatic Raft catalog vacuuming. + component: BDR + component_version: 5.6.0 + details: | + This update introduces GUCs to regulate the frequency of automatic vacuuming on the specified catalogs. The GUC `bdr.raft_vacuum_interval` determines the frequency at which tables are examined for VACUUM and ANALYZE. Autovacuum GUCs and table reloptions are utilized to ascertain the necessity of VACUUM/ANALYZE. 
+ The `bdr.raft_vacuum_full_interval` initiates VACUUM FULL on the tables. Users have the ability to deactivate VACUUM FULL if regular VACUUM suffices to manage bloat. + jira: BDR-5424 + addresses: 40412 + type: Enhancement + severity: High + impact: High +- relnote: Prevent spurious "local info ... not found" errors when parting nodes + component: BDR + component_version: 5.6.0 + details: | + Handle the absence of the expected node record gracefully when a node is being removed, the local node record might have already been deleted, but an attempt could be made to update it anyway. This resulted in harmless "BDR node local info for node ... not found" errors. + jira: BDR-5350 + addresses: "" + type: Bug Fix + severity: High + impact: High +- relnote: Prevent a corner-case situation from being misdiagnosed as a PGD version problem + component: BDR + component_version: 5.6.0 + details: | + Improve Raft error messages to handle cases where nodes may not be correctly participating in Raft. + jira: BDR-5362 + addresses: "" + type: Bug Fix + severity: High + impact: High +- relnote: Add "node_name" to "bdr.node_config_summary" + component: BDR + component_version: 5.6.0 + details: | + Add "node_name" to the view "bdr.node_config_summary". This makes it consistent with other summary views, which report the name of the object (node, group, etc.) for which the summary is being generated. + jira: BDR-4818 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: "bdr_init_physical: improve local node connection failure logging" + component: BDR + component_version: 5.6.0 + details: | + Ensure that bdr_init_physical emits details about connection failure if the "--local-dsn" parameter is syntactically correct but invalid, e.g., due to an incorrect host or port setting. + jira: + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: "`bdr_config`: add PG_FLAVOR output" + component: BDR + component_version: 5.6.0 + details: | + `bdr_config` now shows the PostgreSQL "flavor" which BDR was built against, one of: + - COMMUNITY + - EPAS + - EXTENDED + - BDRPG + jira: BDR-4428 + addresses: + type: Enhancement + severity: High + impact: High +- relnote: Enhance warning messages + component: BDR + component_version: 5.6.0 + details: | + Enhance messages issued during DML and DDL lock acquisition. + jira: BDR-4200 + addresses: "" + type: Enhancement + severity: High + impact: High +- relnote: Handling duplicate requests in RAFT preventing protocol breakage + component: BDR + component_version: 5.6.0 + details: | + When processing RAFT entries, it's crucial to handle duplicate requests properly to prevent Raft protocol issues. Duplicate requests can occur when a client retries a request that has already been accepted and applied by the Raft leader. The problem arose when the leader failed to detect the duplicate request due to historical evidence being pruned. + jira: BDR-5275, BDR-4091 + addresses: 37725 + type: Bug Fix + severity: High + impact: High +- relnote: "Handling Raft Snapshots: Consensus Log" + component: BDR + component_version: 5.6.0 + details: | + When installing or importing a Raft snapshot, discard the consensus log unless it contains an entry matching the snapshot's last included entry and term. 
+  jira: BDR-5285
+  addresses: 37725
+  type: Bug Fix
+  severity: High
+  impact: High
+- relnote: Do not send Raft snapshot very aggressively
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    Avoid sending Raft snapshots too frequently as it can slow down follower nodes. Limit the snapshot rate to once in every election timeout, unless there is no other communication between the nodes, in which case send a snapshot every 1/3rd of the election timeout. This will help all nodes keep pace with the leader and improve CPU utilization.
+  jira: BDR-5288
+  addresses: 37725
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Group-Specific Configuration Options
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    It is now possible to set all top-level and subgroup level options. The following options are available for both top-level and subgroups:
+    - check\_constraints
+    - enable\_wal\_decoder
+    - num\_writers
+    - streaming\_mode
+    - enable\_raft
+    Subgroups inherit settings from their parent group, but can override them if set in the subgroup.
+  jira: BDR-4954
+  addresses: 37725
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Subscriber-only node groups have a leader
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    Subscriber-only node groups have a leader elected by top-level Raft. There is now a bdr.leader catalog that tracks leadership of subgroups and subscriber-only nodes. If the node that is the leader of a subscriber-only node group goes down or becomes unreachable, a new leader is elected from that group.
+  jira: BDR-5089
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Optimized topology for subscriber-only nodes via the leader of the subscriber-only node group
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    Subscriber-only nodes earlier used to have subscriptions to each data node. Now if optimized topology is enabled, only the leaders of subscriber-only node groups have subscriptions to routing leaders of data node subgroups. The subscriber-only nodegroup leaders route data to other nodes of that subscriber-only nodegroup. This reduces the load on all data nodes so they do not have to send data to all subscriber-only nodes. The GUC `bdr.force_full_mesh=false` enables this optimized topology. It is off by default.
+  jira: BDR-5214
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Introduce new subscription types to support optimized topology
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    New subscription types that forward data from all nodes of the subgroup via a routing leader (mode: l), and those that forward data from the entire cluster via a subscriber-only group leader (mode: w) are introduced.
+  jira: BDR-5186
+  type: Enhancement
+  severity: High
+  impact: High
+- relnote: Introduce version number and timestamp for write leader
+  component: BDR
+  component_version: 5.6.0
+  details: |
+    A write leader has a version. Every time a new leader is elected, the version is incremented and timestamp noted via Raft. This is to build a foundation for better conflict resolution.
+ jira: BDR-3589 + type: Enhancement + severity: High + impact: High +- relnote: Be more restrictive about which index to use during replication for REPLICA IDENTITY FULL tables + component: BDR + component_version: 5.6.0 + details: | + This fixes various index related errors during replication like: + 'could not lookup equality operator for type, optype in opfamily' + or 'function "amgettuple" is not defined for index "brinidx"' + jira: BDR-5523 , BDR-5361 + type: Bug Fix + severity: High + impact: High +- relnote: Allow use of column reference in DEFAULT expressions + component: BDR + component_version: 5.6.0 + details: | + Using column references in default expressions is now supported, this is particularly + useful with generated columns, for example: + `ALTER TABLE gtest_tableoid ADD COLUMN c regclass GENERATED ALWAYS AS (tableoid) STORED;` + jira: BDR-5385 + type: Enhancement + severity: High + impact: High +- relnote: Support `createrole_self_grant` + component: BDR + component_version: 5.6.0 + details: | + The `createrole_self_grant` configuration option affects inherited grants + by newly created roles. In previous versions `CREATE ROLE`/`CREATE USER` + replication would not take this into consideration, resulting in different + role privileges on different nodes. + jira: BDR-5403 + type: Bug fix + severity: High + impact: High +- relnote: Allow `CREATE SCHEMA AUTHORIZATION ...` combined with other create operations + component: BDR + component_version: 5.6.0 + details: | + Previously, this would throw "cannot change current role within security-restricted operation" error + jira: BDR-5368 + type: Bug fix + severity: High + impact: High +- relnote: Support replication of REINDEX + component: BDR + component_version: 5.6.0 + details: | + Both REINDEX and REINDEX CONCURRENTLY are now replicated commands. + jira: BDR-5363 + type: Enhancement + severity: High + impact: High +- relnote: Use base type instead of domain type while casting values + component: BDR + component_version: 5.6.0 + details: | + This prevents errors when replicating UPDATEs for domains defined as NOT VALID + where tables contain data which would not be allowed by current definition + of such domain. + jira: BDR-5369 + type: Bug fix + severity: High + impact: High +- relnote: Fix receiver worker being stuck when exiting + component: BDR + component_version: 5.6.0 + details: | + Receiver worker could get stuck when exiting, waiting for a writer that never + actually started. This could on rare occasions break replication after + configuration changes until Postgres was restarted. + jira: + type: Enhancement + severity: High + impact: High +- relnote: Reduce performance impact of PGD specific configuration parameters that are sent to client + component: BDR + component_version: 5.6.0 + details: | + Changes to values of variables `bdr.last_committed_lsn`, `transaction_id` + and `bdr.local_node_id` are automatically reported to clients when using + CAMO or GROUP COMMIT. This has now been optimized to use less resources. + jira: BDR-3212 + type: Enhancement + severity: High + impact: High +- relnote: Allow use of commit scopes defined in parent groups + component: BDR + component_version: 5.6.0 + details: | + When there is a commit scope defined for top-level group, it can be used by + any node in a subgroup and does not need to be redefined for every subgroup + anymore. This is particularly useful when combined with `ORIGIN\_GROUP` + keyword to reduce the complexity of commit scope setup. 
+ jira: BDR-5433 + type: Enhancement + severity: High + impact: High +- relnote: bdr_pg_upgrade - Create logical slot with twophase set to true for PG 14+ + component: Utilities + component_version: 5.6.0 + jira: BDR-5306 + type: Bug Fix + severity: High + impact: High +- relnote: Use bdr.bdr_file_settings view in verify-settings + component: PGD CLI + component_version: 5.6.0 + details: | + Use bdr.bdr_file_settings view to get the current settings for the proxy. + jira: BDR-5049 + type: Enhancement + severity: High + impact: High \ No newline at end of file diff --git a/tools/automation/generators/relgen/test/src/relnote_5.6.1.yml b/tools/automation/generators/relgen/test/src/relnote_5.6.1.yml new file mode 100644 index 00000000000..86ddfddea2f --- /dev/null +++ b/tools/automation/generators/relgen/test/src/relnote_5.6.1.yml @@ -0,0 +1,22 @@ +product: EDB Postgres Distributed +version: 5.6.1 +date: 17 November 2024 +meta: + bdrextension: 5.6.1 + pgdcli: 5.6.1 + pgdproxy: 5.6.1 +intro: | + EDB Postgres Distributed 5.6.1 includes a number of enhancements and bug fixes. +highlights: | + - Super new feature +relnotes: +- relnote: Super new feature + component: BDR + component_version: 5.6.1 + details: | + This feature rocks so hard + jira: BDR-6666 + addresses: "" + type: Enhancement + severity: High + impact: High diff --git a/tools/user/import/bareleasenotes/barelease.js b/tools/user/import/bareleasenotes/barelease.js old mode 100644 new mode 100755 index e77dfba342f..8abc39be5a7 --- a/tools/user/import/bareleasenotes/barelease.js +++ b/tools/user/import/bareleasenotes/barelease.js @@ -1,3 +1,5 @@ +#!/usr/bin/env node + import fetch from "node-fetch"; import fs from "fs"; import yargs from "yargs"; @@ -43,6 +45,8 @@ function printReleaseNotesHeader(currentMonth, currentYear) { return `--- title: Cloud Service ${getMonthName(currentMonth)} ${currentYear} release notes navTitle: ${getMonthName(currentMonth)} ${currentYear} +redirect: +- /edb-postgres-ai/cloud-service/release_notes/${currentYear}_${(currentMonth + 1).toString().padStart(2, "0")}_${getShortMonthName(currentMonth)}_rel_notes/ --- EDB Postgres® AI Cloud Service's ${getMonthName( @@ -85,6 +89,8 @@ async function fetchAndProcess(directory, currentYear, currentMonth) { const cleanLines = lines.flat().filter((item) => { return ( item !== "" && + item !== "None" && + item !== "N/A" && !item.startsWith("Improvements and updates for the cloud service") ); });
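The redirect front matter added to barelease.js above is derived from the run date. A hedged sketch of the slug it produces, mirroring the template in printReleaseNotesHeader(); note the 0-based JavaScript month, hence the `+ 1`. The stand-in `getShortMonthName()` below assumes the script's helper returns a lowercase short name, which is not shown in this diff:

```javascript
// Illustrative only: reproduce the slug in the generated redirect path.
const currentYear = 2025;
const currentMonth = 4; // May (JavaScript Date months are 0-based)
const getShortMonthName = (m) =>
  ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"][m];

const slug = `${currentYear}_${(currentMonth + 1)
  .toString()
  .padStart(2, "0")}_${getShortMonthName(currentMonth)}_rel_notes`;

console.log(slug); // -> "2025_05_may_rel_notes"
```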