From af04fa8471c7939e1d9c48c735f77f31e0a6cc1a Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Fri, 14 May 2021 12:00:48 -0600 Subject: [PATCH 1/5] Release 2021-05-14 (#1374) * adding files before rename * removing versions that will be replaced * renaming directories * normalize markdown / links * New PDFs generated by Github Actions * add content for BDR 3.7 per issue #1348 (#1359) * add content for BDR 3.7 per issue #1348 * address @daltjoh's feedback * single-asterisk "extended" indicator * New PDFs generated by Github Actions * Removing join push down from MySQL FDW 2.5.5 release docset * New PDFs generated by Github Actions Co-authored-by: Robert Stringer Co-authored-by: Robert Stringer <62722976+cero-miedo@users.noreply.github.com> Co-authored-by: Robert Stringer <62722976+robert-stringer@users.noreply.github.com> Co-authored-by: Abhilasha Narendra Former-commit-id: d3309d08ce2c8bf28470c8ec96fe98543e4c83b0 --- product_docs/docs/bdr/3.7/index.mdx | 68 +++++ .../2.0.7/01_whats_new.mdx | 4 - .../2.0.7/02_requirements_overview.mdx | 18 +- .../2.0.7/03_architecture_overview.mdx | 6 - .../04_supported_authentication_methods.mdx | 6 +- .../05_installing_the_hadoop_data_adapter.mdx | 190 ++++++------ .../06_updating_the_hadoop_data_adapter.mdx | 39 +++ ...fs_fdw.mdx => 07_features_of_hdfs_fdw.mdx} | 8 +- ...8_configuring_the_hadoop_data_adapter.mdx} | 275 +++++++++++++----- ...x => 09_using_the_hadoop_data_adapter.mdx} | 9 +- ...> 10_identifying_data_adapter_version.mdx} | 4 - ...1_uninstalling_the_hadoop_data_adapter.mdx | 29 ++ .../docs/hadoop_data_adapter/2.0.7/index.mdx | 14 +- .../mongo_data_adapter/5.2.8/01_whats_new.mdx | 4 - .../5.2.8/02_requirements_overview.mdx | 22 +- .../5.2.8/03_architecture_overview.mdx | 6 - .../04_installing_the_mongo_data_adapter.mdx | 217 +++++++------- .../5.2.8/05_features_of_mongo_fdw.mdx | 35 --- .../05_updating_the_mongo_data_adapter.mdx | 37 +++ .../5.2.8/06_features_of_mongo_fdw.mdx | 71 +++++ ...07_configuring_the_mongo_data_adapter.mdx} | 249 ++++++++++++---- ..._example_using_the_mongo_data_adapter.mdx} | 15 +- ...> 09_identifying_data_adapter_version.mdx} | 4 - .../5.2.8/10_limitations.mdx | 11 + ...11_uninstalling_the_mongo_data_adapter.mdx | 27 ++ .../docs/mongo_data_adapter/5.2.8/index.mdx | 12 +- .../mysql_data_adapter/2.5.5/01_whats_new.mdx | 4 - .../2.5.5/02_requirements_overview.mdx | 18 +- .../2.5.5/03_architecture_overview.mdx | 6 - .../04_installing_the_mysql_data_adapter.mdx | 198 ++++++------- .../05_updating_the_mysql_data_adapter.mdx | 39 +++ ...l_fdw.mdx => 06_features_of_mysql_fdw.mdx} | 26 +- ...07_configuring_the_mysql_data_adapter.mdx} | 262 +++++++++++++---- ..._example_using_the_mysql_data_adapter.mdx} | 15 +- ...x => 09_example_import_foreign_schema.mdx} | 4 - ...> 10_identifying_data_adapter_version.mdx} | 4 - ...11_uninstalling_the_mysql_data_adapter.mdx | 31 ++ ...bleshooting.mdx => 12_troubleshooting.mdx} | 6 +- .../docs/mysql_data_adapter/2.5.5/index.mdx | 12 +- 39 files changed, 1318 insertions(+), 687 deletions(-) create mode 100644 product_docs/docs/bdr/3.7/index.mdx create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.7/06_updating_the_hadoop_data_adapter.mdx rename product_docs/docs/hadoop_data_adapter/2.0.7/{06_features_of_hdfs_fdw.mdx => 07_features_of_hdfs_fdw.mdx} (73%) rename product_docs/docs/hadoop_data_adapter/2.0.7/{07_configuring_the_hadoop_data_adapter.mdx => 08_configuring_the_hadoop_data_adapter.mdx} (55%) rename 
product_docs/docs/hadoop_data_adapter/2.0.7/{08_using_the_hadoop_data_adapter.mdx => 09_using_the_hadoop_data_adapter.mdx} (94%) rename product_docs/docs/hadoop_data_adapter/2.0.7/{09_identifying_data_adapter_version.mdx => 10_identifying_data_adapter_version.mdx} (67%) create mode 100644 product_docs/docs/hadoop_data_adapter/2.0.7/11_uninstalling_the_hadoop_data_adapter.mdx delete mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/05_features_of_mongo_fdw.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx rename product_docs/docs/mongo_data_adapter/5.2.8/{06_configuring_the_mongo_data_adapter.mdx => 07_configuring_the_mongo_data_adapter.mdx} (50%) rename product_docs/docs/mongo_data_adapter/5.2.8/{07_example_using_the_mongo_data_adapter.mdx => 08_example_using_the_mongo_data_adapter.mdx} (91%) rename product_docs/docs/mongo_data_adapter/5.2.8/{08_identifying_data_adapter_version.mdx => 09_identifying_data_adapter_version.mdx} (67%) create mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx create mode 100644 product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx create mode 100644 product_docs/docs/mysql_data_adapter/2.5.5/05_updating_the_mysql_data_adapter.mdx rename product_docs/docs/mysql_data_adapter/2.5.5/{05_features_of_mysql_fdw.mdx => 06_features_of_mysql_fdw.mdx} (74%) rename product_docs/docs/mysql_data_adapter/2.5.5/{06_configuring_the_mysql_data_adapter.mdx => 07_configuring_the_mysql_data_adapter.mdx} (50%) rename product_docs/docs/mysql_data_adapter/2.5.5/{07_example_using_the_mysql_data_adapter.mdx => 08_example_using_the_mysql_data_adapter.mdx} (88%) rename product_docs/docs/mysql_data_adapter/2.5.5/{08_example_import_foreign_schema.mdx => 09_example_import_foreign_schema.mdx} (77%) rename product_docs/docs/mysql_data_adapter/2.5.5/{09_identifying_data_adapter_version.mdx => 10_identifying_data_adapter_version.mdx} (67%) create mode 100644 product_docs/docs/mysql_data_adapter/2.5.5/11_uninstalling_the_mysql_data_adapter.mdx rename product_docs/docs/mysql_data_adapter/2.5.5/{10_troubleshooting.mdx => 12_troubleshooting.mdx} (70%) diff --git a/product_docs/docs/bdr/3.7/index.mdx b/product_docs/docs/bdr/3.7/index.mdx new file mode 100644 index 00000000000..ffdff029f4c --- /dev/null +++ b/product_docs/docs/bdr/3.7/index.mdx @@ -0,0 +1,68 @@ +--- +navTitle: BDR +title: "BDR (Bi-Directional Replication)" +directoryDefaults: + description: "BDR (Bi-Directional Replication) is a ground-breaking multi-master replication capability for PostgreSQL clusters that has been in full production status since 2014." +--- + +**BDR (Bi-Directional Replication)** is a ground-breaking multi-master replication capability for PostgreSQL clusters that has been in full production status since 2014. In the complex environment of replication, this 3rd generation of BDR achieves efficiency and accuracy, enabling very high availability of all nodes in a geographically distributed cluster. This solution is for top-tier enterprise applications that require near-zero downtime and near-zero data loss. + +As a standard PostgreSQL extension BDR does this through logical replication of data and schema along with a robust set of features and tooling to manage conflicts and monitor performance. This means applications with the most stringent demands can be run with confidence on PostgreSQL. 
+ +BDR was built from the start to allow for rolling upgrades and developed in conjunction with partners who were replacing costly legacy solutions. + +Two editions are available. BDR Standard provides essential multi-master replication capabilities for delivering row level consistency to address high availability and/or geographically distributed workloads. BDR Enterprise adds advanced conflict-handling and data-loss protection capabilities. + +## BDR Enterprise + +To provide very high availability, avoid data conflicts, and to cope with more advanced usage scenarios, the Enterprise edition includes the following additional features not found in BDR Standard: + +* Eager replication provides conflict free replication by synchronizing across cluster nodes before committing a transaction **\*** +* Commit at most once consistency guards application transactions even in the presence of node failures **\*** +* Parallel apply allows multiple writer processes to apply transactions on the downstream node improving throughput up to 2X +* Single decoding worker improves performance on upstream node by doing logical decoding of WAL once instead of for each downstream node **\*** +* Conflict-free replicated data types (CRDTs) provide mathematically proven consistency in asynchronous multi-master update scenarios +* Column level conflict resolution enables per column last-update wins resolution to merge updates +* Transform triggers execute on incoming data for modifying or advanced programmatic filtering +* Conflict triggers provide custom resolution techniques when a conflict is detected +* Tooling to assess applications for distributed database suitability **\*** + +!!! Important **\*** Indicates feature is only available with EDB Postgres Extended at this time, and is expected to be available with EDB Postgres Advanced 14. +!!! + +BDR Enterprise requires EDB Postgres Extended 11, 12, 13 (formerly known as 2ndQuadrant Postgres) which is SQL compatible with PostgreSQL. For applications requiring Oracle compatibility, BDR Enterprise requires EDB Postgres Advanced 11, 12, 13. + +!!!note + The documentation for the new release 3.7 is available here: + + [BDR 3.7 Enterprise Edition](https://documentation.2ndquadrant.com/bdr3-enterprise/release/latest/) + + **This is a protected area of our website, if you need access please [contact us](https://www.enterprisedb.com/contact)** +!!! + +## BDR Standard + +The Standard edition provides loosely-coupled multi-master logical replication using a mesh topology. This means that you can write to any node and the changes will be sent directly, row-by-row to all the other nodes that are part of the BDR cluster. + +By default BDR uses asynchronous replication to provide row-level eventual consistency, applying changes on the peer nodes only after the local commit. 
+ +The following are included to support very high availability and geographically distributed workloads: + +* Rolling application and database upgrades to address the largest source of downtime +* Origin based conflict detection and row-level last-update wins conflict resolution +* DDL replication with granular locking supports changes to application schema, ideal for use in continuous release environments +* Sub-groups with subscribe-only nodes enable data distribution use cases for applications with very high read scaling requirements +* Sequence handling provides applications different options for generating unique surrogate ids that are multi-node aware +* Tools to monitor operation and verify data consistency + +BDR Standard requires PostgreSQL 11, 12, 13 or EDB Postgres Advanced 11, 12, 13 for applications requiring Oracle compatibility. + +!!!note + The documentation for the new release 3.7 is available here: + + [BDR 3.7 Standard Edition](https://documentation.2ndquadrant.com/bdr3/release/latest/) + + **This is a protected area of our website, if you need access please [contact us](https://www.enterprisedb.com/contact)** +!!! + + diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/01_whats_new.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/01_whats_new.mdx index 2156d8d1599..ca79f3e6a32 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/01_whats_new.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/01_whats_new.mdx @@ -1,9 +1,5 @@ --- title: "What’s New" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/whats_new.html" --- diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx index d38bc4c6c19..4c9969fe4e6 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx @@ -1,14 +1,10 @@ --- title: "Requirements Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/requirements_overview.html" --- ## Supported Versions -The Hadoop Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.5 and above. +The Hadoop Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.6 and above. ## Supported Platforms @@ -16,14 +12,14 @@ The Hadoop Foreign Data Wrapper is supported on the following platforms: **Linux x86-64** -- RHEL 8.x and 7.x -- CentOS 8.x and 7.x -- OEL 8.x and 7.x -- Ubuntu 20.04 and 18.04 LTS -- Debian 10.x and 9.x +> - RHEL 8.x and 7.x +> - CentOS 8.x and 7.x +> - OEL 8.x and 7.x +> - Ubuntu 20.04 and 18.04 LTS +> - Debian 10.x and 9.x **Linux on IBM Power8/9 (LE)** -- RHEL 7.x +> - RHEL 7.x The Hadoop Foreign Data Wrapper supports use of the Hadoop file system using a HiveServer2 interface or Apache Spark using the Spark Thrift Server. 
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/03_architecture_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/03_architecture_overview.mdx index e7699cd534f..87c8fb6d024 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/03_architecture_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/03_architecture_overview.mdx @@ -1,9 +1,5 @@ --- title: "Architecture Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/architecture_overview.html" --- @@ -14,6 +10,4 @@ The Hadoop data wrapper provides an interface between a Hadoop file system and a ![Using a Hadoop distributed file system with Postgres](images/hadoop_distributed_file_system_with_postgres.png) -Using a Hadoop Distributed file system with Postgres - When possible, the Foreign Data Wrapper asks the Hive or Spark server to perform the actions associated with the `WHERE` clause of a `SELECT` statement. Pushing down the `WHERE` clause improves performance by decreasing the amount of data moving across the network. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/04_supported_authentication_methods.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/04_supported_authentication_methods.mdx index bf6ead71d96..24377cbadda 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/04_supported_authentication_methods.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/04_supported_authentication_methods.mdx @@ -1,9 +1,5 @@ --- title: "Supported Authentication Methods" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/supported_authentication_methods.html" --- @@ -50,7 +46,7 @@ Then, when starting the hive server, include the path to the `hive-site.xml` fil Where *path_to_hive-site.xml_file* specifies the complete path to the `hive‑site.xml` file. -When creating the user mapping, you must provide the name of a registered LDAP user and the corresponding password as options. For details, see [Create User Mapping](07_configuring_the_hadoop_data_adapter/#create-user-mapping). +When creating the user mapping, you must provide the name of a registered LDAP user and the corresponding password as options. For details, see [Create User Mapping](08_configuring_the_hadoop_data_adapter/#create-user-mapping). diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx index 7b92a4fb209..f89d2b1e1a2 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/05_installing_the_hadoop_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Installing the Hadoop Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/installing_the_hadoop_data_adapter.html" --- @@ -29,19 +25,19 @@ Before installing the Hadoop Foreign Data Wrapper, you must install the followin Install the `epel-release` package: -```text -yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` Enable the optional, extras, and HA repositories: -```text -subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" -``` +> ```text +> subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -53,9 +49,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -63,22 +59,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing Hadoop Foreign Data Wrapper** After saving your changes to the configuration file, use the following commands to install the Hadoop Foreign Data Wrapper: -``` -yum install edb-as-hdfs_fdw -``` +> ``` +> yum install edb-as-hdfs_fdw +> ``` where `xx` is the server version number. @@ -94,20 +90,20 @@ Before installing the Hadoop Foreign Data Wrapper, you must install the followin Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: -```text -ARCH=$( /bin/arch ) -subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" -``` +> ```text +> ARCH=$( /bin/arch ) +> subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" +> ``` You must also have credentials that allow access to the EDB repository. 
For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -119,9 +115,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -129,22 +125,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing Hadoop Foreign Data Wrapper** After saving your changes to the configuration file, use the below command to install the Hadoop Foreign Data Wrapper: -```text -dnf install edb-as-hdfs_fdw -``` +> ```text +> dnf install edb-as-hdfs_fdw +> ``` When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. @@ -158,16 +154,16 @@ Before installing the Hadoop Foreign Data Wrapper, you must install the followin Install the `epel-release` package: -```text -yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` !!! Note You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -179,9 +175,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -189,22 +185,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
-```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing Hadoop Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: -```text -yum install edb-as-hdfs_fdw -``` +> ```text +> yum install edb-as-hdfs_fdw +> ``` where `xx` is the server version number. @@ -220,19 +216,19 @@ Before installing the Hadoop Foreign Data Wrapper, you must install the followin Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `PowerTools` repository: -```text -dnf config-manager --set-enabled PowerTools -``` +> ```text +> dnf config-manager --set-enabled PowerTools +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -244,9 +240,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -254,22 +250,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing Hadoop Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the Hadoop Foreign Data Wrapper: -```text -dnf install edb-as-hdfs_fdw -``` +> ```text +> dnf install edb-as-hdfs_fdw +> ``` where `xx` is the server version number. 
@@ -293,23 +289,23 @@ The following steps will walk you through on using the EDB apt repository to ins On Debian 9 and Ubuntu: - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` On Debian 10: 1. Set up the EDB repository: - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` - 2. Substitute your EDB credentials for the `username` and `password` in the following command: + 1. Substitute your EDB credentials for the `username` and `password` in the following command: - ```text - sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - ``` + > ```text + > sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + > ``` 3. Add support to your system for secure APT repositories: @@ -317,7 +313,7 @@ The following steps will walk you through on using the EDB apt repository to ins apt-get install apt-transport-https ``` -4. Add the EBD signing key: +4. Add the EDB signing key: ```text wget -q -O - https://username:password diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/06_updating_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/06_updating_the_hadoop_data_adapter.mdx new file mode 100644 index 00000000000..17040838963 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/06_updating_the_hadoop_data_adapter.mdx @@ -0,0 +1,39 @@ +--- +title: "Updating the Hadoop Foreign Data Wrapper" +--- + + + +**Updating an RPM Installation** + +If you have an existing RPM installation of Hadoop Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-repo` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-repo` + +yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-as-hdfs_fdw` + + where `xx` is the server version number. + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-as-hdfs_fdw` + + where `xx` is the server version number. + +**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: + +> `apt-get --only-upgrade install edb-as-hdfs-fdw` +> +> where `xx` is the server version number. 
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/06_features_of_hdfs_fdw.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/07_features_of_hdfs_fdw.mdx similarity index 73% rename from product_docs/docs/hadoop_data_adapter/2.0.7/06_features_of_hdfs_fdw.mdx rename to product_docs/docs/hadoop_data_adapter/2.0.7/07_features_of_hdfs_fdw.mdx index 9a0eae92417..66cacb62851 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/06_features_of_hdfs_fdw.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/07_features_of_hdfs_fdw.mdx @@ -1,9 +1,5 @@ --- title: "Features of the Hadoop Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/features_of_hdfs_fdw.html" --- @@ -20,8 +16,8 @@ Hadoop Foreign Data Wrapper supports column push-down. As a result, the query br ## Automated Cleanup -Hadoop Foreign Data Wrappper allows the cleanup of foreign tables in a single operation using `DROP EXTENSION` command. This feature is specifically useful when a foreign table is set for a temporary purpose, as in case of data migration. The syntax is: +Hadoop Foreign Data Wrappper allows the cleanup of foreign tables in a single operation using `DROP EXTENSION` command. This feature is specifically useful when a foreign table is set for a temporary purpose. The syntax is: - `DROP EXTENSION hdfs_fdw CASCADE;` +> `DROP EXTENSION hdfs_fdw CASCADE;` For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/07_configuring_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/08_configuring_the_hadoop_data_adapter.mdx similarity index 55% rename from product_docs/docs/hadoop_data_adapter/2.0.7/07_configuring_the_hadoop_data_adapter.mdx rename to product_docs/docs/hadoop_data_adapter/2.0.7/08_configuring_the_hadoop_data_adapter.mdx index 46df732aad0..d2462cf1e8a 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/07_configuring_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/08_configuring_the_hadoop_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Configuring the Hadoop Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/configuring_the_hadoop_data_adapter.html" --- @@ -12,31 +8,30 @@ Before creating the extension and the database objects that use the extension, y After installing Postgres, modify the `postgresql.conf` located in: - `/var/lib/edb/as_version/data` +> `/var/lib/edb/as_version/data` Modify the configuration file with your editor of choice, adding the `hdfs_fdw.jvmpath` parameter to the end of the configuration file, and setting the value to specify the location of the Java virtual machine (`libjvm.so`). Set the value of `hdfs_fdw.classpath` to indicate the location of the java class files used by the adapter; use a colon (:) as a delimiter between each path. 
For example: -```text -hdfs_fdw.classpath= -'/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' -``` - - **Note**: - - The jar files (hive-jdbc-1.0.1-standalone.jar and hadoop-common-2.6.4.jar) mentioned in the above example should be copied from respective Hive and Hadoop sources or website to PostgreSQL instance where Hadoop Foreign Data Wrapper is installed. - - If you are using EDB Advanced Server and have a `DATE` column in your database, you must set `edb_redwood_date = OFF` in the `postgresql.conf` file. +> ```text +> hdfs_fdw.classpath= +> '/usr/edb/as12/lib/HiveJdbcClient-1.0.jar:/home/edb/Projects/hadoop_fdw/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/home/edb/Projects/hadoop_fdw/apache-hive-1.0.1-bin/lib/hive-jdbc-1.0.1-standalone.jar' +> ``` +> +> !!! Note +> The jar files (hive-jdbc-1.0.1-standalone.jar and hadoop-common-2.6.4.jar) mentioned in the above example should be copied from respective Hive and Hadoop sources or website to PostgreSQL instance where Hadoop Foreign Data Wrapper is installed. +> +> If you are using EDB Advanced Server and have a `DATE` column in your database, you must set `edb_redwood_date = OFF` in the `postgresql.conf` file. After setting the parameter values, restart the Postgres server. For detailed information about controlling the service on an Advanced Server host, see the EDB Postgres Advanced Server Installation Guide, available at: -[https://www.enterprisedb.com/docs](/epas/latest/) +> Before using the Hadoop Foreign Data Wrapper, you must: -1. Use the [CREATE EXTENSION](#create-extension) command to create the extension on the Postgres host. -2. Use the [CREATE SERVER](#create-server) command to define a connection to the Hadoop file system. -3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Advanced Server database that corresponds to a database that resides on the Hadoop cluster. +> 1. Use the [CREATE EXTENSION](#create-extension) command to create the extension on the Postgres host. +> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the Hadoop file system. +> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. +> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Advanced Server database that corresponds to a database that resides on the Hadoop cluster. @@ -52,21 +47,21 @@ CREATE EXTENSION [IF NOT EXISTS] hdfs_fdw [WITH] [SCHEMA schema_name]; `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. +> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. `schema_name` - Optionally specify the name of the schema in which to install the extension's objects. +> Optionally specify the name of the schema in which to install the extension's objects. 
**Example** The following command installs the `hdfs_fdw` hadoop foreign data wrapper: - `CREATE EXTENSION hdfs_fdw;` +> `CREATE EXTENSION hdfs_fdw;` For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: - . +> . @@ -85,27 +80,27 @@ The role that defines the server is the owner of the server; use the `ALTER SERV `server_name` - Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. +> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. `FOREIGN_DATA_WRAPPER` - Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `hdfs_fdw` foreign data wrapper when connecting to the cluster. +> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `hdfs_fdw` foreign data wrapper when connecting to the cluster. `OPTIONS` - Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: +> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: -| Option | Description | -| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| host | The address or hostname of the Hadoop cluster. The default value is \`localhost\`. | -| port | The port number of the Hive Thrift Server or Spark Thrift Server. The default is \`10000\`. | -| client_type | Specify hiveserver2 or spark as the client type. To use the ANALYZE statement on Spark, you must specify a value of spark; if you do not specify a value for client_type, the default value is hiveserver2. | -| auth_type | The authentication type of the client; specify LDAP or NOSASL. If you do not specify an auth_type, the data wrapper will decide the auth_type value on the basis of the user mapping:- If the user mapping includes a user name and password, the data wrapper will use LDAP authentication. - If the user mapping does not include a user name and password, the data wrapper will use NOSASL authentication. | -| connect_timeout | The length of time before a connection attempt times out. The default value is \`300\` seconds. | -| fetch_size | A user-specified value that is provided as a parameter to the JDBC API setFetchSize. The default value is \`10,000\`. | -| log_remote_sql | If true, logging will include SQL commands executed on the remote hive server and the number of times that a scan is repeated. The default is \`false\`. | -| query_timeout | Use query_timeout to provide the number of seconds after which a request will timeout if it is not satisfied by the Hive server. Query timeout is not supported by the Hive JDBC driver. | -| use_remote_estimate | Include the use_remote_estimate to instruct the server to use EXPLAIN commands on the remote server when estimating processing costs. By default, use_remote_estimate is false, and remote tables are assumed to have \`1000\` rows. 
| +| Option | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| host | The address or hostname of the Hadoop cluster. The default value is \`localhost\`. | +| port | The port number of the Hive Thrift Server or Spark Thrift Server. The default is \`10000\`. | +| client_type | Specify hiveserver2 or spark as the client type. To use the ANALYZE statement on Spark, you must specify a value of spark; if you do not specify a value for client_type, the default value is hiveserver2. | +| auth_type
| The authentication type of the client; specify LDAP or NOSASL. If you do not specify an auth_type, the data wrapper will decide the auth_type value on the basis of the user mapping: if the user mapping includes a user name and password, the data wrapper will use LDAP authentication; if the user mapping does not include a user name and password, the data wrapper will use NOSASL authentication.
| +| connect_timeout | The length of time before a connection attempt times out. The default value is \`300\` seconds. | +| fetch_size | A user-specified value that is provided as a parameter to the JDBC API setFetchSize. The default value is \`10,000\`. | +| log_remote_sql | If true, logging will include SQL commands executed on the remote hive server and the number of times that a scan is repeated. The default is \`false\`. | +| query_timeout | Use query_timeout to provide the number of seconds after which a request will timeout if it is not satisfied by the Hive server. Query timeout is not supported by the Hive JDBC driver. | +| use_remote_estimate | Include the use_remote_estimate to instruct the server to use EXPLAIN commands on the remote server when estimating processing costs. By default, use_remote_estimate is false, and remote tables are assumed to have \`1000\` rows. | **Example** @@ -119,7 +114,7 @@ The foreign server uses the default port (`10000`) for the connection to the cli For more information about using the `CREATE SERVER` command, see: - +> @@ -140,27 +135,27 @@ Please note: the Hadoop Foreign Data Wrapper supports NOSASL and LDAP authentica `role_name` - Use `role_name` to specify the role that will be associated with the foreign server. +> Use `role_name` to specify the role that will be associated with the foreign server. `server_name` - Use `server_name` to specify the name of the server that defines a connection to the Hadoop cluster. +> Use `server_name` to specify the name of the server that defines a connection to the Hadoop cluster. `OPTIONS` - Use the `OPTIONS` clause to specify connection information for the foreign server. If you are using LDAP authentication, provide a: - - `username`: the name of the user on the LDAP server. - - `password`: the password associated with the username. - - If you do not provide a user name and password, the data wrapper will use NOSASL authentication. +> Use the `OPTIONS` clause to specify connection information for the foreign server. If you are using LDAP authentication, provide a: +> +> `username`: the name of the user on the LDAP server. +> +> `password`: the password associated with the username. +> +> If you do not provide a user name and password, the data wrapper will use NOSASL authentication. **Example** The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`: - `CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;` +> `CREATE USER MAPPING FOR enterprisedb SERVER hdfs_server;` If the database host uses LDAP authentication, provide connection credentials when creating the user mapping: @@ -172,7 +167,7 @@ The command creates a user mapping for a role named `enterprisedb` that is assoc For detailed information about the `CREATE USER MAPPING` command, see: - +> @@ -207,57 +202,57 @@ and `table_constraint` is: `table_name` - Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. +> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. 
+> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. `column_name` - Specifies the name of a column in the new table; each column should correspond to a column described on the Hive or Spark server. +> Specifies the name of a column in the new table; each column should correspond to a column described on the Hive or Spark server. `data_type` - Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the Hive or Spark server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the Hive or Spark server. If the server cannot identify a compatible data type, it will return an error. +> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the Hive or Spark server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the Hive or Spark server. If the server cannot identify a compatible data type, it will return an error. `COLLATE collation` - Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. +> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. `INHERITS (parent_table [, ... ])` - Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. +> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. `CONSTRAINT constraint_name` - Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. +> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. `NOT NULL` - Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. +> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. `NULL` - Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. +> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. `CHECK (expr) [NO INHERIT]` - Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. - - A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. - - Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. +> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. 
+> +> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. +> +> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. `DEFAULT default_expr` - Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. +> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. `SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` - To create a foreign table that will allow you to query a table that resides on a Hadoop file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the Hadoop data adapter. - - Use the `OPTIONS` clause to specify the following `options` and their corresponding values: +> To create a foreign table that will allow you to query a table that resides on a Hadoop file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the Hadoop data adapter. +> +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: | option | value | | ---------- | --------------------------------------------------------------------------------------- | @@ -320,7 +315,7 @@ Include the `SERVER` clause to specify the name of the database stored on the Ha For more information about using the `CREATE FOREIGN TABLE` command, see: - +> ### Data Type Mappings @@ -341,3 +336,145 @@ When using the foreign data wrapper, you must create a table on the Postgres ser | TIMESTAMP | TIMESTAMP | | TINYINT | INT2 | | VARCHAR | VARCHAR | + +## DROP EXTENSION + +Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be dropping the Hadoop server, and run the command: + +```text +DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. + +`name` + +> Specify the name of the installed extension. It is optional. +> +> `CASCADE` +> +> Automatically drop objects that depend on the extension. It drops all the other dependent objects too. +> +> `RESTRICT` +> +> Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. + +**Example** + +The following command removes the extension from the existing database: + +> `DROP EXTENSION hdfs_fdw;` + +For more information about using the foreign data wrapper `DROP EXTENSION` command, see: + +> . + +## DROP SERVER + +Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is: + +```text +DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +The role that drops the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To drop a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `DROP SERVER` command. 
+ +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. + +`name` + +> Specify the name of the installed server. It is optional. +> +> `CASCADE` +> +> Automatically drop objects that depend on the server. It should drop all the other dependent objects too. +> +> `RESTRICT` +> +> Do not allow to drop the server if any objects are dependent on it. + +**Example** + +The following command removes a foreign server named `hdfs_server`: + +> `DROP SERVER hdfs_server;` + +For more information about using the `DROP SERVER` command, see: + +> + +## DROP USER MAPPING + +Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server. + +```text +DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. + +`user_name` + +> Specify the user name of the mapping. + +`server_name` + +> Specify the name of the server that defines a connection to the Hadoop cluster. + +**Example** + +The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `hdfs_server`: + +> `DROP USER MAPPING FOR enterprisedb SERVER hdfs_server;` + +For detailed information about the `DROP USER MAPPING` command, see: + +> + +## DROP FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the Hadoop host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it. + +```text +DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + +`name` + +> Specify the name of the foreign table. + +`CASCADE` + +> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. + +`RESTRICT` + +> Do not allow to drop foreign table if any objects are dependent on it. + +**Example** + +```text +DROP FOREIGN TABLE warehouse; +``` + +For more information about using the `DROP FOREIGN TABLE` command, see: + +> diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/08_using_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/09_using_the_hadoop_data_adapter.mdx similarity index 94% rename from product_docs/docs/hadoop_data_adapter/2.0.7/08_using_the_hadoop_data_adapter.mdx rename to product_docs/docs/hadoop_data_adapter/2.0.7/09_using_the_hadoop_data_adapter.mdx index 63abe734ce8..c67ba31ead7 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/08_using_the_hadoop_data_adapter.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/09_using_the_hadoop_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Using the Hadoop Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/using_the_hadoop_data_adapter.html" --- @@ -302,6 +298,5 @@ EXPLAIN (verbose, costs off) SELECT name FROM f_names_tab WHERE a > 3; (3 rows) ``` -Note: - -The same port was being used while creating foreign server because the Spark Thrift Server is compatible with the Hive Thrift Server. Applications using Hiveserver2 would work with Spark except for the behaviour of the `ANALYZE` command and the connection string in the case of `NOSASL`. We recommend using `ALTER SERVER` and changing the `client_type` option if Hive is to be replaced with Spark. +!!! Note + The same port was being used while creating foreign server because the Spark Thrift Server is compatible with the Hive Thrift Server. Applications using Hiveserver2 would work with Spark except for the behaviour of the `ANALYZE` command and the connection string in the case of `NOSASL`. We recommend using `ALTER SERVER` and changing the `client_type` option if Hive is to be replaced with Spark. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/09_identifying_data_adapter_version.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/10_identifying_data_adapter_version.mdx similarity index 67% rename from product_docs/docs/hadoop_data_adapter/2.0.7/09_identifying_data_adapter_version.mdx rename to product_docs/docs/hadoop_data_adapter/2.0.7/10_identifying_data_adapter_version.mdx index f3ed65efddf..fa6e51f1d5c 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/09_identifying_data_adapter_version.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/10_identifying_data_adapter_version.mdx @@ -1,9 +1,5 @@ --- title: "Identifying the Hadoop Foreign Data Wrapper Version" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/identifying_data_adapter_version.html" --- diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/11_uninstalling_the_hadoop_data_adapter.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/11_uninstalling_the_hadoop_data_adapter.mdx new file mode 100644 index 00000000000..2ce7ab1ca46 --- /dev/null +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/11_uninstalling_the_hadoop_data_adapter.mdx @@ -0,0 +1,29 @@ +--- +title: "Uninstalling the Hadoop Foreign Data Wrapper" +--- + + + +**Uninstalling an RPM Package** + +You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: + +- On RHEL or CentOS 7: + + `yum remove edb-as-hdfs_fdw` + +> where `xx` is the server version number. + +- On RHEL or CentOS 8: + + `dnf remove edb-as-hdfs_fdw` + +> where `xx` is the server version number. + +**Uninstalling Hadoop Foreign Data Wrapper on a Debian or Ubuntu Host** + +- To uninstall Hadoop Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. + + `apt-get remove edb-as-hdfs-fdw` + +> where `xx` is the server version number. 
diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/index.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/index.mdx index f25c120ac1d..bb911fb9abe 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/index.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/index.mdx @@ -1,15 +1,5 @@ --- -title: "EDB Postgres Hadoop Foreign Data Wrapper" -directoryDefaults: - description: "EDB Postgres Hadoop Foreign Data Wrapper Version 2.0.7 Documentation and release notes." - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/index.html" - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/conclusion.html" - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/genindex.html" - - "/edb-docs/p/edb-postgres-hadoop-data-adapter/2.0.7" - - "/edb-docs/d/edb-postgres-hadoop-data-adapter/user-guides/user-guide/2.0.7/whats_new.html" +title: "Hadoop Foreign Data Wrapper Guide" --- The Hadoop Foreign Data Wrapper (`hdfs_fdw`) is a Postgres extension that allows you to access data that resides on a Hadoop file system from EDB Postgres Advanced Server. The foreign data wrapper makes the Hadoop file system a read-only data source that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. @@ -20,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-whats_new requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version conclusion +whats_new requirements_overview architecture_overview supported_authentication_methods installing_the_hadoop_data_adapter updating_the_hadoop_data_adapter features_of_hdfs_fdw configuring_the_hadoop_data_adapter using_the_hadoop_data_adapter identifying_data_adapter_version uninstalling_the_hadoop_data_adapter conclusion
diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx index 837a008da9a..864a831e6ff 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/01_whats_new.mdx @@ -1,9 +1,5 @@ --- title: "What’s New" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/whats_new.html" --- diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx index 1e51d9278d1..810cc135c95 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx @@ -1,14 +1,10 @@ --- title: "Requirements Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/requirements_overview.html" --- ## Supported Versions -The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.5 and above. +The MongoDB Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.6 and above. ## Supported Platforms @@ -16,16 +12,12 @@ The MongoDB Foreign Data Wrapper is supported on the following platforms: **Linux x86-64** -- RHEL 8.x/7.x -- CentOS 8.x/7.x -- OEL 8.x/7.x -- Ubuntu 20.04/18.04 LTS -- Debian 10.x/9.x +> - RHEL 8.x/7.x +> - CentOS 8.x/7.x +> - OEL 8.x/7.x +> - Ubuntu 20.04/18.04 LTS +> - Debian 10.x/9.x **Linux on IBM Power8/9 (LE)** -- RHEL 7.x - -## Supported MongoDB C Driver - -The MongoDB Foreign Data Wrapper supports MongoDB C Driver version 1.17.x that is compatible with MongoDB 3.0 and above. However, the MongoDB Foreign Data Wrapper has been tested with the latest version of MongoDB i.e. 4.4. \ No newline at end of file +> - RHEL 7.x diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx index eb227f0c1b8..3e48035f7a0 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/03_architecture_overview.mdx @@ -1,9 +1,5 @@ --- title: "Architecture Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/architecture_overview.html" --- @@ -11,5 +7,3 @@ legacyRedirectsGenerated: The MongoDB data wrapper provides an interface between a MongoDB server and a Postgres database. It transforms a Postgres statement (`SELECT`/`INSERT`/`DELETE`/`UPDATE`) into a query that is understood by the MongoDB database. 
![Using MongoDB FDW with Postgres](images/mongo_server_with_postgres.png) - -Using MongoDB FDW with Postgres diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx index 80fed759824..4a5497b3f03 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/04_installing_the_mongo_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Installing the MongoDB Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/installing_the_mongo_data_adapter.html" --- @@ -29,19 +25,19 @@ Before installing the MongoDB Foreign Data Wrapper, you must install the followi Install the `epel-release` package: -```text - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` Enable the optional, extras, and HA repositories: -```text - subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" -``` +> ```text +> subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials: @@ -53,9 +49,9 @@ After receiving your repository credentials: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -63,22 +59,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing the MongoDB Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: -``` -yum install edb-as-mongo_fdw -``` +> ``` +> yum install edb-as-mongo_fdw +> ``` where `xx` is the server version number. 
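+One way to sanity-check the installation is to confirm that the extension is now visible to the database server. A minimal sketch, assuming you connect to the `edb` database as the `enterprisedb` superuser (adjust both names for your environment):
+
+```text
+psql -U enterprisedb -d edb -c "SELECT name, default_version FROM pg_available_extensions WHERE name = 'mongo_fdw';"
+```
+
+If the query returns a row, the foreign data wrapper is installed and can be created in a database as described later in this guide.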
@@ -94,20 +90,20 @@ Before installing the MongoDB Foreign Data Wrapper, you must install the followi Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: -```text -ARCH=$( /bin/arch ) -subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" -``` +> ```text +> ARCH=$( /bin/arch ) +> subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials: @@ -119,9 +115,9 @@ After receiving your repository credentials: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -129,22 +125,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing the MongoDB Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: -```text -dnf install edb-as-mongo_fdw -``` +> ```text +> dnf install edb-as-mongo_fdw +> ``` When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. @@ -158,16 +154,16 @@ Before installing the MongoDB Foreign Data Wrapper, you must install the followi Install the `epel-release` package: -```text -yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` !!! Note You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). You must also have credentials that allow access to the EDB repository. 
For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -179,8 +175,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -````text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm> ``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -188,22 +185,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -```` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing the MongoDB Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: -```text -yum install edb-as-mongo_fdw -``` +> ```text +> yum install edb-as-mongo_fdw +> ``` where `xx` is the server version number. @@ -219,19 +216,19 @@ Before installing the MongoDB Foreign Data Wrapper, you must install the followi Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `PowerTools` repository: -```text -dnf config-manager --set-enabled PowerTools -``` +> ```text +> dnf config-manager --set-enabled PowerTools +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials: @@ -243,9 +240,9 @@ After receiving your repository credentials: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -253,22 +250,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
-```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing the MongoDB Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MongoDB Foreign Data Wrapper: -```text -dnf install edb-as-mongo_fdw -``` +> ```text +> dnf install edb-as-mongo_fdw +> ``` where `xx` is the server version number. @@ -284,54 +281,54 @@ The following steps will walk you through using the EDB apt repository to instal 1. Assume superuser privileges: - ```text - sudo su – - ``` + > ```text + > sudo su – + > ``` 2. Configure the EDB repository: On Debian 9 and Ubuntu: - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` On Debian 10: 1. Set up the EDB repository: - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` - 2. Substitute your EDB credentials for the `username` and `password` in the following command: + 1. Substitute your EDB credentials for the `username` and `password` in the following command: - ```text - sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - ``` + > ```text + > sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + > ``` 3. Add support to your system for secure APT repositories: - ```text - apt-get install apt-transport-https - ``` + > ```text + > apt-get install apt-transport-https + > ``` 4. Add the EDB signing key: - ```text - wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - - ``` + > ```text + > wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + > ``` 5. Update the repository metadata: - ```text - apt-get update - ``` + > ```text + > apt-get update + > ``` 6. Install the Debian package: - ```text - apt-get install edb-as-mongo-fdw - ``` + > ```text + > apt-get install edb-as-mongo-fdw + > ``` where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/05_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/05_features_of_mongo_fdw.mdx deleted file mode 100644 index 63a355304b4..00000000000 --- a/product_docs/docs/mongo_data_adapter/5.2.8/05_features_of_mongo_fdw.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Features of the MongoDB Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/features_of_mongo_fdw.html" ---- - - - -The key features of the MongoDB Foreign Data Wrapper are listed below: - -## Writable FDW - -The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE` and `DELETE` data in the remote MongoDB collections by inserting, updating and deleting data locally in foreign tables. See also: - -[Example: Using the MongoDB Foreign Data Wrapper](07_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) - -[Data Type Mappings](06_configuring_the_mongo_data_adapter/#data-type-mappings) - -## Where Clause Push-down - -MongoDB Foreign Data Wrapper allows the push-down of `WHERE` clause only when clauses include comparison expressions that have a column and a constant as arguments. WHERE clause push-down is not supported where constant is an array. - -## Connection Pooling - -Mongo_FDW establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. This connection is kept and reused for subsequent queries in the same session. - -## Automated Cleanup - -The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose, as in the case of data migration. The syntax of a `DROP EXTENSION` command is: - - `DROP EXTENSION mongo_fdw CASCADE;` - -For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..aa29e7403f9 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.8/05_updating_the_mongo_data_adapter.mdx @@ -0,0 +1,37 @@ +--- +title: "Updating the MongoDB Foreign Data Wrapper" +--- + + + +**Updating an RPM Installation** + +If you have an existing RPM installation of MongoDB Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-repo` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-repo` + +yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-as-mongo_fdw` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-as-mongo_fdw` + + where `xx` is the server version number. + +**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +To update MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: + +> `apt-get --only-upgrade install edb-as-mongo-fdw edb-libmongoc` +> +> where `xx` is the server version number. 
diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx new file mode 100644 index 00000000000..aec81845037 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.8/06_features_of_mongo_fdw.mdx @@ -0,0 +1,71 @@ +--- +title: "Features of the MongoDB Foreign Data Wrapper" +--- + + + +The key features of the MongoDB Foreign Data Wrapper are listed below: + +## Writable FDW + +The MongoDB Foreign Data Wrapper allows you to modify data on a MongoDB server. Users can `INSERT`, `UPDATE` and `DELETE` data in the remote MongoDB collections by inserting, updating and deleting data locally in foreign tables. See also: + +[Example: Using the MongoDB Foreign Data Wrapper](08_example_using_the_mongo_data_adapter/#example_using_the_mongo_data_adapter) + +[Data Type Mappings](07_configuring_the_mongo_data_adapter/#data-type-mappings) + +## Where Clause Push-down + +MongoDB Foreign Data Wrapper allows the push-down of `WHERE` clause only when clauses include comparison expressions that have a column and a constant as arguments. WHERE clause push-down is not supported where constant is an array. + +## Connection Pooling + +Mongo_FDW establishes a connection to a foreign server during the first query that uses a foreign table associated with the foreign server. This connection is kept and reused for subsequent queries in the same session. + +## Automated Cleanup + +The MongoDB Foreign Data Wrapper allows the cleanup of foreign tables in a single operation using the `DROP EXTENSION` command. This feature is especially useful when a foreign table has been created for a temporary purpose. The syntax of a `DROP EXTENSION` command is: + +> `DROP EXTENSION mongo_fdw CASCADE;` + +For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). + +## Full Document Retrieval + +This feature allows to retrieve documents along with all their fields from collection without any knowledge of the fields in BSON document available in MongoDB's collection. Those retrieved documents are in the JSON format. + +You can retrieve all available fields in a collection residing in MongoDB Foreign Data Wrapper as explained in the following example: + +**Example**: + +The collection in MongoDB Foreign Data Wrapper: + +```text +> db.warehouse.find(); +{ "_id" : ObjectId("58a1ebbaf543ec0b90545859"), "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : ISODate("2014-12-12T07:12:10Z") } +{ "_id" : ObjectId("58a1ebbaf543ec0b9054585a"), "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : ISODate("2015-11-11T08:13:10Z") } +``` + +Steps for retrieving the document: + +1. Create foreign table with a column name `__doc`. The type of the column could be json, jsonb, text or varchar. + +```text +CREATE FOREIGN TABLE test_json(__doc json) SERVER mongo_server OPTIONS (database 'testdb', collection 'warehouse'); +``` + +1. Retrieve the document. 
+ +```text +SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; +``` + +The output: + +```text +edb=#SELECT * FROM test_json ORDER BY __doc::text COLLATE "C"; + __doc --------------------------------------------------------------------------------------------------------------------------------------------------------- +{ "_id" : { "$oid" : "58a1ebbaf543ec0b90545859" }, "warehouse_id" : 1, "warehouse_name" : "UPS", "warehouse_created" : { "$date" : 1418368330000 } } +{ "_id" : { "$oid" : "58a1ebbaf543ec0b9054585a" }, "warehouse_id" : 2, "warehouse_name" : "Laptop", "warehouse_created" : { "$date" : 1447229590000 } } +(2 rows) +``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/06_configuring_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx similarity index 50% rename from product_docs/docs/mongo_data_adapter/5.2.8/06_configuring_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx index 6633ac9328e..b04ccf9e345 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/06_configuring_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/07_configuring_the_mongo_data_adapter.mdx @@ -1,19 +1,15 @@ --- title: "Configuring the MongoDB Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/configuring_the_mongo_data_adapter.html" --- Before using the MongoDB Foreign Data Wrapper, you must: -1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. -2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. -3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a database that resides on the MongoDB cluster. +> 1. Use the [CREATE EXTENSION](#create-extension) command to create the MongoDB Foreign Data Wrapper extension on the Postgres host. +> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MongoDB server. +> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. +> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a table in the Postgres database that corresponds to a database that resides on the MongoDB cluster. @@ -29,21 +25,21 @@ CREATE EXTENSION [IF NOT EXISTS] mongo_fdw [WITH] [SCHEMA schema_name]; `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. +> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. `schema_name` - Optionally specify the name of the schema in which to install the extension's objects. +> Optionally specify the name of the schema in which to install the extension's objects. 
**Example** The following command installs the MongoDB foreign data wrapper: - `CREATE EXTENSION mongo_fdw;` +> `CREATE EXTENSION mongo_fdw;` For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: - . +> . @@ -62,23 +58,21 @@ The role that defines the server is the owner of the server; use the `ALTER SERV `server_name` - Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. +> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. `FOREIGN_DATA_WRAPPER` - Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. +> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mongo_fdw` foreign data wrapper when connecting to the cluster. `OPTIONS` - Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: +> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server object. You can include: | **Option** | **Description** | | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | address | The address or hostname of the Mongo server. The default value is `127.0.0.1`. | | port | The port number of the Mongo Server. Valid range is 0 to 65535. The default value is `27017`. | | authentication_database | The database against which user will be authenticated. This option is only valid with password based authentication. | -| replica_set | The replica set the server is member of. If it is set, the driver will auto-connect to correct primary in the replica set when writing. | -| read_preference | The order of read preference. Options available are: primary \[default], secondary, primaryPreferred, secondaryPreferred, and nearest. | | ssl | Requests an authenticated, encrypted SSL connection. By default, the value is set to `false`. Set the value to `true` to enable ssl. See to understand the options. | | pem_file | SSL option | | pem_pwd | SSL option. | @@ -99,7 +93,7 @@ The foreign server uses the default port (`27017`) for the connection to the cli For more information about using the `CREATE SERVER` command, see: - +> @@ -118,25 +112,25 @@ You must be the owner of the foreign server to create a user mapping for that se `role_name` - Use `role_name` to specify the role that will be associated with the foreign server. +> Use `role_name` to specify the role that will be associated with the foreign server. `server_name` - Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. +> Use `server_name` to specify the name of the server that defines a connection to the MongoDB cluster. `OPTIONS` - Use the `OPTIONS` clause to specify connection information for the foreign server. - - `username`: the name of the user on the MongoDB server. - - `password`: the password associated with the username. +> Use the `OPTIONS` clause to specify connection information for the foreign server. +> +> `username`: the name of the user on the MongoDB server. +> +> `password`: the password associated with the username. 
**Example** The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: - `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` +> `CREATE USER MAPPING FOR enterprisedb SERVER mongo_server;` If the database host uses secure authentication, provide connection credentials when creating the user mapping: @@ -148,7 +142,7 @@ The command creates a user mapping for a role named `enterprisedb` that is assoc For detailed information about the `CREATE USER MAPPING` command, see: - +> @@ -183,57 +177,57 @@ and `table_constraint` is: `table_name` - Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. +> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. +> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. `column_name` - Specifies the name of a column in the new table; each column should correspond to a column described on the MongoDB server. +> Specifies the name of a column in the new table; each column should correspond to a column described on the MongoDB server. `data_type` - Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. +> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MongoDB server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MongoDB server. If the server cannot identify a compatible data type, it will return an error. `COLLATE collation` - Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. +> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. `INHERITS (parent_table [, ... ])` - Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. +> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. `CONSTRAINT constraint_name` - Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. +> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. `NOT NULL` - Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. +> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. 
`NULL` - Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. +> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. `CHECK (expr) [NO INHERIT]` - Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. - - A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. - - Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. +> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. +> +> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. +> +> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. `DEFAULT default_expr` - Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. +> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. `SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` - To create a foreign table that will allow you to query a table that resides on a MongoDB file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. - - Use the `OPTIONS` clause to specify the following `options` and their corresponding values: +> To create a foreign table that will allow you to query a table that resides on a MongoDB file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MongoDB data adapter. +> +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: | option | value | | ---------- | --------------------------------------------------------------------------------- | @@ -279,7 +273,7 @@ Include the `SERVER` clause to specify the name of the database stored on the Mo For more information about using the `CREATE FOREIGN TABLE` command, see: - +> !!! Note MongoDB foreign data wrapper supports the write capability feature. @@ -290,15 +284,154 @@ For more information about using the `CREATE FOREIGN TABLE` command, see: When using the foreign data wrapper, you must create a table on the Postgres server that mirrors the table that resides on the MongoDB server. 
The MongoDB data wrapper will automatically convert the following MongoDB data types to the target Postgres type:

-| **MongoDB (BSON Type)** | **Postgres**                                 |
-| ----------------------- | -------------------------------------------- |
-| ARRAY                   | JSON                                         |
-| BOOL                    | BOOL                                         |
-| BINARY                  | BYTEA                                        |
-| DATE_TIME               | DATE/TIMESTAMP/TIMESTAMPTZ                   |
-| DOCUMENT                | JSON                                         |
-| DOUBLE                  | FLOAT/FLOAT4/FLOAT8/DOUBLE PRECISION/NUMERIC |
-| INT32                   | SMALLINT/INT2/INT/INTEGER/INT4               |
-| INT64                   | BIGINT/INT8                                  |
-| OID                     | NAME                                         |
-| UTF8                    | BPCHAR/VARCHAR/CHARCTER VARYING/TEXT         |
+| **MongoDB (BSON Type)** | **Postgres**                                 |
+| ----------------------- | -------------------------------------------- |
+| ARRAY                   | JSON                                         |
+| BOOL                    | BOOL                                         |
+| BINARY                  | BYTEA                                        |
+| DATE_TIME               | DATE/TIMESTAMP/TIMESTAMPTZ                   |
+| DOCUMENT                | JSON                                         |
+| DOUBLE                  | FLOAT/FLOAT4/FLOAT8/DOUBLE PRECISION/NUMERIC |
+| INT32                   | SMALLINT/INT2/INT/INTEGER/INT4               |
+| INT64                   | BIGINT/INT8                                  |
+| OID                     | NAME                                         |
+| UTF8                    | BPCHAR/VARCHAR/CHARACTER VARYING/TEXT        |
+
+## DROP EXTENSION
+
+Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you want to drop the extension, and run the command:
+
+```text
+DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ];
+```
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the installed extension.
+
+`CASCADE`
+
+> Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects.
+
+`RESTRICT`
+
+> Do not allow the extension to be dropped if any objects, other than its member objects and other extensions listed in the same `DROP` command, depend on it.
+
+**Example**
+
+The following command removes the extension from the existing database:
+
+> `DROP EXTENSION mongo_fdw;`
+
+For more information about using the foreign data wrapper `DROP EXTENSION` command, see:
+
+> .
+
+## DROP SERVER
+
+Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is:
+
+```text
+DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
+```
+
+Only the owner of a foreign server (or a superuser) can drop it; use the `ALTER SERVER` command to reassign ownership of a foreign server.
+
+**Parameters**
+
+`IF EXISTS`
+
+> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exist.
+
+`name`
+
+> Specify the name of the installed server.
+
+`CASCADE`
+
+> Automatically drop objects that depend on the server, and in turn all objects that depend on those objects.
+
+`RESTRICT`
+
+> Do not allow the server to be dropped if any objects depend on it.
+
+**Example**
+
+The following command removes a foreign server named `mongo_server`:
+
+> `DROP SERVER mongo_server;`
+
+For more information about using the `DROP SERVER` command, see:
+
+> 
+
+## DROP USER MAPPING
+
+Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server.
+ +```text +DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. + +`user_name` + +> Specify the user name of the mapping. + +`server_name` + +> Specify the name of the server that defines a connection to the MongoDB cluster. + +**Example** + +The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mongo_server`: + +> `DROP USER MAPPING FOR enterprisedb SERVER mongo_server;` + +For detailed information about the `DROP USER MAPPING` command, see: + +> + +## DROP FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the MongoDB host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it. + +```text +DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + +`name` + +> Specify the name of the foreign table. + +`CASCADE` + +> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. + +`RESTRICT` + +> Do not allow to drop foreign table if any objects are dependent on it. + +**Example** + +```text +DROP FOREIGN TABLE warehouse; +``` + +For more information about using the `DROP FOREIGN TABLE` command, see: + +> diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/07_example_using_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx similarity index 91% rename from product_docs/docs/mongo_data_adapter/5.2.8/07_example_using_the_mongo_data_adapter.mdx rename to product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx index e4e210ad8a6..38f2f35b122 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/07_example_using_the_mongo_data_adapter.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/08_example_using_the_mongo_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Example: Using the MongoDB Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
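+The optional clauses described above can be combined. For example, the following sketch removes the `warehouse` foreign table used in this guide without raising an error if it has already been dropped, and also removes any objects that depend on it:
+
+```text
+DROP FOREIGN TABLE IF EXISTS warehouse CASCADE;
+```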
- - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/example_using_the_mongo_data_adapter.html" --- @@ -20,7 +16,7 @@ CREATE SERVER mongo_server OPTIONS (address '127.0.0.1', port '27017'); -- create user mapping -CREATE USER MAPPING FOR postgres +CREATE USER MAPPING FOR enterprisedb SERVER mongo_server OPTIONS (username 'mongo_user', password 'mongo_pass'); @@ -105,4 +101,13 @@ EXPLAIN SELECT * FROM warehouse WHERE warehouse_id = 1; -- collect data distribution statistics ANALYZE warehouse; + +-- drop foreign table +DROP FOREIGN TABLE warehouse; + +-- drop user mapping +DROP USER MAPPING FOR enterprisedb SERVER mongo_server; + +-- drop server +DROP SERVER mongo_server; ``` diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/08_identifying_data_adapter_version.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx similarity index 67% rename from product_docs/docs/mongo_data_adapter/5.2.8/08_identifying_data_adapter_version.mdx rename to product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx index 49886bdff87..b1d0564acc4 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/08_identifying_data_adapter_version.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/09_identifying_data_adapter_version.mdx @@ -1,9 +1,5 @@ --- title: "Identifying the MongoDB Foreign Data Wrapper Version" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/identifying_data_adapter_version.html" --- diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx new file mode 100644 index 00000000000..acdd2f2383c --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.8/10_limitations.mdx @@ -0,0 +1,11 @@ +--- +title: "Limitations" +--- + + + +The following limitations apply to MongoDB Foreign Data Wrapper: + +- If the BSON document key contains uppercase letters or occurs within a nested document, MongoDB Foreign Data Wrapper requires the corresponding column names to be declared in double quotes. +- PostgreSQL limits column names to 63 characters by default. You can increase the `NAMEDATALEN` constant in `src/include/pg_config_manual.h`, compile, and re-install when column names extend beyond 63 characters. +- MongoDB Foreign Data Wrapper errors out on BSON field which is not listed in the known types (For example: byte, arrays). It throws an error: `Cannot convert BSON type to column type`. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx new file mode 100644 index 00000000000..8313284a962 --- /dev/null +++ b/product_docs/docs/mongo_data_adapter/5.2.8/11_uninstalling_the_mongo_data_adapter.mdx @@ -0,0 +1,27 @@ +--- +title: "Uninstalling the MongoDB Foreign Data Wrapper" +--- + + + +**Uninstalling an RPM Package** + +You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: + +- On RHEL or CentOS 7: + + `yum remove edb-as-mongo_fdw` + +- On RHEL or CentOS 8: + + `dnf remove edb-as-mongo_fdw` + +Where `xx` is the server version number. 
+ +**Uninstalling MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +- To uninstall MongoDB Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. + + `apt-get remove edb-as-mongo-fdw` + +Where `xx` is the server version number. diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx index 7103853ae39..5117f306aad 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/index.mdx @@ -1,15 +1,5 @@ --- title: "MongoDB Foreign Data Wrapper Guide" -directoryDefaults: - description: "EDB Postgres MongoDB Foreign Data Wrapper Version 5.2.8 Documentation and release notes." - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/index.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/conclusion.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/whats_new.html" - - "/edb-docs/d/edb-postgres-mongodb-data-adapter/user-guides/user-guide/5.2.8/genindex.html" - - "/edb-docs/p/edb-postgres-mongodb-data-adapter/5.2.8" --- The MongoDB Foreign Data Wrapper (`mongo_fdw`) is a Postgres extension that allows you to access data that resides on a MongoDB database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. @@ -20,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version conclusion +whats_new requirements_overview architecture_overview installing_the_mongo_data_adapter updating_the_mongo_data_adapter features_of_mongo_fdw configuring_the_mongo_data_adapter example_using_the_mongo_data_adapter identifying_data_adapter_version limitations uninstalling_the_mongo_data_adapter conclusion
diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/01_whats_new.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/01_whats_new.mdx index 2a9e74f2cdb..f01d86b275d 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/01_whats_new.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/01_whats_new.mdx @@ -1,9 +1,5 @@ --- title: "What’s New" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/whats_new.html" --- diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx index d2727fac08f..3b8ed138c80 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx @@ -1,14 +1,10 @@ --- title: "Requirements Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/requirements_overview.html" --- ## Supported Versions -The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.5 and above. +The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9.6 and above. ## Supported Platforms @@ -16,12 +12,12 @@ The MySQL Foreign Data Wrapper is supported on the following platforms: **Linux x86-64** -- RHEL 8.x/7.x -- CentOS 8.x/7.x -- OEL 8.x/7.x -- Ubuntu 20.04/18.04 LTS -- Debian 10.x/9.x +> - RHEL 8.x/7.x +> - CentOS 8.x/7.x +> - OEL 8.x/7.x +> - Ubuntu 20.04/18.04 LTS +> - Debian 10.x/9.x **Linux on IBM Power8/9 (LE)** -- RHEL 7.x +> - RHEL 7.x diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/03_architecture_overview.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/03_architecture_overview.mdx index f855eea4741..fba2349b118 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/03_architecture_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/03_architecture_overview.mdx @@ -1,9 +1,5 @@ --- title: "Architecture Overview" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/architecture_overview.html" --- @@ -11,5 +7,3 @@ legacyRedirectsGenerated: The MySQL data wrapper provides an interface between a MySQL server and a Postgres database. It transforms a Postgres statement (`SELECT`/`INSERT`/`DELETE`/`UPDATE`) into a query that is understood by the MySQL database. ![Using mysql_fdw with Postgres](images/mysql_server_with_postgres.png) - -Using mysql_fdw with Postgres diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/04_installing_the_mysql_data_adapter.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/04_installing_the_mysql_data_adapter.mdx index d971ed9e206..deffe4b7dba 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/04_installing_the_mysql_data_adapter.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/04_installing_the_mysql_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Installing the MySQL Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/installing_the_mysql_data_adapter.html" --- @@ -29,19 +25,19 @@ Before installing the MySQL Foreign Data Wrapper, you must install the following Install the `epel-release` package: -```text -yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` Enable the optional, extras, and HA repositories: -```text -subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" -``` +> ```text +> subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -53,9 +49,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -63,26 +59,26 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing MySQL Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MySQL Foreign Data Wrapper: -```text -yum install edb-as-mysql_fdw -``` +> ```text +> yum install edb-as-mysql_fdw +> ``` where `xx` is the server version number, and `x` is the supported release version number of MySQL. For example, to install MySQL 5.0 on RHEL 7: - `yum install edb-as-mysql5_fdw` +> `yum install edb-as-mysql5_fdw` !!! Note MySQL 8.0 and MySQL 5.0 RPMs are available for RHEL 7 platform. @@ -99,20 +95,20 @@ Before installing the MySQL Foreign Data Wrapper, you must install the following Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: -```text -ARCH=$( /bin/arch ) -subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" -``` +> ```text +> ARCH=$( /bin/arch ) +> subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" +> ``` You must also have credentials that allow access to the EDB repository. 
For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -124,9 +120,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -134,22 +130,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing MySQL Foreign Data Wrapper** After saving your changes to the configuration file, use the below command to install the MySQL Foreign Data Wrapper: -```text -dnf install edb-as-mysql8_fdw -``` +> ```text +> dnf install edb-as-mysql8_fdw +> ``` When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. @@ -163,16 +159,16 @@ Before installing the MySQL Foreign Data Wrapper, you must install the following Install the `epel-release` package: -```text -yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -``` +> ```text +> yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +> ``` !!! Note You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -184,9 +180,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -194,26 +190,26 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
-```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing MySQL Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MySQL Foreign Data Wrapper: -```text -yum install edb-as-mysql_fdw -``` +> ```text +> yum install edb-as-mysql_fdw +> ``` where `xx` is the server version number, and `x` is the supported release version number of MySQL. For example, to install MySQL 5.0 on CentOS 7: - `yum install edb-as-mysql5_fdw` +> `yum install edb-as-mysql5_fdw` !!! Note MySQL 8.0 and MySQL 5.0 RPMs are available for CentOS 7 platform. @@ -230,19 +226,19 @@ Before installing the MySQL Foreign Data Wrapper, you must install the following Install the `epel-release` package: -```text -dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -``` +> ```text +> dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +> ``` Enable the `PowerTools` repository: -```text -dnf config-manager --set-enabled PowerTools -``` +> ```text +> dnf config-manager --set-enabled PowerTools +> ``` You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: - +> After receiving your repository credentials you can: @@ -254,9 +250,9 @@ After receiving your repository credentials you can: To create the repository configuration file, assume superuser privileges, and invoke the following command: -```text -dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm -``` +> ```text +> dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +> ``` The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. @@ -264,22 +260,22 @@ The repository configuration file is named `edb.repo`. The file resides in `/etc After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. -```text -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` +> ```text +> [edb] +> name=EnterpriseDB RPMs $releasever - $basearch +> baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +> enabled=1 +> gpgcheck=1 +> gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +> ``` **Installing MySQL Foreign Data Wrapper** After saving your changes to the configuration file, use the following command to install the MySQL Foreign Data Wrapper: -```text -dnf install edb-as-mysql8_fdw -``` +> ```text +> dnf install edb-as-mysql8_fdw +> ``` where `xx` is the server version number. 
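+For example, to install the MySQL 8.0 foreign data wrapper for EDB Postgres Advanced Server 13 (a hypothetical server version used only for illustration; substitute your own server version number), the command would typically be:
+
+> `dnf install edb-as13-mysql8_fdw`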
@@ -303,23 +299,23 @@ sudo su – On Debian 9: - ```text - sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` On Debian 10: 1. Set up the EDB repository: - ```text - sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' - ``` + > ```text + > sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + > ``` - 2. Substitute your EDB credentials for the `username` and `password` in the following command: + 1. Substitute your EDB credentials for the `username` and `password` in the following command: - ```text - sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' - ``` + > ```text + > sh -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + > ``` 2. Add support to your system for secure APT repositories: @@ -327,20 +323,20 @@ sudo su – apt-get install apt-transport-https ``` -3. Add the EBD signing key: +1. Add the EBD signing key: ```text wget -q -O - https://username:password @apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - ``` -4. Update the repository metadata: +1. Update the repository metadata: ```text apt-get update ``` -5. Install DEB package: +1. Install DEB package: ```text apt-get install edb-as-mysql-fdw diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/05_updating_the_mysql_data_adapter.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/05_updating_the_mysql_data_adapter.mdx new file mode 100644 index 00000000000..962cbf3d2c6 --- /dev/null +++ b/product_docs/docs/mysql_data_adapter/2.5.5/05_updating_the_mysql_data_adapter.mdx @@ -0,0 +1,39 @@ +--- +title: "Updating the MySQL Foreign Data Wrapper" +--- + + + +**Updating an RPM Installation** + +If you have an existing RPM installation of MySQL Foreign Data Wrapper, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-repo` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-repo` + +yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: + +- On RHEL or CentOS 7: + + > `yum upgrade edb-as-mysql_fdw` + + where `xx` is the server version number, and `x` is the supported release version number of MySQL. For example, to upgrade MySQL 5.0 on RHEL 7: + + > `yum upgrade edb-as-mysql5_fdw` + +- On RHEL or CentOS 8: + + > `dnf upgrade edb-as-mysql8_fdw` + +**Updating MongoDB Foreign Data Wrapper on a Debian or Ubuntu Host** + +To update MySQL Foreign Data Wrapper on a Debian or Ubuntu Host, use the following command: + +> `apt-get --only-upgrade install edb-as-mysql_fdw` + +where `xx` is the server version number, and `x` is the supported release version number of MySQL. 
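+After the packages have been updated, you may want to confirm which version of the extension is active in a given database. The following is a minimal sketch that relies only on the standard Postgres catalogs (the exact version string returned depends on your installation):
+
+> ```text
+> SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'mysql_fdw';
+> ```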
diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/05_features_of_mysql_fdw.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/06_features_of_mysql_fdw.mdx similarity index 74% rename from product_docs/docs/mysql_data_adapter/2.5.5/05_features_of_mysql_fdw.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/06_features_of_mysql_fdw.mdx index 9bb16604d2d..be1ab179426 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/05_features_of_mysql_fdw.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/06_features_of_mysql_fdw.mdx @@ -1,9 +1,5 @@ --- title: "Features of the MySQL Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/features_of_mysql_fdw.html" --- @@ -19,9 +15,9 @@ MySQL Foreign Data Wrapper provides the write capability. Users can insert, upda See also: -[Example: Using the MySQL Foreign Data Wrapper](07_example_using_the_mysql_data_adapter/#example_using_the_mysql_data_adapter) +[Example: Using the MySQL Foreign Data Wrapper](08_example_using_the_mysql_data_adapter/#example_using_the_mysql_data_adapter) -[Data Type Mappings](06_configuring_the_mysql_data_adapter/#data-type-mappings) +[Data Type Mappings](07_configuring_the_mysql_data_adapter/#data-type-mappings) ## Connection Pooling @@ -45,12 +41,24 @@ MySQL Foreign Data Wrapper supports Import Foreign Schema which enables the loca See also: -[Example: Import Foreign Schema](08_example_import_foreign_schema/#example_import_foreign_schema) +[Example: Import Foreign Schema](09_example_import_foreign_schema/#example_import_foreign_schema) ## Automated Cleanup -MySQL Foreign Data Wrappper allows the cleanup of foreign tables in a single operation using `DROP EXTENSION` command. This feature is specifically useful when a foreign table is set for a temporary purpose, as in case of data migration. The syntax: +MySQL Foreign Data Wrappper allows the cleanup of foreign tables in a single operation using `DROP EXTENSION` command. This feature is specifically useful when a foreign table is set for a temporary purpose. The syntax: - `DROP EXTENSION mysql_fdw CASCADE;` +> `DROP EXTENSION mysql_fdw CASCADE;` For more information, see [DROP EXTENSION](https://www.postgresql.org/docs/current/sql-dropextension.html). + +## Join Push-down + +MySQL Foreign Data Wrapper supports join push-down. It pushes the joins between two foreign tables from the same remote MySQL server to a remote server, thereby enhancing the performance. + +!!! Note + - Currently, joins involving only relational and arithmetic operators in join-clauses are pushed down to avoid any potential join failure. + - Only the INNER and LEFT/RIGHT OUTER joins are supported. 
+ +See also: + +[Example: Join Push-down](10_example_join_push_down/#example_join_push_down) diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/06_configuring_the_mysql_data_adapter.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/07_configuring_the_mysql_data_adapter.mdx similarity index 50% rename from product_docs/docs/mysql_data_adapter/2.5.5/06_configuring_the_mysql_data_adapter.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/07_configuring_the_mysql_data_adapter.mdx index b619fe1df01..f733e6dccf9 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/06_configuring_the_mysql_data_adapter.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/07_configuring_the_mysql_data_adapter.mdx @@ -1,19 +1,15 @@ --- title: "Configuring the MySQL Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/configuring_the_mysql_data_adapter.html" --- Before using the MySQL Foreign Data Wrapper, you must: -1. Use the [CREATE EXTENSION](#create-extension) command to create the MySQL Foreign Data Wrapper extension on the Postgres host. -2. Use the [CREATE SERVER](#create-server) command to define a connection to the MySQL server. -3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. -4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a single table in the Postgres database that corresponds to a table that resides on the MySQL server or use the [IMPORT FOREIGN SCHEMA](#import-foreign-schema) command to import multiple remote tables in the local schema. +> 1. Use the [CREATE EXTENSION](#create-extension) command to create the MySQL Foreign Data Wrapper extension on the Postgres host. +> 2. Use the [CREATE SERVER](#create-server) command to define a connection to the MySQL server. +> 3. Use the [CREATE USER MAPPING](#create-user-mapping) command to define a mapping that associates a Postgres role with the server. +> 4. Use the [CREATE FOREIGN TABLE](#create-foreign-table) command to define a single table in the Postgres database that corresponds to a table that resides on the MySQL server or use the [IMPORT FOREIGN SCHEMA](#import-foreign-schema) command to import multiple remote tables in the local schema. @@ -29,21 +25,21 @@ CREATE EXTENSION [IF NOT EXISTS] mysql_fdw [WITH] [SCHEMA schema_name]; `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. +> Include the `IF NOT EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the same name already exists. `schema_name` - Optionally specify the name of the schema in which to install the extension's objects. +> Optionally specify the name of the schema in which to install the extension's objects. **Example** The following command installs the MySQL foreign data wrapper: - `CREATE EXTENSION mysql_fdw;` +> `CREATE EXTENSION mysql_fdw;` For more information about using the foreign data wrapper `CREATE EXTENSION` command, see: - . +> . @@ -62,15 +58,15 @@ The role that defines the server is the owner of the server; use the `ALTER SERV `server_name` - Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. 
+> Use `server_name` to specify a name for the foreign server. The server name must be unique within the database. `FOREIGN_DATA_WRAPPER` - Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mysql_fdw` foreign data wrapper when connecting to the cluster. +> Include the `FOREIGN_DATA_WRAPPER` clause to specify that the server should use the `mysql_fdw` foreign data wrapper when connecting to the cluster. `OPTIONS` - Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: +> Use the `OPTIONS` clause of the `CREATE SERVER` command to specify connection information for the foreign server. You can include: | Option | Description | | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -97,7 +93,7 @@ The foreign server uses the default port (`3306`) for the connection to the clie For more information about using the `CREATE SERVER` command, see: - +> @@ -116,25 +112,25 @@ You must be the owner of the foreign server to create a user mapping for that se `role_name` - Use `role_name` to specify the role that will be associated with the foreign server. +> Use `role_name` to specify the role that will be associated with the foreign server. `server_name` - Use `server_name` to specify the name of the server that defines a connection to the MySQL cluster. +> Use `server_name` to specify the name of the server that defines a connection to the MySQL cluster. `OPTIONS` - Use the `OPTIONS` clause to specify connection information for the foreign server. - - `username`: the name of the user on the MySQL server. - - `password`: the password associated with the username. +> Use the `OPTIONS` clause to specify connection information for the foreign server. +> +> `username`: the name of the user on the MySQL server. +> +> `password`: the password associated with the username. **Example** The following command creates a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mysql_server`: - `CREATE USER MAPPING FOR enterprisedb SERVER mysql_server;` +> `CREATE USER MAPPING FOR enterprisedb SERVER mysql_server;` If the database host uses secure authentication, provide connection credentials when creating the user mapping: @@ -146,7 +142,7 @@ The command creates a user mapping for a role named `public` that is associated For detailed information about the `CREATE USER MAPPING` command, see: - +> @@ -181,57 +177,57 @@ and `table_constraint` is: `table_name` - Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. +> Specifies the name of the foreign table; include a schema name to specify the schema in which the foreign table should reside. `IF NOT EXISTS` - Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. +> Include the `IF NOT EXISTS` clause to instruct the server to not throw an error if a table with the same name already exists; if a table with the same name exists, the server will issue a notice. `column_name` - Specifies the name of a column in the new table; each column should correspond to a column described on the MySQL server. 
+> Specifies the name of a column in the new table; each column should correspond to a column described on the MySQL server. `data_type` - Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MySQL server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MySQL server. If the server cannot identify a compatible data type, it will return an error. +> Specifies the data type of the column; when possible, specify the same data type for each column on the Postgres server and the MySQL server. If a data type with the same name is not available, the Postgres server will attempt to cast the data type to a type compatible with the MySQL server. If the server cannot identify a compatible data type, it will return an error. `COLLATE collation` - Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. +> Include the `COLLATE` clause to assign a collation to the column; if not specified, the column data type's default collation is used. `INHERITS (parent_table [, ... ])` - Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. +> Include the `INHERITS` clause to specify a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. `CONSTRAINT constraint_name` - Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. +> Specify an optional name for a column or table constraint; if not specified, the server will generate a constraint name. `NOT NULL` - Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. +> Include the `NOT NULL` keywords to indicate that the column is not allowed to contain null values. `NULL` - Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. +> Include the `NULL` keywords to indicate that the column is allowed to contain null values. This is the default. `CHECK (expr) [NO INHERIT]` - Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. - - A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. - - Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. +> Use the `CHECK` clause to specify an expression that produces a Boolean result that each row in the table must satisfy. A check constraint specified as a column constraint should reference that column's value only, while an expression appearing in a table constraint can reference multiple columns. +> +> A `CHECK` expression cannot contain subqueries or refer to variables other than columns of the current row. +> +> Include the `NO INHERIT` keywords to specify that a constraint should not propagate to child tables. `DEFAULT default_expr` - Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. 
The data type of the default expression must match the data type of the column. +> Include the `DEFAULT` clause to specify a default data value for the column whose column definition it appears within. The data type of the default expression must match the data type of the column. `SERVER server_name [OPTIONS (option 'value' [, ... ] ) ]` - To create a foreign table that will allow you to query a table that resides on a MySQL file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MySQL data adapter. - - Use the `OPTIONS` clause to specify the following `options` and their corresponding values: +> To create a foreign table that will allow you to query a table that resides on a MySQL file system, include the `SERVER` clause and specify the `server_name` of the foreign server that uses the MySQL data adapter. +> +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: | option | value | | ------------- | ---------------------------------------------------------------------------------------- | @@ -267,7 +263,7 @@ Include the `SERVER` clause to specify the name of the database stored on the My For more information about using the `CREATE FOREIGN TABLE` command, see: - +> !!! Note MySQL foreign data wrapper supports the write capability feature. @@ -296,9 +292,11 @@ When using the foreign data wrapper, you must create a table on the Postgres ser !!! Note For `ENUM` data type: -MySQL accepts `enum` value in string form. You must create exactly same `enum` listing on Advanced Server as that is present on MySQL server. Any sort of inconsistency will result in an error while fetching rows with values not known on the local server. + MySQL accepts `enum` value in string form. You must create exactly same `enum` listing on Advanced Server as that is present on MySQL server. Any sort of inconsistency will result in an error while fetching rows with values not known on the local server. -Also, when the given `enum` value is not present at MySQL side but present at Postgres/Advanced Server side, an empty string (`''`) is inserted as a value at MySQL side for the `enum` column. To select from such a table having enum value as `''`, create an `enum` type at Postgres side with all valid values and `''`. + Also, when the given `enum` value is not present at MySQL side but present at Postgres/Advanced Server side, an empty string (`''`) is inserted as a value at MySQL side for the `enum` column. To select from such a table having enum value as `''`, create an `enum` type at Postgres side with all valid values and `''`. + + ## IMPORT FOREIGN SCHEMA @@ -316,32 +314,38 @@ IMPORT FOREIGN SCHEMA remote_schema `remote_schema` - Specifies the remote schema (MySQL database) to import from. +> Specifies the remote schema (MySQL database) to import from. `LIMIT TO ( table_name [, ...] )` - By default, all views and tables existing in a particular database on the MySQL host are imported. Using this option, you can limit the list of tables to a specified subset. +> By default, all views and tables existing in a particular database on the MySQL host are imported. Using this option, you can limit the list of tables to a specified subset. `EXCEPT ( table_name [, ...] )` - By default, all views and tables existing in a particular database on the MySQL host are imported. Using this option, you can exclude specified foreign tables from the import. 
+> By default, all views and tables existing in a particular database on the MySQL host are imported. Using this option, you can exclude specified foreign tables from the import. `SERVER server_name` - Specify the name of server to import foreign tables from. +> Specify the name of server to import foreign tables from. `local_schema` - Specify the name of local schema where the imported foreign tables must be created. +> Specify the name of local schema where the imported foreign tables must be created. `OPTIONS` - Use the `OPTIONS` clause to specify the following `options` and their corresponding values: - -| **Option** | **Description** | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | -| import_default | Controls whether column `DEFAULT` expressions are included in the definitions of foreign tables imported from a foreign server. The default is `false`. | -| import_not_null | Controls whether column `NOT NULL` constraints are included in the definitions of foreign tables imported from a foreign server. The default is `true`. | +> Use the `OPTIONS` clause to specify the following `options` and their corresponding values: +> +> >
+> >
+> > +> > | **Option** | **Description** | +> > | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | +> > | import_default | Controls whether column `DEFAULT` expressions are included in the definitions of foreign tables imported from a foreign server. The default is `false`. | +> > | import_not_null | Controls whether column `NOT NULL` constraints are included in the definitions of foreign tables imported from a foreign server. The default is `true`. | **Example** @@ -385,4 +389,146 @@ The command imports table definitions from a remote schema `edb` on server `mysq For more information about using the `IMPORT FOREIGN SCHEMA` command, see: - +> + +## DROP EXTENSION + +Use the `DROP EXTENSION` command to remove an extension. To invoke the command, use your client of choice (for example, psql) to connect to the Postgres database from which you will be dropping the MySQL server, and run the command: + +```text +DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if an extension with the specified name doesn't exists. + +`name` + +> Specify the name of the installed extension. It is optional. +> +> `CASCADE` +> +> Automatically drop objects that depend on the extension. It drops all the other dependent objects too. +> +> `RESTRICT` +> +> Do not allow to drop extension if any objects, other than its member objects and extensions listed in the same DROP command are dependent on it. + +**Example** + +The following command removes the extension from the existing database: + +> `DROP EXTENSION mysql_fdw;` + +For more information about using the foreign data wrapper `DROP EXTENSION` command, see: + +> . + +## DROP SERVER + +Use the `DROP SERVER` command to remove a connection to a foreign server. The syntax is: + +```text +DROP SERVER [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +The role that drops the server is the owner of the server; use the `ALTER SERVER` command to reassign ownership of a foreign server. To drop a foreign server, you must have `USAGE` privilege on the foreign-data wrapper specified in the `DROP SERVER` command. + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if a server with the specified name doesn't exists. + +`name` + +> Specify the name of the installed server. It is optional. +> +> `CASCADE` +> +> Automatically drop objects that depend on the server. It should drop all the other dependent objects too. +> +> `RESTRICT` +> +> Do not allow to drop the server if any objects are dependent on it. + +**Example** + +The following command removes a foreign server named `mysql_server`: + +> `DROP SERVER mysql_server;` + +For more information about using the `DROP SERVER` command, see: + +> + +## DROP USER MAPPING + +Use the `DROP USER MAPPING` command to remove a mapping that associates a Postgres role with a foreign server. You must be the owner of the foreign server to remove a user mapping for that server. + +```text +DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name; +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the user mapping doesn't exist. 
+ +`user_name` + +> Specify the user name of the mapping. + +`server_name` + +> Specify the name of the server that defines a connection to the MySQL cluster. + +**Example** + +The following command drops a user mapping for a role named `enterprisedb`; the mapping is associated with a server named `mysql_server`: + +> `DROP USER MAPPING FOR enterprisedb SERVER mysql_server;` + +For detailed information about the `DROP USER MAPPING` command, see: + +> + +## DROP FOREIGN TABLE + +A foreign table is a pointer to a table that resides on the MySQL host. Use the `DROP FOREIGN TABLE` command to remove a foreign table. Only the owner of the foreign table can drop it. + +```text +DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +``` + +**Parameters** + +`IF EXISTS` + +> Include the `IF EXISTS` clause to instruct the server to issue a notice instead of throwing an error if the foreign table with the specified name doesn't exists. + +`name` + +> Specify the name of the foreign table. + +`CASCADE` + +> Automatically drop objects that depend on the foreign table. It should drop all the other dependent objects too. + +`RESTRICT` + +> Do not allow to drop foreign table if any objects are dependent on it. + +**Example** + +```text +DROP FOREIGN TABLE warehouse; +``` + +For more information about using the `DROP FOREIGN TABLE` command, see: + +> diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/07_example_using_the_mysql_data_adapter.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/08_example_using_the_mysql_data_adapter.mdx similarity index 88% rename from product_docs/docs/mysql_data_adapter/2.5.5/07_example_using_the_mysql_data_adapter.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/08_example_using_the_mysql_data_adapter.mdx index f7736cb31e8..6c802b4334a 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/07_example_using_the_mysql_data_adapter.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/08_example_using_the_mysql_data_adapter.mdx @@ -1,9 +1,5 @@ --- title: "Example: Using the MySQL Foreign Data Wrapper" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/example_using_the_mysql_data_adapter.html" --- @@ -20,7 +16,7 @@ CREATE SERVER mysql_server OPTIONS (host '127.0.0.1', port '3306'); -- create user mapping -CREATE USER MAPPING FOR postgres +CREATE USER MAPPING FOR enterprisedb SERVER mysql_server OPTIONS (username 'foo', password 'bar'); -- create foreign table @@ -64,4 +60,13 @@ Limit (cost=10.00..11.00 rows=1 width=36) Output: warehouse_id, warehouse_name Local server startup cost: 10 Remote query: SELECT `warehouse_id`, `warehouse_name` FROM `db`.`warehouse` WHERE ((`warehouse_name` LIKE BINARY 'TV')) + +-- drop foreign table +DROP FOREIGN TABLE warehouse; + +-- drop user mapping +DROP USER MAPPING FOR enterprisedb SERVER mysql_server; + +-- drop server +DROP SERVER mysql_server; ``` diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/08_example_import_foreign_schema.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/09_example_import_foreign_schema.mdx similarity index 77% rename from product_docs/docs/mysql_data_adapter/2.5.5/08_example_import_foreign_schema.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/09_example_import_foreign_schema.mdx index 57ca4675a9a..04384d09b1a 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/08_example_import_foreign_schema.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/09_example_import_foreign_schema.mdx @@ -1,9 +1,5 @@ --- title: "Example: Import Foreign Schema" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/example_import_foreign_schema.html" --- diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/09_identifying_data_adapter_version.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/10_identifying_data_adapter_version.mdx similarity index 67% rename from product_docs/docs/mysql_data_adapter/2.5.5/09_identifying_data_adapter_version.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/10_identifying_data_adapter_version.mdx index f458cdca77e..668f438a89f 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/09_identifying_data_adapter_version.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/10_identifying_data_adapter_version.mdx @@ -1,9 +1,5 @@ --- title: "Identifying the MySQL Foreign Data Wrapper Version" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/identifying_data_adapter_version.html" --- diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/11_uninstalling_the_mysql_data_adapter.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/11_uninstalling_the_mysql_data_adapter.mdx new file mode 100644 index 00000000000..188ecceb2b0 --- /dev/null +++ b/product_docs/docs/mysql_data_adapter/2.5.5/11_uninstalling_the_mysql_data_adapter.mdx @@ -0,0 +1,31 @@ +--- +title: "Uninstalling the MySQL Foreign Data Wrapper" +--- + + + +**Uninstalling an RPM Package** + +You can use the `yum remove` or `dnf remove` command to remove a package installed by `yum` or `dnf`. To remove a package, open a terminal window, assume superuser privileges, and enter the command: + +- On RHEL or CentOS 7: + + `yum remove edb-as-mysql_fdw` + +> where `xx` is the server version number, and `x` is the supported release version number of MySQL. 
For example, to uninstall MySQL 5.0 on RHEL 7: +> +> > `yum remove edb-as-mysql5_fdw` + +- On RHEL or CentOS 8: + + `dnf remove edb-as-mysql8_fdw` + +Where `xx` is the server version number. + +**Uninstalling MySQL Foreign Data Wrapper on a Debian or Ubuntu Host** + +- To uninstall MySQL Foreign Data Wrapper on a Debian or Ubuntu host, invoke the following command. + + `apt-get remove edb-as-mysql-fdw` + +Where `xx` is the server version number. diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/10_troubleshooting.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/12_troubleshooting.mdx similarity index 70% rename from product_docs/docs/mysql_data_adapter/2.5.5/10_troubleshooting.mdx rename to product_docs/docs/mysql_data_adapter/2.5.5/12_troubleshooting.mdx index 4a108a08ddf..2b4ba0109ea 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/10_troubleshooting.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/12_troubleshooting.mdx @@ -1,9 +1,5 @@ --- title: "Troubleshooting" - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/troubleshooting.html" --- In case you are experiencing issues with using MySQL 8 and MySQL_FDW, below is a list of solutions to some frequently seen issues: @@ -16,7 +12,7 @@ ERROR: failed to connect to MySQL: Authentication plugin ‘caching_sha2_passwo Specify the authentication plugin as `mysql_native_password` and set a cleartext password value. The syntax: - `ALTER USER 'username'@'host' IDENTIFIED WITH mysql_native_password BY '';` +> `ALTER USER 'username'@'host' IDENTIFIED WITH mysql_native_password BY '';` !!! Note Refer to [MySQL 8 documentation](https://dev.mysql.com/doc/refman/8.0/en/upgrading-from-previous-series.html) for more details on the above error. diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/index.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/index.mdx index 601da6cd10a..addc86a73b1 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/index.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/index.mdx @@ -1,15 +1,5 @@ --- title: "MySQL Foreign Data Wrapper Guide" -directoryDefaults: - description: "EDB Postgres MySQL Foreign Data Wrapper Version 2.5.5 Documentation and release notes." - -legacyRedirectsGenerated: - # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/index.html" - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/conclusion.html" - - "/edb-docs/p/edb-postgres-mysql-data-adapter/2.5.5" - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/whats_new.html" - - "/edb-docs/d/edb-postgres-mysql-data-adapter/user-guides/user-guide/2.5.5/genindex.html" --- The MySQL Foreign Data Wrapper (`mysql_fdw`) is a Postgres extension that allows you to access data that resides on a MySQL database from EDB Postgres Advanced Server. It is a writable foreign data wrapper that you can use with Postgres functions and utilities, or in conjunction with other data that resides on a Postgres host. @@ -20,6 +10,6 @@ This guide uses the term `Postgres` to refer to an instance of EDB Postgres Adva
-whats_new requirements_overview architecture_overview installing_the_mysql_data_adapter features_of_mysql_fdw configuring_the_mysql_data_adapter example_using_the_mysql_data_adapter example_import_foreign_schema identifying_data_adapter_version troubleshooting conclusion +whats_new requirements_overview architecture_overview installing_the_mysql_data_adapter updating_the_mysql_data_adapter features_of_mysql_fdw configuring_the_mysql_data_adapter example_using_the_mysql_data_adapter example_import_foreign_schema example_join_push_down identifying_data_adapter_version uninstalling_the_mysql_data_adapter troubleshooting conclusion
From 92b8bdb740df7d3c896db9483de05860f7a12af9 Mon Sep 17 00:00:00 2001 From: Jon Ericson <83660216+jericson-edb@users.noreply.github.com> Date: Thu, 20 May 2021 14:41:29 -0700 Subject: [PATCH 2/5] Release/2021 05 20 (#1405) * DB-1232 Installation guides needs update for new parameter ' repo_gpgcheck = 1' in new edb.repo rpm file * Fix conversion error on md file with leading whitespace * New PDFs generated by Github Actions * link spans two lines and wasn't rewritten Co-authored-by: sheetal Co-authored-by: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Co-authored-by: drothery-edb <83650384+drothery-edb@users.noreply.github.com> Co-authored-by: josh-heyer Former-commit-id: 92f3b07cc52ade39f3d9a557bb5caf531a62774d --- .../cloud_native_postgresql/monitoring.mdx | 4 +- .../02_rpm_installation.mdx | 16 +----- .../13/edb_plus/03_installing_edb_plus.mdx | 54 +++---------------- .../source/source_cloud_native_operator.py | 4 ++ 4 files changed, 12 insertions(+), 66 deletions(-) diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx index 79a5321fc14..47340c1ac55 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx @@ -1,4 +1,3 @@ - --- title: 'Monitoring Instances' originalFilePath: 'src/monitoring.md' @@ -120,8 +119,7 @@ data: description: "Replication lag behind primary in seconds" ``` -A list of basic monitoring queries can be found in the [`cnp-basic-monitoring.yaml` file]( -./samples/cnp-basic-monitoring.yaml). +A list of basic monitoring queries can be found in the [`cnp-basic-monitoring.yaml` file](../samples/cnp-basic-monitoring.yaml). ### Structure of a user defined metric diff --git a/product_docs/docs/epas/12/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx b/product_docs/docs/epas/12/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx index 89f035a63b9..d22804277bd 100644 --- a/product_docs/docs/epas/12/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx +++ b/product_docs/docs/epas/12/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx @@ -27,21 +27,7 @@ To create the repository configuration file, assume superuser privileges, and in dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm ``` -The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -After creating the `edb.repo` file, use your choice of editor to ensure that the value of the enabled parameter is 1, and -replace the `username` and `password` placeholders in the baseurl specification with the name and password of a -registered EDB user. - -``` -[edb] -name=EnterpriseDB RPMs $releasever - $basearch -baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY -``` -After saving your changes to the configuration file, you can use the following command to install EDB*Plus: +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. 
After saving your changes to the configuration file, you can use the following command to install EDB*Plus: - On RHEL or CentOS 7: diff --git a/product_docs/docs/epas/13/edb_plus/03_installing_edb_plus.mdx b/product_docs/docs/epas/13/edb_plus/03_installing_edb_plus.mdx index 18aca812661..7780508d9a3 100644 --- a/product_docs/docs/epas/13/edb_plus/03_installing_edb_plus.mdx +++ b/product_docs/docs/epas/13/edb_plus/03_installing_edb_plus.mdx @@ -86,21 +86,7 @@ You can use an RPM package to install EDB\*Plus on a CentOS host. dnf config-manager --set-enabled PowerTools ``` - The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -- After creating the `edb.repo` file, use your choice of editor to ensure that the value of the enabled parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/ - rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -- After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: +- The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: On CentOS 7: @@ -174,21 +160,7 @@ You can use an RPM package to install EDB\*Plus on a RHEL host. subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" ``` - The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -- After creating the `edb.repo` file, use your choice of editor to ensure that the value of the enabled parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/ - rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -- After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: +- The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: On RHEL 7: @@ -222,7 +194,7 @@ You can use an RPM package to install EDB\*Plus on a CentOS or RHEL 7 ppc64le ho The repository configuration file is named `advance-toolchain.repo`. The file resides in `/etc/yum.repos.d`. -- After creating the `advance-toolchain.repo` file, use your choice of editor to set the value of the `enabled` parameter to `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the registered EDB username and password. +- After creating the `advance-toolchain.repo` file, the `enabled` parameter is set to `1` by default. 
```text [advance-toolchain] @@ -244,7 +216,7 @@ You can use an RPM package to install EDB\*Plus on a CentOS or RHEL 7 ppc64le ho yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm ``` -- Replace the `USERNAME:PASSWORD` in the following command with the username and password of a registered EDB user: +- Replace the `USERNAME:PASSWORD` variable in the following command with the username and password of a registered EDB user: ```text sed -i "s@:@USERNAME:PASSWORD@" /etc/yum.repos.d/edb.repo @@ -266,21 +238,7 @@ You can use an RPM package to install EDB\*Plus on a CentOS or RHEL 7 ppc64le ho subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" ``` - The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. - -- After creating the `edb.repo` file, use your choice of editor to ensure that the value of the enabled parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. - - ```text - [edb] - name=EnterpriseDB RPMs $releasever - $basearch - baseurl=https://:@yum.enterprisedb.com/edb/redhat/ - rhel-$releasever-$basearch - enabled=1 - gpgcheck=1 - gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY - ``` - -- After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: +- The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. After saving your changes to the configuration file, you can use the following command to install EDB\*Plus: ```text yum -y install edb-asxx-edbplus @@ -334,7 +292,7 @@ The following steps will walk you through using the EDB apt repository to instal apt-get -y install apt-transport-https ``` -- Add the EBD signing key: +- Add the EDB signing key: ```text wget -q -O - https://apt.enterprisedb.com/edb-deb.gpg.key | sudo apt-key add - diff --git a/scripts/source/source_cloud_native_operator.py b/scripts/source/source_cloud_native_operator.py index 63c7c0ada85..f1c58c8449e 100644 --- a/scripts/source/source_cloud_native_operator.py +++ b/scripts/source/source_cloud_native_operator.py @@ -79,6 +79,10 @@ def process_md(file_path): if new_file_path.name == "index.mdx" else "", ) + elif not line.strip(): + line = "" + else: + print("File does not begin with title - frontmatter will not be valid: " + file_path) new_file.write(line) From fdc67be64f592143ceb284bb58a854e9c5f11773 Mon Sep 17 00:00:00 2001 From: Jon Ericson <83660216+jericson-edb@users.noreply.github.com> Date: Thu, 3 Jun 2021 11:31:32 -0700 Subject: [PATCH 3/5] Release/2021 06 03 (#1441) * Bump dns-packet from 1.3.1 to 1.3.4 Bumps [dns-packet](https://github.com/mafintosh/dns-packet) from 1.3.1 to 1.3.4. 
- [Release notes](https://github.com/mafintosh/dns-packet/releases) - [Changelog](https://github.com/mafintosh/dns-packet/blob/master/CHANGELOG.md) - [Commits](https://github.com/mafintosh/dns-packet/compare/v1.3.1...v1.3.4) Signed-off-by: dependabot[bot] * Updated files checked in for the upcoming ODBC release * Update for EV-259 * Update for EV-259 * New PDFs generated by Github Actions * New PDFs generated by Github Actions * pgBackRest multi-repo feature (#1264) (#1442) * initial draft for multi repo feature * Updated: 2021-04-20 12:16:44 * missing history files rephrasal * fix after review * fix after another review * various updates after review * various updates after review Co-authored-by: Stefan Fercot Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Abhilasha Narendra Co-authored-by: drothery-edb <83650384+drothery-edb@users.noreply.github.com> Co-authored-by: josh-heyer Co-authored-by: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Co-authored-by: Stefan Fercot Former-commit-id: 03cbddbc4fac5f64baef10bb5b62315d00c11cfc --- .../pgbackrest/08-multiple-repositories.mdx | 592 ++++++++++++++ .../42.2.12.3/02_requirements_overview.mdx | 2 +- .../42.2.19.1/02_requirements_overview.mdx | 2 +- .../odbc_connector/13.0.0.1/01_whats_new.mdx | 10 + .../13.0.0.1/02_requirements_overview.mdx | 40 + .../01_installing_edb-odbc.mdx | 518 ++++++++++++ .../13.0.0.1/03_edb-odbc_overview/index.mdx | 26 + .../13.0.0.1/04_creating_a_data_source.mdx | 37 + .../05_edb-odbc_connection_properties.mdx | 300 +++++++ .../06_edb-odbc_driver_functionality.mdx | 753 ++++++++++++++++++ .../13.0.0.1/07_scram_compatibility.mdx | 13 + .../13.0.0.1/images/EDB_logo.png | 3 + .../13.0.0.1/images/advanced_options_1.png | 3 + .../images/connection_is_successful.png | 3 + .../images/create_new_data_source.png | 3 + .../13.0.0.1/images/data_source_names.png | 3 + .../images/data_source_properties_window.png | 3 + .../images/define_the_data_source.png | 3 + .../images/driver_properties_window.png | 3 + .../13.0.0.1/images/global_settings.png | 3 + .../images/installed_edb-odbc_driver.png | 3 + .../13.0.0.1/images/new_driver_definition.png | 3 + .../images/odbc_advanced_options_2.png | 3 + .../images/odbc_installation_complete.png | 3 + .../images/odbc_installation_dialog.png | 3 + .../images/odbc_installation_wizard.png | 3 + .../13.0.0.1/images/ready_to_install.png | 3 + .../select_driver_named_date_source.png | 3 + .../selecting_the_connectors_installer.png | 3 + .../images/starting_stackbuilder_plus.png | 3 + .../unixodbc_data_source_administrator.png | 3 + .../windows_data_source_administrator.png | 3 + .../docs/odbc_connector/13.0.0.1/index.mdx | 23 + yarn.lock | 6 +- 34 files changed, 2380 insertions(+), 5 deletions(-) create mode 100644 advocacy_docs/supported-open-source/pgbackrest/08-multiple-repositories.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/01_whats_new.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/02_requirements_overview.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/01_installing_edb-odbc.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/index.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/04_creating_a_data_source.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/05_edb-odbc_connection_properties.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/06_edb-odbc_driver_functionality.mdx 
create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/07_scram_compatibility.mdx create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/images/EDB_logo.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/advanced_options_1.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/connection_is_successful.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/create_new_data_source.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/data_source_names.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/data_source_properties_window.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/define_the_data_source.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/driver_properties_window.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/global_settings.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/installed_edb-odbc_driver.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/new_driver_definition.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/odbc_advanced_options_2.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_complete.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_dialog.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_wizard.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/ready_to_install.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/select_driver_named_date_source.png create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/images/selecting_the_connectors_installer.png create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/images/starting_stackbuilder_plus.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/unixodbc_data_source_administrator.png create mode 100755 product_docs/docs/odbc_connector/13.0.0.1/images/windows_data_source_administrator.png create mode 100644 product_docs/docs/odbc_connector/13.0.0.1/index.mdx diff --git a/advocacy_docs/supported-open-source/pgbackrest/08-multiple-repositories.mdx b/advocacy_docs/supported-open-source/pgbackrest/08-multiple-repositories.mdx new file mode 100644 index 00000000000..0b91dffdd07 --- /dev/null +++ b/advocacy_docs/supported-open-source/pgbackrest/08-multiple-repositories.mdx @@ -0,0 +1,592 @@ +--- +title: 'Multiple Repositories' +description: "Multiple Repositories Support Feature" +--- + +A key feature of the [v2.33](https://github.com/pgbackrest/pgbackrest/releases/tag/release%2F2.33) pgBackrest release is support for multiple repositories. This lets you define different behavior for different backup repositories. As an example, you could have a local repository for fast restore that had short retention period (to save space) paired with a remote repository which is larger for cold storage. + +This feature introduces a new [`--repo`](https://pgbackrest.org/configuration.html#section-repository/option-repo) option which can be used to set the repository the command should apply to. All `repo*-` options are indexed to enable configuring multiple repositories. + + +In this section, we will set up a **demo** cluster (using **EDB Postgres Advanced Server** version 13 on CentOS 7) to demonstrate how this new feature impacts each pgBackRest command. 
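+Because every `repo*-` option is indexed, each repository can have its own type, location, and retention policy. The following minimal sketch (hypothetical paths and bucket name, and not the configuration used in this demo) pairs a fast local repository with an S3-compatible one; the S3 credential options are omitted for brevity:
+
+```ini
+# repo1: fast local storage with short retention
+repo1-path=/backup/local
+repo1-retention-full=1
+
+# repo2: remote S3-compatible storage with longer retention
+repo2-type=s3
+repo2-path=/demo-repo
+repo2-s3-bucket=my-backup-bucket
+repo2-s3-endpoint=s3.amazonaws.com
+repo2-s3-region=us-east-1
+repo2-retention-full=4
+```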
+ + +### Configuration + +> It must be noted that in this demo, we will set up two local repositories on our database host. For production purposes, it is **recommended** to store each repository on separate storage, and configure at least one remote repository. + + +```ini +[global] +# repository details +repo1-path=/var/lib/edb/as13/backups/repo1 +repo1-retention-full=1 +repo2-path=/var/lib/edb/as13/backups/repo2 +repo2-retention-full=1 + +# general options +process-max=2 +log-level-console=info +log-level-file=debug +start-fast=y +delta=y + +[demo] +pg1-path=/var/lib/edb/as13/data +pg1-user=enterprisedb +pg1-port=5444 +``` + +For safety reasons, the `--repo` option can't be defined in the configuration file or a warning will be triggered: `WARN: configuration file contains command-line only option 'repo'`. If there is more than one repository configured and the `--repo` option is not specified for a command, the repository with the highest priority order (e.g. `repo1` then `repo2`) will be chosen by default. + +The `--repo` option is not required when only `repo1` is configured to maintain backward compatibility. However, when a single repository is configured, it is recommended to use `repo1` in the configuration. + + +### Initialization + +#### Stanza Create Command + +The `stanza-create` command will automatically operate on all configured repositories: + +```bash +$ pgbackrest --stanza=demo stanza-create +P00 INFO: stanza-create for stanza 'demo' on repo1 +P00 INFO: stanza-create for stanza 'demo' on repo2 +``` + +#### Check Command + +The `check` command will trigger a new WAL segment to be archived and try to push it to all defined repositories: + +```bash +$ pgbackrest --stanza=demo check +P00 INFO: check repo1 configuration (primary) +P00 INFO: check repo2 configuration (primary) +P00 INFO: check repo1 archive for WAL (primary) +P00 INFO: WAL segment ... successfully archived to '...' on repo1 +P00 INFO: check repo2 archive for WAL (primary) +P00 INFO: WAL segment ... successfully archived to '...' on repo2 +``` + +#### Archive Push Command + +The `archive-push` command will always archive WALs in all configured repositories. Backups will need to be scheduled individually for each repository. In most cases, this is desirable since backup types and retention could vary per repository. + +The `archive_command` can still be defined as usual: + +```bash +$ psql -d postgres -c "show archive_command;" + archive_command +------------------------------------------ + pgbackrest --stanza=demo archive-push %p +(1 row) +``` + +Here is a `DEBUG` extract of the PostgreSQL logs showing the `archive-push` activity: + +``` +... +P00 DEBUG: storage/storage::storageNewWrite: => { + type: posix, name: {"/var/lib/edb/as13/backups/repo1/archive/demo/13-1/0000000100000000/ + 000000010000000000000005-be9a61a800934879842fa647b0d50385f44e0228.gz"}, + modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} +... +P00 DEBUG: storage/storage::storageNewWrite: => { + type: posix, name: {"/var/lib/edb/as13/backups/repo2/archive/demo/13-1/0000000100000000/ + 000000010000000000000005-be9a61a800934879842fa647b0d50385f44e0228.gz"}, + modeFile: 0640, modePath: 0750, createPath: true, syncFile: true, syncPath: true, atomic: true} +... +P00 INFO: pushed WAL file '000000010000000000000005' to the archive +``` + +The `archive-push` command will try to push the WAL archive to all reachable repositories. 
The idea is to archive to as many repositories as possible; if any repository fails, an error is still returned to PostgreSQL so that it does not remove the WAL file. + +``` +P00 INFO: archive-push command begin: [pg_wal/000000010000000000000006] ... +... +P00 INFO: archive-push command begin: [pg_wal/000000010000000000000007] ... +ERROR: [104]: archive-push command encountered error(s): + repo2: [PathOpenError] unable to list file info for path '...': [13] Permission denied +P00 INFO: archive-push command end: aborted with exception [104] +DETAIL: The failed archive command was: pgbackrest --stanza=demo archive-push pg_wal/000000010000000000000007 +WARNING: archiving write-ahead log file "000000010000000000000007" failed too many times, will try again later +``` + +The PostgreSQL `archiver` process should then report an error: + +```bash +$ ps -o pid,cmd fx |grep archiver + 4440 \_ postgres: archiver failed on 000000010000000000000007 +``` + +The next WAL segments are then not archived: + +```bash +$ psql -d postgres -c "SELECT pg_create_restore_point('generate some activity'); SELECT pg_switch_wal();" + pg_switch_wal +--------------- + 0/80001C8 +(1 row) + +$ ls as13/data/pg_wal/archive_status/ |grep ".ready" +000000010000000000000007.ready +000000010000000000000008.ready +``` + +Adding `archive-async=y` to the configuration, which enables asynchronous archiving processes within pgBackRest itself, becomes very handy here. Even if the PostgreSQL `archiver` process is still stuck, the archives will reach the working repositories: + +```bash +$ ls /var/lib/edb/as13/backups/repo1/archive/demo/13-1/0000000100000000/ +000000010000000000000005-be9a61a800934879842fa647b0d50385f44e0228.gz +000000010000000000000006-7108296955e1208c93447dcfc5712ce59af97907.gz +000000010000000000000007-f1b6fcdd23e96a19a7a89dc92613674c2316aa49.gz +000000010000000000000008-791c44d39ef772d58e51d64c8e3589231d3916a4.gz +000000010000000000000009-47241214eb3fd5e693eed778caa7b304a778ecc2.gz + +$ ls /var/lib/edb/as13/backups/repo2/archive/demo/13-1/0000000100000000/ +000000010000000000000005-be9a61a800934879842fa647b0d50385f44e0228.gz +000000010000000000000006-7108296955e1208c93447dcfc5712ce59af97907.gz +``` + +Let us unblock the `archiver` process (and remove asynchronous archiving) before going further: + +```bash +$ ps -o pid,cmd fx |grep archiver + 4440 \_ postgres: archiver last was 00000001000000000000000 +``` + + +### Backups + +#### Backup Command + +Let us take a few backups: + +```bash +$ pgbackrest backup --stanza=demo --type=full +P00 INFO: backup command begin ... +P00 INFO: repo option not specified, defaulting to repo1 +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 00000001000000000000000C, lsn = 0/C000060 +P00 INFO: full backup size = 50.4MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 00000001000000000000000C, lsn = 0/C000138 +P00 INFO: check archive for segment(s) 00000001000000000000000C:00000001000000000000000C +P00 INFO: new backup label = 20210419-142212F +P00 INFO: backup command end: completed successfully + +$ pgbackrest backup --stanza=demo --type=full --repo=2 +P00 INFO: backup command begin ...
+P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 00000001000000000000000E, lsn = 0/E000028 +P00 INFO: full backup size = 50.5MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 00000001000000000000000E, lsn = 0/E000138 +P00 INFO: check archive for segment(s) 00000001000000000000000E:00000001000000000000000E +P00 INFO: new backup label = 20210419-142414F +P00 INFO: backup command end: completed successfully + +$ pgbackrest backup --stanza=demo --type=incr --repo=1 +P00 INFO: backup command begin ... +P00 INFO: last backup label = 20210419-142212F, version = ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 000000010000000000000010, lsn = 0/10000028 +P00 INFO: incr backup size = 50.5MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 000000010000000000000010, lsn = 0/10000100 +P00 INFO: check archive for segment(s) 000000010000000000000010:000000010000000000000010 +P00 INFO: new backup label = 20210419-142212F_20210419-142502I +P00 INFO: backup command end: completed successfully + +$ pgbackrest backup --stanza=demo --type=incr --repo=2 +P00 INFO: backup command begin ... +P00 INFO: last backup label = 20210419-142414F, version = ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 000000010000000000000012, lsn = 0/12000028 +P00 INFO: incr backup size = 50.6MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 000000010000000000000012, lsn = 0/12000100 +P00 INFO: check archive for segment(s) 000000010000000000000012:000000010000000000000012 +P00 INFO: new backup label = 20210419-142414F_20210419-142556I +P00 INFO: backup command end: completed successfully +``` + +Here, we alternated `full` and `incr` backups in each repository. 
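As noted earlier, `archive-push` writes WAL to every configured repository, but backups have to be scheduled individually for each one. A hypothetical crontab sketch for the `enterprisedb` user is shown below; the schedules and backup types are illustrative only and should be adapted to your retention needs:

```bash
# Open the crontab of the user that runs pgBackRest (enterprisedb in this demo).
crontab -e

# Example entries: weekly full plus daily incremental backups on repo1,
# and a weekly full backup only on the second repository.
# 0 1 * * 0    pgbackrest backup --stanza=demo --type=full --repo=1
# 0 1 * * 1-6  pgbackrest backup --stanza=demo --type=incr --repo=1
# 0 3 * * 0    pgbackrest backup --stanza=demo --type=full --repo=2
```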
+ + +#### Info Command + +```bash +$ pgbackrest info --stanza=demo +stanza: demo + status: ok + cipher: none + + db (current) + wal archive min/max (13): 000000010000000000000002/000000010000000000000012 + + full backup: 20210419-142212F + timestamp start/stop: 2021-04-19 14:22:12 / 2021-04-19 14:22:26 + wal start/stop: 00000001000000000000000C / 00000001000000000000000C + database size: 50.4MB, database backup size: 50.4MB + repo1: backup set size: 8MB, backup size: 8MB + + full backup: 20210419-142414F + timestamp start/stop: 2021-04-19 14:24:14 / 2021-04-19 14:24:26 + wal start/stop: 00000001000000000000000E / 00000001000000000000000E + database size: 50.5MB, database backup size: 50.5MB + repo2: backup set size: 8MB, backup size: 8MB + + incr backup: 20210419-142212F_20210419-142502I + timestamp start/stop: 2021-04-19 14:25:02 / 2021-04-19 14:25:04 + wal start/stop: 000000010000000000000010 / 000000010000000000000010 + database size: 50.5MB, database backup size: 613.4KB + repo1: backup set size: 8MB, backup size: 26.3KB + backup reference list: 20210419-142212F + + incr backup: 20210419-142414F_20210419-142556I + timestamp start/stop: 2021-04-19 14:25:56 / 2021-04-19 14:25:57 + wal start/stop: 000000010000000000000012 / 000000010000000000000012 + database size: 50.6MB, database backup size: 661.5KB + repo2: backup set size: 8MB, backup size: 28KB + backup reference list: 20210419-142414F + +``` + +The default order will sort backups by dates mixing the repositories. It might be confusing to find the backups depending on each other. + +We can then split this view per repository: + +```bash +$ pgbackrest info --stanza=demo --repo=1 +stanza: demo + status: ok + cipher: none + + db (current) + wal archive min/max (13): 00000001000000000000000C/000000010000000000000012 + + full backup: 20210419-142212F + timestamp start/stop: 2021-04-19 14:22:12 / 2021-04-19 14:22:26 + wal start/stop: 00000001000000000000000C / 00000001000000000000000C + database size: 50.4MB, database backup size: 50.4MB + repo1: backup set size: 8MB, backup size: 8MB + + incr backup: 20210419-142212F_20210419-142502I + timestamp start/stop: 2021-04-19 14:25:02 / 2021-04-19 14:25:04 + wal start/stop: 000000010000000000000010 / 000000010000000000000010 + database size: 50.5MB, database backup size: 613.4KB + repo1: backup set size: 8MB, backup size: 26.3KB + backup reference list: 20210419-142212F + +$ pgbackrest info --stanza=demo --repo=2 +stanza: demo + status: ok + cipher: none + + db (current) + wal archive min/max (13): 00000001000000000000000E/000000010000000000000012 + + full backup: 20210419-142414F + timestamp start/stop: 2021-04-19 14:24:14 / 2021-04-19 14:24:26 + wal start/stop: 00000001000000000000000E / 00000001000000000000000E + database size: 50.5MB, database backup size: 50.5MB + repo2: backup set size: 8MB, backup size: 8MB + + incr backup: 20210419-142414F_20210419-142556I + timestamp start/stop: 2021-04-19 14:25:56 / 2021-04-19 14:25:57 + wal start/stop: 000000010000000000000012 / 000000010000000000000012 + database size: 50.6MB, database backup size: 661.5KB + repo2: backup set size: 8MB, backup size: 28KB + backup reference list: 20210419-142414F +``` + +The **'wal archive min/max'** shows the minimum and maximum WAL currently stored in the archive and, in the case of multiple repositories, will be reported across all repositories unless the `--repo` option is set. There may be gaps due to archive retention policies or other reasons. 
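When monitoring several repositories from scripts, the text output above can be awkward to parse. The `info` command can also emit JSON with `--output=json`; a small sketch, assuming the `jq` utility is available (the exact JSON layout may vary between pgBackRest versions):

```bash
# Print the overall stanza status message; per-repository and per-backup
# details are available in the same JSON document.
pgbackrest info --stanza=demo --output=json | jq -r '.[0].status.message'
```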
+ +Let us break the first repository by removing its content: + +```bash +$ pgbackrest --stanza=demo stanza-delete --repo=1 --force +P00 INFO: stanza-delete command end: completed successfully + +$ pgbackrest --stanza=demo stanza-create +P00 INFO: stanza-create for stanza 'demo' on repo1 +P00 INFO: stanza-create for stanza 'demo' on repo2 +P00 INFO: stanza 'demo' already exists on repo2 and is valid +P00 INFO: stanza-create command end: completed successfully +``` + +If multiple repositories are configured, then a status of **mixed** indicates that the stanza is not in a healthy state for one or more of the repositories. In this case, the state of the stanza will be detailed in additional lines per repository. + +```bash +$ pgbackrest info --stanza=demo +stanza: demo + status: mixed + repo1: error (no valid backups) + repo2: ok + cipher: none + + db (current) + wal archive min/max (13): 00000001000000000000000E/000000010000000000000012 + + full backup: 20210419-142414F + timestamp start/stop: 2021-04-19 14:24:14 / 2021-04-19 14:24:26 + wal start/stop: 00000001000000000000000E / 00000001000000000000000E + database size: 50.5MB, database backup size: 50.5MB + repo2: backup set size: 8MB, backup size: 8MB + + incr backup: 20210419-142414F_20210419-142556I + timestamp start/stop: 2021-04-19 14:25:56 / 2021-04-19 14:25:57 + wal start/stop: 000000010000000000000012 / 000000010000000000000012 + database size: 50.6MB, database backup size: 661.5KB + repo2: backup set size: 8MB, backup size: 28KB + backup reference list: 20210419-142414F +``` + +This state can be resolved by taking a new backup: + +```bash +$ pgbackrest backup --stanza=demo --type=full --repo=1 +P00 INFO: backup command begin ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 000000010000000000000014, lsn = 0/14000028 +P00 INFO: full backup size = 50.6MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 000000010000000000000014, lsn = 0/14000138 +P00 INFO: check archive for segment(s) 000000010000000000000014:000000010000000000000014 +P00 INFO: new backup label = 20210419-143400F +P00 INFO: backup command end: completed successfully +``` + + +### Restore + +Let us initiate a situation with some data, backups and restore point: + +```bash +$ createdb test +$ psql -d test -c "CREATE TABLE t1(id int);" +CREATE TABLE +$ psql -d test -c "INSERT INTO t1 VALUES (1);" +INSERT 0 1 + +$ pgbackrest backup --stanza=demo --type=full --repo=1 +P00 INFO: backup command begin ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 000000010000000000000016, lsn = 0/16000028 +P00 INFO: full backup size = 63.0MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 000000010000000000000016, lsn = 0/16000138 +P00 INFO: check archive for segment(s) 000000010000000000000016:000000010000000000000016 +P00 INFO: new backup label = 20210419-143643F +P00 INFO: backup command end: completed successfully +P00 INFO: expire command begin ... 
+P00 INFO: repo1: expire full backup 20210419-143400F +P00 INFO: repo1: remove expired backup 20210419-143400F +P00 INFO: expire command end: completed successfully + +$ psql -d test -c "INSERT INTO t1 VALUES (2);" +INSERT 0 1 +$ pgbackrest backup --stanza=demo --type=full --repo=2 +P00 INFO: backup command begin ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 000000010000000000000018, lsn = 0/18000028 +P00 INFO: full backup size = 63MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 000000010000000000000018, lsn = 0/18000138 +P00 INFO: check archive for segment(s) 000000010000000000000018:000000010000000000000018 +P00 INFO: new backup label = 20210419-143749F +P00 INFO: backup command end: completed successfully +P00 INFO: expire command begin ... +P00 INFO: repo2: expire full backup set 20210419-142414F, 20210419-142414F_20210419-142556I +P00 INFO: repo2: remove expired backup 20210419-142414F_20210419-142556I +P00 INFO: repo2: remove expired backup 20210419-142414F +P00 INFO: expire command end: completed successfully + +$ psql -d postgres -c "select pg_create_restore_point('RP1');" + pg_create_restore_point +------------------------- + 0/190000C8 +(1 row) + +$ psql -d postgres -Atc "select current_timestamp,current_setting('datestyle'),txid_current();" +19-APR-21 14:38:55.313716 +00:00|Redwood, SHOW_TIME|1183 + +$ psql -d test -c "INSERT INTO t1 VALUES (3); SELECT pg_switch_wal();" + pg_switch_wal +--------------- + 0/19000238 +(1 row) + +$ pgbackrest info --stanza=demo +stanza: demo + status: ok + cipher: none + + db (current) + wal archive min/max (13): 000000010000000000000016/000000010000000000000019 + + full backup: 20210419-143643F + timestamp start/stop: 2021-04-19 14:36:43 / 2021-04-19 14:37:01 + wal start/stop: 000000010000000000000016 / 000000010000000000000016 + database size: 63.0MB, database backup size: 63.0MB + repo1: backup set size: 10MB, backup size: 10MB + + full backup: 20210419-143749F + timestamp start/stop: 2021-04-19 14:37:49 / 2021-04-19 14:38:03 + wal start/stop: 000000010000000000000018 / 000000010000000000000018 + database size: 63MB, database backup size: 63MB + repo2: backup set size: 10MB, backup size: 10MB +``` + +As you can see in the example above, the `backup` command automatically triggered the `expire` command on the repository when we created a backup. Since this command is operating on one repository only, it would not expire anything in the other repositories until the `backup` or `expire` command is run for that repository. + +Since the default retention policy is based on a count of backups, only new backups are expected to trigger the expiration of older backups and archives. + +#### Restore Command + +The restore command defaults to selecting the latest backup from the first repository where backups exist. The order in which the repositories are checked is dictated by the order of the repositories as configured in `pgbackrest.conf` (e.g. repo1 will be checked before repo2). To restore from a specific repository, the `--repo` option can be used. + +PITR can be performed by specifying `--type=time` and specifying the target time with `--target`. If a backup is not specified via the `--set` option, then the configured repositories will be checked for a backup that contains the requested time.
If no backup can be found, the latest backup from the first repository containing backups will be used. + +```bash +$ mkdir /tmp/restored_data +$ pgbackrest restore --stanza=demo --target="RP1" --type=name --no-delta --pg1-path=/tmp/restored_data +P00 INFO: repo1: restore backup set 20210419-143643F + +$ rm -rf /tmp/restored_data/* +$ pgbackrest restore --stanza=demo --target="2021-04-19 14:38:55.313716+00:00" --type=time --no-delta --pg1-path=/tmp/restored_data +HINT: time format must be YYYY-MM-DD HH:MM:SS with optional msec and optional timezone (+/- HH or HHMM or HH:MM) + - if timezone is omitted, local time is assumed (for UTC use +00) +P00 INFO: repo1: restore backup set 20210419-143643F + +$ rm -rf /tmp/restored_data/* +$ pgbackrest restore --stanza=demo --target="RP1" --type=name --no-delta --pg1-path=/tmp/restored_data --repo=2 +P00 INFO: repo2: restore backup set 20210419-143749F +``` + +Even if the backup in **repo2** is newer, the first found match is preserved. Let's take a new backup in **repo1** to check if pgBackRest will auto-select the backup in **repo2**: + +```bash +$ pgbackrest backup --stanza=demo --type=full --repo=1 +P00 INFO: backup command begin ... +P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the requested immediate checkpoint completes +P00 INFO: backup start archive = 00000001000000000000001B, lsn = 0/1B000028 +P00 INFO: full backup size = 63MB +P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive +P00 INFO: backup stop archive = 00000001000000000000001B, lsn = 0/1B000138 +P00 INFO: check archive for segment(s) 00000001000000000000001B:00000001000000000000001B +P00 INFO: new backup label = 20210419-144656F +P00 INFO: backup command end: completed successfully +P00 INFO: expire command begin ... +P00 INFO: repo1: expire full backup 20210419-143643F +P00 INFO: repo1: remove expired backup 20210419-143643F +P00 INFO: expire command end: completed successfully + +$ pgbackrest info --stanza=demo +stanza: demo + status: ok + cipher: none + + db (current) + wal archive min/max (13): 000000010000000000000018/00000001000000000000001B + + full backup: 20210419-143749F + timestamp start/stop: 2021-04-19 14:37:49 / 2021-04-19 14:38:03 + wal start/stop: 000000010000000000000018 / 000000010000000000000018 + database size: 63MB, database backup size: 63MB + repo2: backup set size: 10MB, backup size: 10MB + + full backup: 20210419-144656F + timestamp start/stop: 2021-04-19 14:46:56 / 2021-04-19 14:47:10 + wal start/stop: 00000001000000000000001B / 00000001000000000000001B + database size: 63MB, database backup size: 63MB + repo1: backup set size: 10MB, backup size: 10MB + +$ rm -rf /tmp/restored_data/* +$ pgbackrest restore --stanza=demo --target="2021-04-19 14:38:55.313716+00:00" --type=time --no-delta --pg1-path=/tmp/restored_data +P00 INFO: repo2: restore backup set 20210419-143749F +``` + +The recovery process may be complex and tricky depending on the target, and therefore, the `info` command can really be helpful to determine the repository to restore from. + +#### Archive Get Command + +When multiple repositories are configured, WAL will be fetched from the repositories in priority order (e.g. `repo1`, `repo2`, etc.). In general it is better if faster storage has higher priority. The command can operate on a single repository by specifying it with the `--repo` option. 
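Before relying on `archive-get` in a `restore_command`, you can fetch a single WAL segment by hand to confirm that a given repository can serve it. A quick sketch, reusing a segment name from the examples above and a throwaway destination path:

```bash
# Retrieve one archived WAL segment from repo2 only and write it to /tmp.
pgbackrest --stanza=demo archive-get 000000010000000000000018 /tmp/000000010000000000000018 --repo=2
```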
+ +```bash +root# systemctl stop edb-as-13.service +$ mv /var/lib/edb/as13/data /var/lib/edb/as13/data.orig +$ mkdir -m 700 /var/lib/edb/as13/data + +$ pgbackrest restore --stanza=demo --target="2021-04-19 14:38:55.313716+00:00" --type=time --no-delta --target-action=promote +P00 INFO: restore command begin ... +P00 INFO: write updated /var/lib/edb/as13/data/postgresql.auto.conf +P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started) +P00 INFO: restore command end: completed successfully + +$ cat /var/lib/edb/as13/data/postgresql.auto.conf|grep restore_command +restore_command = 'pgbackrest --stanza=demo archive-get %f "%p"' + +root# systemctl start edb-as-13.service +$ cat /var/lib/edb/as13/data/log/edb-2021-04-19_145147.log +... +LOG: starting point-in-time recovery to 2021-04-19 14:38:55.313716+00 +P00 INFO: found 000000010000000000000018 in the repo2: 13-1 archive +P00 INFO: found 000000010000000000000019 in the repo2: 13-1 archive +LOG: recovery stopping before commit of transaction 1183, time 2021-04-19 14:38:55.314118+00 +LOG: redo done at 0/19000100 +P00 INFO: unable to find 00000002.history in the archive +LOG: selected new timeline ID: 2 +LOG: archive recovery complete +P00 INFO: pushed WAL file '00000002.history' to the archive + +$ ls /var/lib/edb/as13/backups/repo*/archive/demo/13-1/00000002.history +/var/lib/edb/as13/backups/repo1/archive/demo/13-1/00000002.history +/var/lib/edb/as13/backups/repo2/archive/demo/13-1/00000002.history +``` + +PostgreSQL found the WALs needed for recovery in our **repo2**, picked a new timeline and pushed the history file to the repositories. + +Whenever a new timeline is created, PostgreSQL creates a timeline history file that shows which timeline it branched off from and when. These history files are necessary to allow the system to pick the right WAL segment files when recovering from a backup that contains multiple timelines. + +```bash +root# systemctl stop edb-as-13.service +$ rm -rf /var/lib/edb/as13/data/* +$ rm -rf /var/lib/edb/as13/backups/repo1/archive/demo/13-1/00000002.history +$ pgbackrest restore --stanza=demo --target="2021-04-19 14:38:55.313716+00:00" --type=time --target-timeline=current --no-delta --target-action=promote +P00 INFO: restore command begin ... +P00 INFO: repo2: restore backup set 20210419-143749F +P00 INFO: write updated /var/lib/edb/as13/data/postgresql.auto.conf +P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started) +P00 INFO: restore command end: completed successfully + +root# systemctl start edb-as-13.service +$ cat /var/lib/edb/as13/data/log/edb-2021-04-19_145721.log +P00 INFO: found 00000002.history in the repo2: 13-1 archive +P00 INFO: unable to find 00000003.history in the archive +LOG: selected new timeline ID: 3 +P00 INFO: pushed WAL file '00000003.history' to the archive +``` + +As shown above, since the history file is stored within the same repository as the restored backup set, PostgreSQL is able to pick a new and accurate timeline. + +Let us now retry after moving the history file to **repo1**. 
+ +```bash +root# systemctl stop edb-as-13.service +$ rm -rf /var/lib/edb/as13/data/* +$ mv /var/lib/edb/as13/backups/repo2/archive/demo/13-1/00000002.history /var/lib/edb/as13/backups/repo1/archive/demo/13-1/00000002.history +$ pgbackrest restore --stanza=demo --target="2021-04-19 14:38:55.313716+00:00" --type=time --target-timeline=current --no-delta --target-action=promote +root# systemctl start edb-as-13.service +$ cat /var/lib/edb/as13/data/log/edb-2021-04-19_150037.log +P00 INFO: found 00000002.history in the repo1: 13-1 archive +P00 INFO: found 00000003.history in the repo1: 13-1 archive +P00 INFO: unable to find 00000004.history in the archive +LOG: selected new timeline ID: 4 +P00 INFO: pushed WAL file '00000004.history' to the archive +``` + +As we can see, both `00000002.history` and `00000003.history` have been found in **repo1** so PostgreSQL could pick the next timeline correctly, even if the restored backup set came from **repo2**. + +When using multiple repositories, the `archive-get` command will tolerate gaps in one repository (due to lack of disk space e.g.) mainly because it will still be able to find the missing files in the other repository. diff --git a/product_docs/docs/jdbc_connector/42.2.12.3/02_requirements_overview.mdx b/product_docs/docs/jdbc_connector/42.2.12.3/02_requirements_overview.mdx index dcbbc9b23b3..0961a1ff0fb 100644 --- a/product_docs/docs/jdbc_connector/42.2.12.3/02_requirements_overview.mdx +++ b/product_docs/docs/jdbc_connector/42.2.12.3/02_requirements_overview.mdx @@ -17,7 +17,7 @@ The EDB JDBC Connector is certified with Advanced Server version 9.6 and above. The EDB JDBC Connector native packages are supported on the following 64 bit Linux platforms: - Red Hat Enterprise Linux and CentOS (x86_64) 7.x and 8.x -- OEL Linux 7.x and 8.x +- OL Linux 7.x and 8.x - PPC-LE 8 running RHEL or CentOS 7.x - SLES 12.x - Debian 9.x and 10.x diff --git a/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx b/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx index 60b55ef0a3a..219086acdbf 100644 --- a/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx +++ b/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx @@ -17,7 +17,7 @@ The EDB JDBC Connector is certified with Advanced Server version 9.6 and above. The EDB JDBC Connector native packages are supported on the following 64 bit Linux platforms: - Red Hat Enterprise Linux and CentOS (x86_64) 7.x and 8.x -- OEL Linux 7.x and 8.x +- OL Linux 7.x and 8.x - PPC-LE 8 running RHEL or CentOS 7.x - SLES 12.x - Debian 9.x and 10.x diff --git a/product_docs/docs/odbc_connector/13.0.0.1/01_whats_new.mdx b/product_docs/docs/odbc_connector/13.0.0.1/01_whats_new.mdx new file mode 100644 index 00000000000..21d6a918d8c --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/01_whats_new.mdx @@ -0,0 +1,10 @@ +--- +title: "What’s New" + +--- + + +The following feature is added to create the EDB ODBC Connector `13.00.0000.01`: + +Merged with the upstream community driver version 13.00.0000. + diff --git a/product_docs/docs/odbc_connector/13.0.0.1/02_requirements_overview.mdx b/product_docs/docs/odbc_connector/13.0.0.1/02_requirements_overview.mdx new file mode 100644 index 00000000000..afbaf9ecdc4 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/02_requirements_overview.mdx @@ -0,0 +1,40 @@ +--- +title: "Requirements Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. 
If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/requirements_overview.html" +--- + +## Supported Versions + +The EDB ODBC Connector is certified with Advanced Server version 9.6 and above. + +## Supported Platforms + +The EDB ODBC Connector native packages are supported on the following platforms: + +64 bit Linux: + +- Red Hat Enterprise Linux (x86_64) 8.x and 7.x +- CentOS (x86_64) 8.x and 7.x +- OL Linux 8.x and 7.x +- PPC-LE 8 running RHEL or CentOS 7.x +- SLES 12.x +- Debian 10.x and 9.x +- Ubuntu 20.04 and 18.04 LTS + +The EDB ODBC Connector graphical installers are supported on the following Windows platforms: + +64-bit Windows: + +- Windows Server 2019 +- Windows Server 2016 +- Windows Server 2012 R2 +- Windows 10 +- Windows 8.1 + +32-bit Windows: + +- Windows 10 +- Windows 8.1 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/01_installing_edb-odbc.mdx b/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/01_installing_edb-odbc.mdx new file mode 100644 index 00000000000..2b089256549 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/01_installing_edb-odbc.mdx @@ -0,0 +1,518 @@ +--- +title: "Installing EDB-ODBC" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/installing_edb-odbc.html" +--- + +The EDB ODBC Connector is distributed and installed with the EDB Postgres Advanced Server graphical or RPM installer. + +## Installing the Connector with an RPM Package + +You can install the ODBC Connector using an RPM package on the following platforms: + +- [RHEL 7](#rhel7) +- [RHEL 7 PPCLE](#centos7_PPCLE) +- [RHEL 8](#rhel8) +- [CentOS 7](#centos7) +- [CentOS 8](#centos8) + + + +### On RHEL 7 + +Before installing the ODBC Connector, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + +```text +yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +``` + +Enable the optional, extras, and HA repositories: + +```text +subscription-manager repos --enable "rhel-*-optional-rpms" --enable "rhel-*-extras-rpms" --enable "rhel-ha-for-rhel-*-server-rpms" +``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-odbc`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +```text +yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. 
+ +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +``` + +**Installing ODBC Connector** + +After saving your changes to the configuration file, use the following commands to install the ODBC Connector: + +``` +yum install edb-odbc + +yum install edb-odbc-devel +``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On RHEL 7 PPCLE + +Before installing the ODBC Connector, you must install the following prerequisite packages, and request credentials from EDB: +1. Use the following commands to install Advance Toolchain: +```text +rpm --import https://public.dhe.ibm.com/software/server/POWER/Linux/toolchain/at/redhat/RHEL7/gpg-pubkey-6976a827-5164221b + +cat > /etc/yum.repos.d/advance-toolchain.repo < + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install the ODBC Connector. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +```text +yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +``` + +**Installing ODBC Connector** + +After saving your changes to the configuration file, use the following commands to install the ODBC Connector: + +``` +yum install edb-odbc + +yum install edb-odbc-devel +``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. 
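After installing on any of the RPM-based platforms in this section, a quick sanity check might look like the following; the second command assumes unixODBC is also installed:

```bash
# Confirm that the driver and development packages are present.
rpm -q edb-odbc edb-odbc-devel

# Show where unixODBC looks for driver and data source definitions.
odbcinst -j
```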
+ + + +### On RHEL 8 + +Before installing the ODBC Connector, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + +```text +dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +``` + +Enable the `codeready-builder-for-rhel-8-\*-rpms` repository: + +```text +ARCH=$( /bin/arch ) +subscription-manager repos --enable "codeready-builder-for-rhel-8-${ARCH}-rpms" +``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-odbc`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +```text +dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +``` + +**Installing ODBC Connector** + +After saving your changes to the configuration file, use the following commands to install the ODBC Connector: + +```text +dnf install edb-odbc + +dnf install edb-odbc-devel +``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + + +### On CentOS 7 + +Before installing the ODBC Connector, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + +```text +yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +``` + +!!! Note + You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-odbc`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +```text +yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`.
+ +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +``` + +**Installing ODBC Connector** + +After saving your changes to the configuration file, use the following command to install the ODBC Connector: + +```text +yum install edb-odbc + +yum install edb-odbc-devel +``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + + +### On CentOS 8 + +Before installing the ODBC Connector, you must install the following prerequisite packages, and request credentials from EDB: + +Install the `epel-release` package: + +```text +dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +``` + +Enable the `PowerTools` repository: + +```text +dnf config-manager --set-enabled PowerTools +``` + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + +After receiving your repository credentials you can: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install `edb-odbc`. + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +```text +dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm +``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`. + +**Modifying the file, providing your user name and password** + +After creating the `edb.repo` file, use your choice of editor to ensure that the value of the `enabled` parameter is `1`, and replace the `username` and `password` placeholders in the `baseurl` specification with the name and password of a registered EDB user. + +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://:@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +``` + +**Installing ODBC Connector** + +After saving your changes to the configuration file, use the following command to install the ODBC Connector: + +```text +dnf install edb-odbc + +dnf install edb-odbc-devel +``` + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. 
If it does, it will provide a list of the required dependencies that you must manually resolve. + +### Updating an RPM Installation + +If you have an existing EDB ODBC connector RPM installation, you can use yum or dnf to upgrade your repository configuration file and update to a more recent product version. To update the `edb.repo` file, assume superuser privileges and enter: + +- On RHEL or CentOS 7: + + `yum upgrade edb-repo` + +- On RHEL or CentOS 8: + + `dnf upgrade edb-repo` + +yum or dnf will update the `edb.repo` file to enable access to the current EDB repository, configured to connect with the credentials specified in your `edb.repo` file. Then, you can use yum or dnf to upgrade any installed packages: + +- On RHEL or CentOS 7: + + `yum upgrade edb-odbc` + + `yum upgrade edb-odbc-devel` + +- On RHEL or CentOS 8: + + `dnf upgrade edb-odbc` + + `dnf upgrade edb-odbc-devel` + +## Installing the Connector on an SLES 12 Host + +You can use the zypper package manager to install the connector on an SLES 12 host. zypper will attempt to satisfy package dependencies as it installs a package, but requires access to specific repositories that are not hosted at EDB. Before installing the connector, use the following command to add the EDB repository configuration file to your SLES host: + + `zypper addrepo https://zypp.enterprisedb.com/suse/edb-sles.repo` + +After creating the repository configuration file, use the `zypper refresh` command to refresh the metadata on your SLES host to include the EDB repositories. + +When prompted for a `User Name` and `Password`, provide your connection credentials for the EDB repository. To request credentials for the repository, visit [the EDB website](https://www.enterprisedb.com/repository-access-request). + +Before installing EDB Postgres Advanced Server or supporting components, you must also add SUSEConnect and the SUSE Package Hub extension to the SLES host, and register the host with SUSE, allowing access to SUSE repositories. Use the commands: + + `zypper install SUSEConnect` + `SUSEConnect -r 'REGISTRATION_CODE' -e 'EMAIL'` + `SUSEConnect -p PackageHub/12.4/x86_64` + `SUSEConnect -p sle-sdk/12.4/x86_64` + +For detailed information about registering a SUSE host, visit the [SUSE website](https://www.suse.com/support/kb/doc/?id=7016626). + +Then, you can use the zypper utility to install the connector: + +`zypper install edb-odbc` + +`zypper install edb-odbc-devel` + +## Installing the Connector on a Debian or Ubuntu Host + +To install a DEB package on a Debian or Ubuntu host, you must have credentials that allow access to the EDB repository. To request credentials for the repository, visit the [EDB website](https://www.enterprisedb.com/repository-access-request/). + +The following steps walk you through using the EDB apt repository to install a DEB package. When using the commands, replace the `username` and `password` with the credentials provided by EDB. + +1. Assume superuser privileges: + + ```text + sudo su - + ``` + +2. Configure the EDB repository: + + On Debian 9: + + ```text + sh -c 'echo "deb https://username:password@apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` + + On Debian 10: + + 1. Set up the EDB repository: + + ```text + sh -c 'echo "deb [arch=amd64] https://apt.enterprisedb.com/$(lsb_release -cs)-edb/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/edb-$(lsb_release -cs).list' + ``` + + 2.
Substitute your EDB credentials for the `username` and `password` in the following command: + + ```text + -c 'echo "machine apt.enterprisedb.com login password " > /etc/apt/auth.conf.d/edb.conf' + ``` + +3. Add support to your system for secure APT repositories: + + ```text + apt-get install apt-transport-https + ``` + +4. Add the EDB signing key: + + ```text + wget -q -O - https://:@apt.enterprisedb.com/edb-deb.gpg.key | apt-key add - + ``` + +5. Update the repository metadata: + + ```text + apt-get update + ``` + +6. Install DEB package: + + ```text + apt-get install edb-odbc + apt-get install edb-odbc-dev + ``` + +## Using the Graphical Installer to Install the Connector + +You can use the EDB Connectors Installation wizard to add the ODBC connector to your system; the wizard is available at the [EDB website](https://www.enterprisedb.com/software-downloads-postgres/). + +Download the installer, and then, right-click on the installer icon, and select `Run As Administrator` from the context menu. + +When the `Language Selection` popup opens, select an installation language and click `OK` to continue to the `Setup` window (shown in Figure below). + +![The ODBC Connectors Installation wizard.](../images/odbc_installation_wizard.png) + +The ODBC Connectors Installation wizard. + +Click `Next` to continue. + +![The Installation dialog.](../images/odbc_installation_dialog.png) + +The Installation dialog + +Use the `Installation Directory` dialog to specify the directory in which the connector will be installed, and click `Next` to continue. + +![The Ready to Install dialog.](../images/ready_to_install.png) + +The Ready to Install dialog. + +Click `Next` on the `Ready to Install` dialog to start the installation; popup dialogs confirm the progress of the installation wizard. + +![The installation is complete.](../images/odbc_installation_complete.png) + +The installation is complete. + +When the wizard informs you that it has completed the setup, click the `Finish` button to exit the dialog. + +You can also use StackBuilder Plus to add or update the connector on an existing Advanced Server installation; to open StackBuilder Plus, select `StackBuilder Plus` from the `Windows Apps` menu. + +![Starting StackBuilder Plus](../images/starting_stackbuilder_plus.png) + +Starting StackBuilder Plus + +When StackBuilder Plus opens, follow the onscreen instructions. Select the `EnterpriseDB ODBC Connector` option from the `Database Drivers` node of the tree control. + +![Selecting the Connectors installer.](../images/selecting_the_connectors_installer.png) + +Selecting the Connectors installer. + +Follow the directions of the onscreen wizard to add or update an installation of the EDB Connectors. diff --git a/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/index.mdx b/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/index.mdx new file mode 100644 index 00000000000..2124ba7911f --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/03_edb-odbc_overview/index.mdx @@ -0,0 +1,26 @@ +--- +title: "EDB-ODBC Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/edb-odbc_overview.html" +--- + +EDB ODBC is an interface that allows an ODBC compliant client application to connect to an Advanced Server database. 
The EDB-ODBC connector allows an application that was designed to work with other databases to run on Advanced Server; EDB ODBC provides a way for the client application to establish a connection, send queries and retrieve results from Advanced Server. + +While EDB ODBC provides a level of application portability, it should be noted that the portability is limited; EDB ODBC provides a connection, but does not guarantee command compatibility. Commands that are acceptable in another database, may not work in Advanced Server. + +The major components in a typical ODBC application are: + +- The client application - written in a language that has a binding for ODBC +- The ODBC Administrator - handles named connections for Windows or Linux +- The database specific ODBC driver - EDB ODBC +- The ODBC compliant server - EDB Postgres Advanced Server + +Client applications can be written in any language that has a binding for ODBC; C, MS-Access, and C++ are just a few. + +
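Once a data source has been defined (see the following sections), a simple way to exercise the whole stack from the command line is unixODBC's `isql` tool. A sketch, assuming a DSN named `EnterpriseDB` with the user and password used in the sample data source later in this guide:

```bash
# Connect through the EDB-ODBC driver using the "EnterpriseDB" DSN;
# the user name and password must match the data source definition.
isql -v EnterpriseDB enterprisedb manager

# At the SQL> prompt, any statement confirms the round trip, for example:
#   SELECT version();
```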
+ +installing_edb-odbc + +
diff --git a/product_docs/docs/odbc_connector/13.0.0.1/04_creating_a_data_source.mdx b/product_docs/docs/odbc_connector/13.0.0.1/04_creating_a_data_source.mdx new file mode 100644 index 00000000000..903d06485eb --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/04_creating_a_data_source.mdx @@ -0,0 +1,37 @@ +--- +title: "Creating a Data Source" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/creating_a_data_source.html" +--- + +When a client application tries to establish a connection with a server, it typically provides a data source name (also known as a "DSN"). The driver manager looks through the ODBC configuration database for a data source whose name matches the DSN provided by the application. + +On a Linux or Unix host, data sources are defined in a file; that file is usually named /etc/odbc.ini, but the name (and location) may vary. Use the following command to find out where unixODBC is searching for data source definitions: + +`$ odbc_config --odbcini --odbcinstini` + +On a Windows host, data sources are typically defined in the Windows registry. + +You can also store a data source definition (called a "File DSN") in a plain-text file of your choice. A typical data source definition for the EDB-ODBC driver looks like this: + +```text +$ cat /etc/odbc.ini +[EnterpriseDB] +Description = EnterpriseDB DSN +Driver = EnterpriseDB +Trace = yes +TraceFile = /tmp/odbc.log +Database = edb +Servername = localhost +UserName = enterprisedb +Password = manager +Port = 5444 +``` + +The first line in the data source is the data source name. The name is a unique identifier, enclosed in square brackets. The data source name is followed by a series of `'keyword=value'` pairs that identify individual connection properties that make up the data source. + +The ODBC administrator utility creates named data sources for ODBC connections. In most cases, an ODBC administrator utility is distributed with the operating system (if you’re using Windows or unixODBC, the tool is called the `ODBC Data Source Administrator`). If your operating system doesn’t include an ODBC administrator, third-party options are available online. + +Sections `Adding a Data Source Definition in Windows` and `Adding a Data Source Definition in Linux` walk you through adding a data source in Windows and Linux using the graphical tools available for each operating system. During the process of defining a data source, you’ll be asked to specify a set of connection properties. Section `EDB-ODBC Connection Properties` contains information about `optional` data source connection properties; you can specify connection properties with graphical tools or edit the `odbc.ini` file with a text editor. diff --git a/product_docs/docs/odbc_connector/13.0.0.1/05_edb-odbc_connection_properties.mdx b/product_docs/docs/odbc_connector/13.0.0.1/05_edb-odbc_connection_properties.mdx new file mode 100644 index 00000000000..f87c8c4ebd0 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/05_edb-odbc_connection_properties.mdx @@ -0,0 +1,300 @@ +--- +title: "EDB-ODBC Connection Properties" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/edb-odbc_connection_properties.html" +--- + +The following table describes the connection properties that you can specify through the dialogs in the graphical connection manager tools, or in the `odbc.ini` file that defines a named data source. The columns identify the connection property (as it appears in the ODBC Administrator dialogs), the corresponding keyword (as it appears in the `odbc.ini` file), the default value of the property, and a description of the connection property. + +| Property | Keyword name | Default value | Description | +| ----------------------------------------- | ----------------------------------------------------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Database | Database | None | The name of the database to which you are connecting. | +| Driver | Driver | EDB-ODBC | The name of the ODBC driver. | +| Server | Servername | Localhost | The name or IP address of the server that you are connecting to. | +| dbms_name | dbms_name | EnterpriseDB | Database system. Either EnterpriseDB or PostgreSQL. | +| Description | Description | | Descriptive name of the data source. | +| User Name | Username | | The name of the user that this data source uses to connect to the server. | +| Password | Password | | The password of the user associated with this named data source. | +| CPTimeout | CPTimeout | 0 | Number of seconds before a connection times out (in a connection pooling environment). | +| Port | Port | 5444 | The TCP port that the postmaster is listening on. | +| Protocol | Protocol | 7.4 | If specified, forces the driver to use the given protocol version. | +| Level of Rollback on Errors | Use the Protocol option to specify rollback behavior. | Transaction Level | Specifies how the driver handles errors:

0 - Don't rollback

1 - Rollback the transaction

2 - Rollback the statement | +| Usage Count | UsageCount | 1 | The number of installations using this driver. | +| Read Only | ReadOnly | No | Specifies that the connection is READONLY. | +| Show System Tables | ShowSystemTables | No | If enabled, the driver reports system tables in the result set of the SQLTables() function. | +| OID Options: Show Column | ShowOidColumn | No | If enabled, the SQLColumns() function reports the OID column. | +| OID Options: Fake Index | FakeOidIndex | No | If enabled, the SQLStatistics() function reports that a unique index exists on each OID column. | +| Keyset Query Optimization | Ksqo | On | If enabled, enforces server-side support for keyset queries (generated by the MS Jet database engine). | +| Recognize Unique Indexes | UniqueIndex | On | If enabled, the SQLStatistics() function will report unique indexes. If not enabled, the SQLStatistics() function reports that indexes allow duplicate values. | +| Use Declare/Fetch | UseDeclareFetch | Off | If enabled, the driver will use server-side cursors. To enable UseDeclareFetch, specify a value of 1; to disable UseDeclareFetch, specify a value of 0. | +| CommLog | CommLog | Off | If enabled, records all client/server traffic in a log file. | +| Parse Statements | Parse | Off | If enabled, the driver parses simple SELECT statements when you call the SQLNumResultCols(), SQLDescribeCol() or SQLColAttributes() functions. | +| Cancel as FreeStmt | CancelAsFreeStmt | Off | If enabled, the SQLCancel() function will call SQLFreeStmt(SQL_Close) on your behalf. | +| MyLog | Debug | Off | If enabled, the driver records its work in a log file. On Windows, the file name is C:m[ylog](<>)<process-id>; and on Linux the file name is /tmp/[mylog](<>)<username><process-id>.log. | +| Unknown Sizes | UnknownSizes | Maximum | Determines how the SQLDescribeCol() and SQLColAttributes() functions compute the size of a column. Specify 0 to force the driver to report the maximum size allowed for the type; specify 1 to force the driver to report an unknown length or 2 to force the driver to search the result set to find the longest value. Do not specify 2 if you have enabled UseDeclareFetch. | +| Text as LongVarchar | TextAsLongVarChar | 8190 | If enabled, the driver treats TEXT columns as if they are of type SQL_LONGVARCHAR. If disabled, the driver treats TEXT columns as SQL_VARCHAR values. | +| Unknown as Long Varchar | LongVarChar | False | If enabled, the driver treats values of unknown type as SQL_LONGVARCHAR values. If unchecked, the driver will treat values of unknown type as SQL_VARCHAR values. By default, values of unknown type are treated as Y values. | +| Bools as Char | BoolsAsChar | On | If enabled, the driver treats BOOL columns as SQL_CHAR values. If disabled, BOOL columns are treated as SQL_BIT values. | +| Max Varchar | MaxVarcharSize | 255 | If enabled, the driver treats VARCHAR and BPCHAR values longer than MaxVarCharSize as SQL_LONGVARCHAR values | +| Max Long Varchar Size | MaxLongVarcharSize | 8190 | If TextAsLongVarChar is on, the driver reports TEXT values are MaxLongVarcharSize bytes long.

If UnknownAsLongVarChar is on, columns of unknown type are MaxLongVarcharSize bytes long; otherwise, they are reported to be MaxVarcharSize bytes in length. | +| Cache Size | Fetch | 100 | Determines the number of rows fetched by the driver when UseDeclareFetch is enabled. | +| SysTable Prefixes | ExtraSysTablePrefixes | [dd](<>); | Use the SysTablePrefixes field to specify a semi-colon delimited list of prefixes that indicate that a table is a system table. By default, the list contains [dd](<>);. | +| Cumulative Row Count for Insert | MapSqlParcNoBatch | Off/0 | If enabled, the SQLRowCount() function will return a single, cumulative row count for the entire array of parameter settings for an INSERT statement. If disabled, an individual row count will be returned for each parameter setting. By default, this option is disabled. | +| LF<-> CR/LF conversion | LFConversion | System Dependent | The LF<->CR/LF conversion option instructs the driver to convert line-feed characters to carriage-return/line-feed pairs when fetching character values from the server and convert carriage-return/line-feed pairs back to line-feed characters when sending character values to the server. By default, this option is enabled. | +| Updatable Cursors | UpdatableCursors | Off | Permits positioned UPDATE and DELETE operations using the SQLSetPos() or SQLBulkOperations() functions. | +| Bytea as Long VarBinary | ByteaAsLongVarBinary | Off | If enabled, the driver treats BYTEA values as if they are of type SQL_LONGVARBINARY. If disabled, BYTEA values are treated as SQL_VARBINARY values. | +| Bytea as LO | ByteaAsLO | False | If enabled, the driver treats BYTEA values as if they are large objects. | +| Row versioning | RowVersioning | Off | The Row Versioning option specifies if the driver should include the xmin column when reporting the columns in a table. The xmin value is the ID of the transaction that created the row. You must use row versioning if you plan to create cursors where SQL_CONCURRENCY = SQL_CONCUR_ROWVER. | +| Disallow Premature | DisallowPremature | No/0 | Determines driver behavior if you try to retrieve information about a query without executing the query. If Yes, the driver declares a cursor for the query and fetches the meta-data from the cursor. If No, the driver executes the command as soon as you request any meta-data. | +| True is -1 | TrueIsMinus1 | Off/0 | TrueIsMinus1 tells the driver to return BOOL values of TRUE as -1. If this option is not enabled, the driver will return BOOL values of TRUE as 1. The driver always returns BOOL values of FALSE as 0. | +| Server side prepare | UseServerSidePrepare | No/0 | If enabled, the driver uses the PREPARE and EXECUTE commands to implement the Prepare/Execute model. | +| Use GSSAPI for GSS request | GssAuthUseGSS | False/0 | If set to True/1, the driver will send a GSSAPI authentication request to the server. Windows only. | +| Int8 As | BI | 0 | The value of BI determines how the driver treats BIGINT values:

If -5, BIGINT values are returned as SQL_BIGINT;

If 2, as SQL_NUMERIC;

If 8, as SQL_DOUBLE;

If 4, as SQL_INTEGER;

If 12, as SQL_VARCHAR;

If 0 (on an MS Jet client), as SQL_NUMERIC;

If 0 (on any other client), as SQL_BIGINT. | +| Extra options

Connect Settings | AB

ConnSettings | 0x0 | 0x1 - Forces the output of short-length formatted connection strings. Specify this option if you are using the MFC CDatabase class.

0x2 - Allows MS Access to recognize PostgreSQL's serial type as AutoNumber type.

0x4 - Returns ANSI character types for inquiries from applications. Specify this option for applications that have difficulty handling Unicode data.

0x8 - If set, NULL dates are reported as empty strings and empty strings are interpreted as NULL dates on input.

0x10 - Determines whether SQLGetInfo returns information about all tables or only accessible tables. If set, information is returned only for accessible tables.

0x20 - If set, each SQL command is processed in a separate network round-trip, otherwise, SQL commands are grouped into as few round-trips as possible to reduce network latency. Contains a semicolon-delimited list of SQL commands that are executed when the driver connects to the server. | +| | Socket | 4096 | Specifies the buffer size that the driver uses to connect to the client. | +| | Lie | Off | If enabled, the driver claims to support unsupported ODBC features. | +| Lowercase Identifier | LowerCaseIdentifier | Off | If enabled, the driver translates identifiers to lowercase. | +| Disable Genetic Optimizer | Optimizer | Yes/1 | Disables the genetic query optimizer. | +| Allow Keyset | UpdatableCursors | Yes/1 | Allow Keyset driven cursors | +| SSL mode | SSLMode | Disabled | If libpq (and its dependencies) are installed in the same directory as the EDB-ODBC driver, enabling SSL Mode allows you to use SSL and other utilities. | +| Force Abbreviated Connection String | CX | No/0 | Enables the option to force abbreviation of connection string. | +| Fake MSS | FakeOidIndex | No/0 | Impersonates MS SQL Server enabling MS Access to recognize PostgreSQL’s serial type as AutoNumber type. | +| BDE Environment | BDE | No/0 | Enabling this option tunes EDB-ODBC to cater to Borland Database Engine compliant output (related to Unicode). | +| XA_Opt | INI_XAOPT | Yes/1 | If enabled, calls to SQL_TABLES only include user-accessible tables. | + +## Adding a Data Source Definition in Windows + +The Windows ODBC `Data Source Administrator` is a graphical interface that creates named data sources. You can open the `ODBC Data Source Administrator` by navigating to the `Control Panel`, opening the `Administrative Tools` menu, and double-clicking the appropriate `ODBC Data Sources` icon (`32- or 64- bit`). + +![The Windows Data Source Administrator](images/windows_data_source_administrator.png) + +The Windows Data Source Administrator + +Click the `Add` button to open the `Create New Data Source` dialog. Choose `EnterpriseDB (ANSI)` or `EnterpriseDB (UNICODE)` from the list of drivers and click `Finish`. + +![The Create New Data Source dialog](images/create_new_data_source.png) + +The Create New Data Source dialog + +The EnterpriseDB ODBC Driver dialog opens. + +![Define the data source](images/define_the_data_source.png) + +Define the data source + +Use the fields on the dialog to define the named data source: + +- Enter the Database name in the `Database` field. +- Enter the host name or IP address of Advanced Server in the `Server` field. +- Enter the name of a user in the `User Name` field. +- Enter a descriptive name for the named data source in the `Description` field. +- If libpq is installed in the same directory as the EDB-ODBC driver, the drop-down listbox next to the `SSL Mode` label will be active, allowing you to use SSL and other Advanced Server utilities. +- Accept the default port number (5444), or enter an alternative number in the `Port` field. +- Enter the password of the user in the `Password` field. + +Use the `Datasource` button (located in the `Options` box) to open the `Advanced Options` dialog and specify connection properties. + +The `Global` button opens a dialog on which you can specify logging options for the EDB-ODBC driver (not the data source, but the driver itself). + +![Page 1 of the Advanced Options dialog](images/advanced_options_1.png) + +Page 1 of the Advanced Options dialog + +- Check the box next to `Disable Genetic Optimizer` to disable the genetic query optimizer. 
By default, the query optimizer is `on`. +- Check the box next to `KSQO (Keyset Query Optimization)` to enable server-side support for keyset queries. By default, `Keyset Query Optimization` is `on`. +- Check the box next to `Recognize Unique Indexes` to force the `SQLStatistics()` function to report unique indexes; if the option is not checked, the `SQLStatistics()` function will report that all indexes allow duplicate values. By default, `Recognize Unique Indexes` is `on`. +- Check the box next to `Use Declare/Fetch` to specify that the driver should use server-side cursors whenever your application executes a `SELECT` command. By default, `Use Declare/Fetch` is `off`. +- Check the box next to `CommLog (C:\psqlodbc_xxxx.log)` to record all client/server traffic in a log file. By default, logging is `off`. +- Check the box next to `Parse Statements` to specify that the driver (rather than the server) should attempt to parse simple `SELECT` statements when you call the `SQLNumResultCols()`, `SQLDescribeCol()`, or `SQLColAttributes()` function. By default, this option is `off`. +- Check the box next to `Cancel as FreeStmt (Exp)` to specify that the `SQLCancel()` function should call `SQLFreeStmt(SQLClose)` on your behalf. By default, this option is `off`. +- Check the box next to `MyLog (C:\mylog_xxxx.log)` to record a detailed record of driver activity in a log file. The log file is named `c:\mylog\_\ *process-id*.log`. By default, logging is `off`. + +The radio buttons in the Unknown Sizes box specify how the `SQLDescribeCol()` and `SQLColAttributes()` functions compute the size of a column of unknown type (see Section `Supported Data Types` for a list of known data types). + +- Choose the button next to `Maximum` to specify that the driver report the maximum size allowed for a `VARCHAR` or `LONGVARCHAR` (dependent on the `Unknowns as LongVarChar` setting). If `Unknowns as LongVarChar` is enabled, the driver returns the maximum size of a `LONGVARCHAR` (specified in the `Max LongVarChar` field in the `Miscellaneous` box). If `Unknowns as LongVarChar` is not enabled, the driver returns the size specified in the `Max VarChar` field (in the `Miscellaneous` box). +- Choose the button next to `Don’t know` to specify that the driver report a length of "unknown". +- Choose the button next to `Longest` to specify that the driver search the result set and report the longest value found. (Note: you should not specify `Longest` if `UseDeclareFetch` is enabled.) + +The properties in the `Data Type Options` box determine how the driver treats columns of specific types: + +- Check the box next to `Text as LongVarChar` to treat `TEXT` values as if they are of type `SQL_LONGVARCHAR`. If the box is not checked, the driver will treat `TEXT` values as `SQL_VARCHAR` values. By default, `TEXT` values are treated as `SQL_LONGVARCHAR` values. +- Check the box next to `Unknowns as LongVarChar` to specify that the driver treat values of unknown type as `SQL_LONGVARCHAR` values. If unchecked, the driver will treat values of unknown type as `SQL_VARCHAR` values. By default, values of unknown type are treated as `SQL_VARCHAR` values. +- Check the box next to `Bools as Char` to specify that the driver treat `BOOL` values as `SQL_CHAR` values. If unchecked, `BOOL` values are treated as `SQL_BIT` values. By default, `BOOL` values are treated as `SQL_CHAR` values. 
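+
+These data type options change only how column types are reported to the application, not how values are stored on the server. The following minimal sketch (an illustration, not part of the product documentation; the table name `mytable` and column `notes` are hypothetical, and error checking is omitted) uses `SQLDescribeCol()` to show which SQL type the driver reports for a `TEXT` column:
+
+```c++
+#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+/* Report the SQL type the driver assigns to the first column of a query
+   against a hypothetical TEXT column. Whether SQL_LONGVARCHAR or SQL_VARCHAR
+   is reported depends on the "Text as LongVarChar" data source setting. */
+void describe_text_column(SQLHSTMT hstmt)
+{
+    SQLCHAR      col_name[128];
+    SQLSMALLINT  name_len, data_type, decimal_digits, nullable;
+    SQLULEN      col_size;
+
+    SQLExecDirect(hstmt, (SQLCHAR *) "SELECT notes FROM mytable", SQL_NTS);
+    SQLDescribeCol(hstmt, 1, col_name, sizeof(col_name), &name_len,
+                   &data_type, &col_size, &decimal_digits, &nullable);
+
+    if (data_type == SQL_LONGVARCHAR)
+        printf("%s reported as SQL_LONGVARCHAR (size %lu)\n",
+               (char *) col_name, (unsigned long) col_size);
+    else
+        printf("%s reported as SQL type %d (size %lu)\n",
+               (char *) col_name, (int) data_type, (unsigned long) col_size);
+}
+```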
+ +You can specify values for some of the properties associated with the named data source in the fields in the `Miscellaneous` box: + +- Indicate the maximum length allowed for a `VARCHAR` value in the Max `VarChar` field. By default, this value is set to `255`. +- Enter the maximum length allowed for a `LONGVARCHAR` value in the Max `LongVarChar` field. By default, this value is set to `8190`. +- Specify the number of rows fetched by the driver (when `UseDeclareFetch` is enabled) in the `Cache Size` field. The default value is `100`. +- Use the `SysTablePrefixes` field to specify a semi-colon delimited list of prefixes that indicate that a table is a system table. By default, the list contains `dd_`;. + +You can reset the values on this dialog to their default settings by choosing the `Defaults` button. + +Click the `Apply` button to apply any changes to the data source properties, or the `Cancel` button to exit the dialog without applying any changes. Choose the `OK` button to apply any changes to the dialog and exit. + +Select the `Page 2` button (in the upper-left hand corner of the `Advanced Options` dialog) to access a second set of advanced options. + +![Page 2 of the Advanced Options dialog](images/odbc_advanced_options_2.png) + +Page 2 of the Advanced Options dialog + +- Check the box next to `Read Only` to prevent the driver from executing the following commands: `INSERT`, `UPDATE`, `DELETE`, `CREATE`, `ALTER`, `DROP`, `GRANT`, `REVOKE` or `LOCK`. Invoking the `Read Only` option also prevents any calls that use ODBC’s procedure call escape syntax (`call=procedure-name?`). By default, this option is `off`. +- Check the box next to `Show System Tables` to include system tables in the result set of the `SQLTables()` function. If the option is enabled, the driver will include any table whose name starts with `pg\_` or any of the prefixes listed in the `SysTablePrefixes` field of `Page 1` of the `Advanced Options` dialog. By default, this option is `off`. +- Check the box next to `Show sys/dbo Tables [Access]` to access objects in the `sys` schema and `dbo` schema through the ODBC data source. By default, this option is enabled (checked). +- Check the box next to `Cumulative Row Count for Insert` to cause a single, cumulative row count to be returned for the entire array of parameter settings for an `INSERT` statement when a call to the `SQLRowCount()` method is performed. If this option is not enabled (the box is not checked), then an individual row count is available for each parameter setting in the array, and thus, a call to `SQLRowCount()` returns the count for the last inserted row. +- Check the box next to `LF<->CR/LF` conversion to instruct the driver to convert line-feed characters to carriage-return/line-feed pairs when fetching character values from the server and convert carriage-return/line-feed pairs back to line-feed characters when sending character values to the server. By default, this option is enabled. +- Check the box next to `Updatable Cursors` to specify that the driver should permit positioned `UPDATE` and `DELETE` operations with the `SQLSetPos()` or `SQLBulkOperations()` functions. By default, this option is enabled. +- Check the box next to `bytea as LO` to specify that the driver should treat `BYTEA` values as if they are `SQL_LONGVARBINARY` values. If the box is not checked, EDB-ODBC will treat `BYTEA` values as if they are `SQL_VARBINARY` values. By default, `BYTEA` values are treated as `SQL_VARBINARY` values. 
+- Check the box next to `Row Versioning` to include the `xmin` column when reporting the columns in a table. The `xmin` column is the ID of the transaction that created the row. You must use row versioning if you plan to create cursors where `SQL_CONCURRENCY = SQL_CONCUR_ROWVER`. By default, `Row Versioning` is `off`. +- Check the box next to `Disallow Premature` to specify that the driver should retrieve meta-data about a query (i.e., the number of columns in a result set, or the column types) without actually executing the query. If this option is not specified, the driver executes the query when you request meta-data about the query. By default, `Disallow Premature` is off. +- Check the box next to `True is -1` to tell the driver to return `BOOL` values of `True` as a `-1`. If this option is not enabled, the driver will return `BOOL` values of `True` as `1`. The driver always returns `BOOL` values of `False` as `0`. +- Check the box next to `Server side prepare` to tell the driver to use the `PREPARE` and `EXECUTE` commands to implement the `Prepare/Execute` model. By default, this box is checked. +- Check the box next to `use gssapi for GSS request` to instruct the driver to send a GSSAPI connection request to the server. +- Enter the database system (either `EnterpriseDB` or `PostgreSQL`) in the `dbms_name` field. The value entered here is returned in the `SQL_DBMS_NAME` argument when the `SQLGetInfo()` function is called. The default is `EnterpriseDB`. + +Use the radio buttons in the `Int8` As box to specify how the driver should return `BIGINT` values to the client. Select the radio button next to `default` to specify the default type of `NUMERIC` if the client is MS Jet, `BIGINT` if the client is any other ODBC client. You can optionally specify that the driver return `BIGINT` values as a `bigint (SQL_BIGINT)`, `numeric (SQL_NUMERIC)`, `varchar (SQL_VARCHAR)`, `double (SQL_DOUBLE)`, or `int4 (SQL_INTEGER)`. + +The default value of the `Extra Opts` field is `0x0`. `Extra Opts` may be: + +| Option | Specifies | +| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 0x1 | Forces the output of short-length formatted connection string. Select this option when you are using the MFC CDatabase class. | +| 0x2 | Allows MS Access to recognize PostgreSQL's serial type as AutoNumber type. | +| 0x4 | Return ANSI character types for the inquiries from applications. Select this option for applications that have difficulty handling Unicode data. | +| 0x8 | If set, NULL dates are reported as empty strings and empty strings are interpreted as NULL dates on input. | +| 0x10 | Determines if SQLGetInfo returns information about all tables, or only accessible tables. If set, only information is returned for accessible tables. | +| 0x20 | If set, each SQL command is processed in a separate network round-trip, otherwise, SQL commands are grouped into as few round-trips as possible to reduce network latency. | + +The `Protocol` box contains radio buttons that tell the driver to interact with the server using a specific front-end/back-end protocol version. By default, the `Protocol` selected is `7.4+`; you can optionally select from versions `6.4+`, `6.3` or `6.2`. 
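+
+Because `Extra Opts` is a bitmask, the flag values listed in the `Extra Opts` table above can be combined by OR-ing them together. The short sketch below is an illustration only (the macro names are hypothetical); it computes the combined value you would enter in the `Extra Opts` field when both behaviors are wanted:
+
+```c++
+#include <stdio.h>
+
+/* Hypothetical names for two of the documented Extra Opts flag values. */
+#define EXTRA_OPT_SERIAL_AS_AUTONUMBER 0x2  /* MS Access sees serial as AutoNumber */
+#define EXTRA_OPT_NULL_DATE_AS_EMPTY   0x8  /* NULL dates <-> empty strings        */
+
+int main(void)
+{
+    unsigned int extra_opts = EXTRA_OPT_SERIAL_AS_AUTONUMBER | EXTRA_OPT_NULL_DATE_AS_EMPTY;
+
+    printf("Extra Opts value: 0x%X\n", extra_opts);   /* prints 0xA */
+    return 0;
+}
+```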
+
+The `Level of Rollback on errors` box contains radio buttons that specify how the driver handles errors:
+
+| Option      | Specifies                                                                                                                     |
+| ----------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| Transaction | If the driver encounters an error, it rolls back the current transaction.                                                      |
+| Statement   | If the driver encounters an error, it rolls back the current statement.                                                        |
+| Nop         | If the driver encounters an error, you must manually roll back the current transaction before the application can continue.    |
+
+The `OID Options` box contains options that control the way the driver exposes the OID column contained in some tables:
+
+- Check the box next to `Show Column` to include the `OID` column in the result set of the `SQLColumns()` function. If this box is not checked, the `OID` column is hidden from `SQLColumns()`.
+- Check the box next to `Fake Columns` to specify that the `SQLStatistics()` function should report that a unique index exists on each `OID` column.
+
+Use the `Connect Settings` field to specify a list of parameter assignments that the driver will use when opening this connection. Any configuration parameter that you can modify with a `SET` statement can be included in the semi-colon delimited list. For example:
+
+`set search_path to company1,public;`
+
+When you’ve defined the connection properties for the named data source, click the `Apply` button to apply the options; you can optionally exit without saving any options by choosing `Cancel`. Select the `OK` button to save the options and exit.
+
+Choose the `Global` button (on the `EnterpriseDB ODBC Driver` dialog) to open the `Global Settings` dialog. The options on this dialog control logging for the EDB-ODBC driver. Use this dialog to enforce logging when the driver is used without a named data source, or to log driver operations that occur before the connection string is parsed.
+
+![The Global Settings dialog](images/global_settings.png)
+
+The Global Settings dialog
+
+- Check the box next to the `CommLog` field to record all client/server traffic in a log file. The logfile is named `C:\psqlodbc_process-id`, where `process-id` is the ID of the process in use.
+- Check the box next to the `Mylog` field to keep a logfile of the driver’s activity. The logfile is named `c:\mylog_process-id`, where `process-id` is the ID of the process in use.
+- Specify a location for the logfiles in the `Folder for logging` field.
+
+When you’ve entered the connection information for the named data source, click the `Test` button to verify that the driver manager can connect to the defined data source.
+
+![The Connection is successful](images/connection_is_successful.png)
+
+The Connection is successful
+
+Click the `OK` button to exit the `Connection Test` dialog. If the connection is successful, click the `Save` button to save the named data source. If there are problems establishing a connection, adjust the parameters and test again.
+
+## Adding a Data Source Definition in Linux
+
+The Linux `ODBC Administrator` is a graphical tool that is distributed with unixODBC; you can use the `ODBC Administrator` to manage ODBC drivers and named resources. To add the ODBC Administrator to your system, open a terminal window, assume superuser privileges, and enter:
+
+ `yum install unixODBC`
+
+followed by:
+
+ `yum install unixODBC-kde`
+
+To invoke the `ODBC Administrator`, open a terminal window and enter `ODBCConfig`.
+ +![The unixODBC Data Source Administrator](images/unixodbc_data_source_administrator.png) + +The unixODBC Data Source Administrator + +When you install the Advanced Server `Connectors` component, the EDB-ODBC driver is added to the list of drivers in the ODBC Administrator. Click `Advanced`, and then select the `Drivers` tab to verify that the `enterprisedb` driver appears in the list. + +![The Drivers tab shows the installed EDB-ODBC driver](images/installed_edb-odbc_driver.png) + +The Drivers tab shows the installed EDB-ODBC driver + +If the EDB-ODBC driver does not appear in the list of drivers, you can add it using the `ODBC Administrator`. To add a driver definition, select the `Drivers` tab, and click `Add`. The `Driver Properties (new)` window opens, as shown below: + +![The Driver Properties window](images/driver_properties_window.png) + +The Driver Properties window + +Complete the `Driver Properties` window to register the EDB-ODBC driver with the driver manager: + +- Add a unique name for the driver to the `Name` field. + +- Add a driver description to the `Description` field. + +- Add the path to the location of the EDB-ODBC driver in the `Driver` field. By default, the complete path to the driver is: + + `/usr/edb/odbc/lib/edb-odbc.so` + +- Add the path to the location of the EDB-ODBC driver setup file in the `Setup` field. By default, the complete path to the driver setup file is: + + `/usr/edb/odbc/lib/libodbcedbS.so` + +When you’ve described the driver properties for the EDB-ODBC driver, click `OK`. The ODBC Data Source Administrator window now includes the EDB-ODBC driver in the list of available ODBC drivers. + +![The Drivers tab shows the new driver definition](images/new_driver_definition.png) + +The Drivers tab shows the new driver definition + +With the EDB-ODBC driver available to the driver manager, you can add a data source. Click the `Data Source` Names option in the left panel, and then choose the appropriate DSN tab for the type of data source name you would like to add: + +- Choose the `User` tab to add a named data source that is available only to the current user (the data source will be stored in `/user/.odbc.ini`). +- Choose the `System` tab add a named data source that is available to all users. All system data sources are stored in a single file (usually `/etc/odbc.ini`). +- Choose the `File` tab to add a named data source that is available to all users, but that is stored in a file of your choosing. + +Select the appropriate tab and click `Add`. The `Create a New Data Source…` window opens, as shown below: + +![Select a driver for the named data source](images/select_driver_named_date_source.png) + +Select a driver for the named data source + +Select the EDB-ODBC driver from the list, and click `OK` to open the `Data Source Properties` window. + +Complete the `Data Source Properties (new)` window, specifying the connection properties for the EDB-ODBC driver. + +![The Data Source Properties window](images/data_source_properties_window.png) + +The Data Source Properties window + +- Enter the data source name in the `Name` field. +- Enter a description of the named data source in the `Description` field. +- The unixODBC driver includes a trace utility that records the sequence of calls made an ODBC application to a log file. Specify `Yes` in the `Trace` field to turn the trace utility on. Note that using the trace utility can slow down an application. +- Use the `TraceFile` field to specify a file to receive information returned by the `Trace` utility. 
+- Enter the name of the Advanced Server database in the `Database` field. +- Enter the host name or IP address of Advanced Server in the `Servername` field. +- Enter the name of a user in the `Username` field. +- Enter the password for the user in the `Password` field. +- Enter a port number (or accept the default value of `5444`) in the `Port` field. +- Use the `Protocol` field to specify a front-end/back-end protocol version; the default value is `7.4`. You can optionally select from protocol versions `7.4`, `6.4`, `6.3` or `6.2`. +- Use the `ReadOnly` field to specify `Yes` to prevent the driver from executing the following commands: `INSERT`, `UPDATE`, `DELETE`, `CREATE`, `ALTER`, `DROP`, `GRANT`, `REVOKE` or `LOCK`. Enabling the `Read Only` option also prevents any calls that use the ODBC procedure call escape syntax (`call=procedure-name?`). By default, `ReadOnly` is set to `No`. +- Use the `RowVersioning` field to specify `Yes` if the driver should include the `xmin` column when reporting the columns in a table. The `xmin` column is the ID of the transaction that created the row. You must use row versioning if you plan to create cursors where `SQL_CONCURRENCY = SQL_CONCUR_ROWVER`. By default, `Row Versioning` is set to `No`. +- Use the `ShowSystemTables` field to specify `Yes` if the driver should include system tables in the result set of the `SQLTables()` function. By default, this field is set to `No`. +- Use the `ShowOidColumn` field to specify `Yes` if the driver should include the `OID` column in the result set of the `SQLColumns()` function. If `ShowOidColumn` is set to `No`, the `OID` column is hidden from `SQLColumns()`. By default, this option is set to `No`. +- Use the `FakeOidIndex` field to specify Yes if the `SQLStatistics()` function should report that a unique index exists on each `OID` column. This is useful when your application needs a unique identifier and your table doesn’t include one. The default value is `No`. +- Use the `ConnSettings` field to specify a list of parameter assignments that the driver will use when opening this connection. + +When you’ve defined the connection properties, click `OK`. + +The new data source is added to the list of data source names: + +![The new data source is included on the Data Source Names list](images/data_source_names.png) + +The new data source is included on the Data Source Names list diff --git a/product_docs/docs/odbc_connector/13.0.0.1/06_edb-odbc_driver_functionality.mdx b/product_docs/docs/odbc_connector/13.0.0.1/06_edb-odbc_driver_functionality.mdx new file mode 100644 index 00000000000..9aa1f2ff4e2 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/06_edb-odbc_driver_functionality.mdx @@ -0,0 +1,753 @@ +--- +title: "EDB-ODBC Driver Functionality" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/edb-odbc_driver_functionality.html" +--- + +You can use ODBC functions to query ODBC for specific information about the various attributes of the connection between EDB-ODBC and the server. + +- `SQLGetInfo()` returns information about the EDB-ODBC driver and Advanced Server. +- `SQLGetEnvAttr()` returns information about ODBC environment attributes. +- `SQLGetConnectAttr()` returns information about attributes specific to an individual connection. +- `SQLGetStmtAttr()` returns information about the attributes specific to an individual statement. 
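+
+As a brief illustration of the first of these calls (this sketch is not part of the original document; `conn` is assumed to be a valid, connected `SQLHDBC` handle and error checking is omitted), the following function uses `SQLGetInfo()` to report the DBMS name and version for an open EDB-ODBC connection:
+
+```c++
+#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+/* Print the DBMS name and server version reported by the driver. */
+void print_dbms_info(SQLHDBC conn)
+{
+    SQLCHAR     name[64];
+    SQLCHAR     version[64];
+    SQLSMALLINT name_len;
+    SQLSMALLINT version_len;
+
+    SQLGetInfo(conn, SQL_DBMS_NAME, name, sizeof(name), &name_len);
+    SQLGetInfo(conn, SQL_DBMS_VER,  version, sizeof(version), &version_len);
+
+    printf("Connected to %s, server version %s\n",
+           (char *) name, (char *) version);
+}
+```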
+ +You can also use ODBC functions to set various attributes of the objects that you use to interface with ODBC: + +- Use the `SQLSetConnectAttr()` function to set connection attributes. +- Use the `SQLSetEnvAttr()` function to set environment attributes. +- Use the `SQLSetStmtAttr()` function to set statement attributes. + +## SQLGetInfo() + +The ODBC `SQLGetInfo()` function returns information about the EDB-ODBC driver and Advanced Server. You must have an open connection to call `SQLGetInfo()`, unless you specify `SQL_ODBC_VER` as the `info_type`. The signature for `SQLGetInfo()` is: + +```c++ +SQLRETURN SQLGetInfo +( + SQLHDBC conn_handle , // Input + SQLUSMALLINT info_type , // Input + SQLPOINTER info_pointer , // Output + SQLSMALLINT buffer_len , // Input + SQLSMALLINT * string_length_pointer // Output +); +``` + +- `conn_handle` The connection handle. + +- `info_type` The type of information SQLGetInfo() is retrieving. + +- `info_pointer` A pointer to a memory buffer that will hold the retrieved value. + + If the `info_type` argument is `SQL_DRIVER_HDESC` or `SQL_DRIVER_HSTMT`, the `info_pointer` argument is both `Input` and `Output`. + +- `buffer_len` is the length of the allocated memory buffer pointed to by `info_pointer`. If `info_pointer` is `NULL`, `buffer_len` is ignored. If the returned value is a fixed size, `buffer_len` is ignored. `buffer_len` is only used if the requested value is returned in the form of a character string. + +- `string_length_pointer` is a pointer to an `SQLSMALLINT` value. `SQLGetInfo()` writes the size of the requested value in this integer. + +A typical usage is to call `SQLGetInfo()` with a `NULL info_pointer` to obtain the length of the requested value, allocate the required number of bytes, and then call `SQLGetInfo()` again (providing the address of the newly allocated buffer) to obtain the actual value. The first call retrieves the number of bytes required to hold the value; the second call retrieves the value. + +If the size of the returned value exceeds `buffer_len`, the information is truncated and `NULL` terminated. If the returned value is a fixed size, `string_length` is ignored (and the size of the requested value is not provided by `SQLGetInfo()`). + +`SQLGetInfo()` writes information in one of the following formats: + +- a `SQLUINTEGER` bitmask +- a `SQLUINTEGER` flag +- a `SQLUINTEGER` binary value +- a `SQLUSMALLINT` value +- a `NULL` terminated character string + +`SQLGetInfo()` returns `SQL_SUCCESS`, `SQL_SUCCESS_WITH_INFO`, `SQL_ERROR`, or `SQL_INVALID_HANDLE`. + +The following table lists the information returned by EDB-ODBC about the Advanced Server connection: + +| **SQL info_type Argument and Description** | **EDB_ODBC/Advanced Server Returns:** | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| SQL_ACCESSIBLE_PROCEDURES: Indicates if procedures returned by SQLProcedures()can be executed by the application. | Returns N. Some procedures executed by the SQLProcedures() function may be executed by the application. 
| +| SQL_ACCESSIBLE_TABLES: Indicates if the user has SELECT privileges on all table names returned by SQLTables(). | Returns N. The user may not have select privileges on one or more tables returned by the SQLTables() function. | +| SQL_ACTIVE_CONNECTIONS prev. SQL_MAX_DRIVER_CONNECTIONS: Indicates the maximum number of connections EDB-ODBC can support. | Returns 0. There is no specified limit to the number of connections allowed. | +| SQL_ACTIVE_ENVIRONMENTS: The number of active environments EDB-ODBC can support. | Returns 0. There is no specified limit to the number of environments allowed. | +| SQL_ACTIVE_STATEMENTS prev. SQL_MAX_CONCURRENT_ACTIVITIES: Indicates the maximum number of active statements EDB-ODBC can support. | Returns 0. There is no specified limit to the number of active statements allowed. | +| SQL_AGGREGATE_FUNCTION: Identifies the aggregate functions supported by the server and driver. | Returns SQL_AF_ALL | +| SQL_ALTER_DOMAIN: Identifies the ALTER DOMAIN clauses supported by the server. | Returns 0. ALTER DOMAIN clauses are not supported. | +| SQL_ALTER_TABLE: Identifies the ALTER TABLE clauses supported by the server. | Returns SQL_AT_ADD_COLUMN, SQL_AT_DROP_TABLE_CONSTRAINT_CASCADE, SQL_AT_DROP_TABLE_CONSTRAINT, SQL_AT_CONSTRAINT_INITIALLY_DEFERRED, SQL_AT_CONSTRAINT_INITIALLY_IMMEDIATE, SQL_AT_CONSTRAINT_DEFERRABLE | +| SQL_ASYNC_MODE: Level of Asynchronous Mode Supported by EDB-ODBC. | Returns SQL_AM_NONE. Asynchronous mode is not supported. | +| SQL_BATCH_ROW_COUNT: Indicates how the driver returns row counts. | Returns SQL_BRC_EXPLICIT. Row Counts are available when executed by calling SQLExecute or SQLExecDirect. | +| SQL_BATCH_SUPPORT: Indicates support for batch statement execution. | Returns: SQL_BS_SELECT_EXPLICIT, SQL_BS_ROW_COUNT_EXPLICIT. The driver supports explicit batches with result set and row count generating statements. | +| SQL_BOOKMARK_PERSISTENCE: Indicates level of support for bookmarks. | Returns: SQL_BP_DELETE, SQL_BP_TRANSACTION, SQL_BP_UPDATE, SQL_BP_SCROLL. | +| SQL_CATALOG_LOCATION Now SQL_QUALIFIER_LOCATION: Indicates the position of the catalog in a qualified table name. | Returns SQL_CL_START. The catalog portion of a qualified table name is at the beginning of the name. | +| SQL_CATALOG_NAME Now SQL_QUALIFIER_NAME: Indicates support for catalog names. | Returns Y. The server supports catalog names. | +| SQL_CATALOG_NAME_SEPARATOR Now SQL_QUALIFIER_NAME_SEPARATOR: Character separating the catalog name from the adjacent name element. | Returns '.' The server expects a '.' character between the qualifier and the table name. | +| SQL_CATALOG_TERM Now SQL_QUALIFIER_TERM: The term used to describe a catalog. | Returns catalog. | +| SQL_CATALOG_USAGE Now SQL_QUALIFIER_USAGE: Indicates the SQL statements that may refer to catalogs. | Returns SQL_CU_DML_STATEMENTS. Catalog names can be used in SELECT, INSERT, UPDATE, DELETE, SELECT FOR UPDATE and positioned UPDATE and DELETE statements. | +| SQL_COLLATION_SEQ: Returns the name of the Collation Sequence. | Returns an empty string. The name of the default collation is unknown. | +| SQL_COLUMN_ALIAS: Indicates server support for column aliases. | Returns Y. The server supports column aliases. | +| SQL_CONCAT_NULL_BEHAVIOR: Indicates how the server handles concatenation of NULL values. | Returns SQL_CB_NON_NULL. Concatenation of a NULL value and a non NULL value will result in a NULL value. | +| SQL_CONVERT_BIGINT: Indicates conversion support from the BIGINT type using the CONVERT function. 
| Returns 0. The server does not support conversion. | +| SQL_CONVERT_BINARY: Indicates conversion support from the BINARY type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_BIT: Indicates conversion support from the BIT type using the CONVERT function. | Returns: SQL_CVT_INTEGER, SQL_CVT_BIT. | +| SQL_CONVERT_CHAR: Indicates conversion support from the CHAR type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_DATE: Indicates conversion support from the DATE type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_DECIMAL: Indicates conversion support from the DECIMAL type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_DOUBLE: Indicates conversion support from the DOUBLE type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_FLOAT: Indicates conversion support from the FLOAT type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_FUNCTIONS: Lists the scalar conversion functions supported by the server and driver using the CONVERT function. | Returns: SQL_FN_CVT_CONVERT. | +| SQL_CONVERT_INTEGER: Lists the conversion support from the INTEGER type using the CONVERT function. | Returns: SQL_CVT_INTEGER, SQL_CVT_BIT. | +| SQL_CONVERT_INTERVAL_DAY_TIME: Indicates conversion support from the INTERVAL_DAY_TIME type using the CONVERT function. | This info_type is not currently supported. | +| SQL_CONVERT_INTERVAL_YEAR_MONTH: Indicates conversion support from the INTERVAL_YEAR_MONTH type using the CONVERT function. | This info_type is not currently supported. | +| SQL_CONVERT_LONGVARBINARY: Indicates conversion support for the LONG_VARBINARY type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_LONGVARCHAR: Indicates conversion support for the LONGVARCHAR type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_NUMERIC: Indicates conversion support for the NUMERIC type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_REAL: Indicates conversion support for the REAL type using the CONVERT function | Returns 0. The server does not support conversion. | +| SQL_CONVERT_SMALLINT: Indicates conversion support for the SMALLINT type using the CONVERT function. | Returns: SQL_CVT_INTEGER, SQL_CVT_BIT. | +| SQL_CONVERT_TIME: Indicates conversion support for TIME type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CVT_TIMESTAMP: Indicates conversion support for TIMESTAMP type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_TINYINT: Indicates conversion support for the TINYINT type using the CONVERT function. | Returns: SQL_CVT_INTEGER, SQL_CVT_BIT. | +| SQL_CONVERT_VARBINARY: Indicates conversion support for the VARBINARY type using the CONVERT function. | Returns 0. The server does not support conversion. | +| SQL_CONVERT_VARCHAR: Indicates conversion support for VARCHAR type using the CONVERT function. | Returns: SQL_CVT_INTEGER, SQL_CVT_BIT. | +| SQL_CONVERT_WCHAR: Indicates conversion support for the WCHAR type using the CONVERT function. | This info_type is valid only when using the Unicode driver. Returns 0. The server does not support conversion. 
| +| SQL_CONVERT_WLONGVARCHAR: Indicates conversion support for the WLONGVARCHAR type using the CONVERT function. | This info_type is valid only when using the Unicode driver. Returns 0. The server does not support conversion. | +| SQL_CONVERT_WVARCHAR: Indicates conversion support for the WVARCHAR type using the CONVERT function. | This info_type is valid only when using the Unicode driver. Returns 0. The server does not support conversion. | +| SQL_CORRELATION_NAME: Indicates server support for correlation names. | Returns SQL_CN_ANY. Correlation names are supported and can be any valid name. | +| SQL_CREATE_ASSERTION: Indicates support for the CREATE ASSERTION statement. | Returns 0. The CREATE ASSERTION statement is not supported. | +| SQL_CREATE_CHARACTER_SET: Indicates support for CREATE CHARACTER statement. | Returns 0. The CREATE CHARACTER statement is not supported. | +| SQL_CREATE_COLLATION: Indicates support for the CREATE COLLATION. | Returns 0. The CREATE COLLATION statement is not supported. | +| SQL_CREATE_DOMAIN: Indicates support for the CREATE DOMAIN statement. | Returns 0. The CREATE DOMAIN statement is not supported. | +| SQL_CREATE_SCHEMA: Indicates support for the CREATE SCHEMA statement. | Returns: SQL_CS_CREATE_SCHEMA, SQL_CS_AUTHORIZATION. | +| SQL_CREATE_TABLE: Indicates support for the CREATE TABLE statement. | Returns: SQL_CT_CREATE_TABLE, SQL_CT_GLOBAL_TEMPORARY, SQL_CT_CONSTRAINT_INITIALLY_DEFERRED, SQL_CT_CONSTRAINT_INITIALLY_IMMEDIATE, SQL_CT_CONSTRAINT_DEFERRABLE, SQL_CT_COLUMN_CONSTRAINT, SQL_CT_COLUMN_DEFAULT, SQL_CT_TABLE_CONSTRAINT, SQL_CT_CONSTRAINT_NAME_DEFINITION | +| SQL_CREATE_TRANSLATION: Indicates support for the CREATE TRANSLATION statement. | Returns 0. The CREATE TRANSLATION statement is not supported. | +| SQL_CREATE_VIEW: Indicates support for the CREATE VIEW statement. | Returns SQL_CV_CREATE_VIEW. | +| SQL_CURSOR_COMMIT_BEHAVIOR: Indicates how a COMMIT operation affects the cursor. | Returns SQL_CB_PRESERVE. Cursors are unchanged, and can continue to fetch data. | +| SQL_CURSOR_ROLLBACK_BEHAVIOR: Indicates the server behavior after a ROLLBACK operation. | Returns SQL_CB_PRESERVE. Cursors are unchanged, and can continue to fetch data. | +| SQL_CURSOR_SENSITIVITY:Indicates how the server synchronizes changes to a result set. | This info_type is not currently supported. | +| SQL_DATA_SOURCE_NAME: Returns the server name used during connection. | The value returned is determined by the connection properties. | +| SQL_DATA_SOURCE_READ_ONLY: Indicates if the connection is in READ ONLY mode. | The value returned is determined by the connection properties. | +| SQL_DATABASE_NAME: Returns the name of the database. | The value returned is determined by the connection properties. | +| SQL_DATETIME_LITERALS: Indicates the DATETIME LITERALS supported by the server. | This info_type is not supported. | +| SQL_DBMS_NAME: Returns the name of the DBMS system. | Returns the value given by the dbms_name parameter from the odbc.ini file on Linux or the dbms_name field of page 2 of the Advanced Options dialog box when defining a data source in Windows. The default is EnterpriseDB. | +| SQL_DBMS_VER: Returns the server version. | Determined by the server. | +| SQL_DDL_INDEX: Indicates support for creating and dropping indexes. | Returns: SQL_DI_CREATE_INDEX, SQL_DI_DROP_INDEX. | +| SQL_DEFAULT_TXN_ISOLATION: Indicates support for transaction isolation by the server. | Returns TXN_READ_COMMITTED. Non-repeatable or phantom reads are possible; Dirty reads are not. 
| +| SQL_DESCRIBE_PARAMETER: Indicates support for the DESCRIBE INPUT statement. | Returns N. The DESCRIBE INPUT statement is not supported. | +| SQL_DM_VER: The version of the Driver Manager. | Determined by driver manager. | +| SQL_DRIVER_HDBC: The Driver's connection handle. | Returns an SQLULEN value that contains the driver’s connection handle. | +| SQL_DRIVER_HDESC: The Driver descriptor handle. | Returns an SQLULEN value that contains driver’s descriptor handle. | +| SQL_DRIVER_HENV: The Driver's environment handle. | Returns an SQLULEN value that contains the driver’s environment handle. | +| SQL_DRIVER_HLIB: The Driver handle. | Returns an SQLULEN value that contains the library handle (returned to the ODBC driver manager when the manager loaded the driver). | +| SQL_DRIVER_HSTMT: The Driver's statement handle. | Returns an SQLULEN value that contains the driver’s statement handle. | +| SQL_DRIVER_NAME: The name of the driver. | Returns EDB-ODBC.DLL | +| SQL_DRIVER_ODBC_VER: Identifies the ODBC version that the driver supports. | Returns 03.50 | +| SQL_DRIVER_VER: Identifies the driver version. | Returns 9.0.0.6 | +| SQL_DROP_ASSERTION: Lists the DROP ASSERTION clauses supported by the server. | Returns 0 | +| SQL_DROP_CHARACTER_SET: Lists the DROP CHARACTER clauses supported by the server. | Returns 0 | +| SQL_DROP_COLLATION: Lists the DROP COLLATION clauses supported by the server. | Returns 0 | +| SQL_DROP_DOMAIN: Lists the DROP DOMAIN clauses supported by the server. | Returns 0 | +| SQL_DROP_SCHEMA: Lists the DROP SCHEMA clauses supported by the server. | Returns: SQL_DS_DROP_SCHEMA, SQL_DS_RESTRICT, SQL_DS_CASCADE. | +| SQL_DROP_TABLE: Lists the DROP TABLE clauses supported by the server. | Returns: SQL_DT_DROP_TABLE, SQL_DS_RESTRICT, SQL_DS_CASCADE. | +| SQL_DROP_TRANSLATION: Lists the DROP TRANSLATION clauses supported by the server. | Returns 0. | +| SQL_DROP_VIEW: Lists the DROP VIEW clauses supported by the server. | Returns: SQL_DV_DROP_VIEW, SQL_DS_RESTRICT, SQL_DS_CASCADE. | +| SQL_DYNAMIC_CURSOR_ATTRIBUTES1: Describes the first set of dynamic cursor attributes supported by the driver. | Returns 0 | +| SQL_DYNAMIC_CURSOR_ATTRIBUTES2: Describes the second set of dynamic cursor attributes supported by the driver. | Returns 0 | +| SQL_EXPRESSIONS_IN_ORDERBY: Indicates server support for ORDER BY. | Returns Y. | +| SQL_FETCH_DIRECTION: Indicates FETCH order options (deprecated in ODBC 3.0). | Returns: SQL_FD_FETCH_NEXT, SQL_FD_FETCH_FIRS, SQL_FD_FETCH_LAST, SQL_FD_FETCH_PRIOR, SQL_FD_FETCH_ABSOLUTE, SQL_FD_FETCH_RELATIVE, SQL_FD_FETCH_BOOKMARK. | +| SQL_FILE_USAGE: Indicates how a single-tier driver treats files on the server. | Returns SQL_FILE_NOT_SUPPORTED. The driver is not a single-tier file. | +| SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1: Describes the forward-only cursor attributes supported by the driver. | Returns SQL_CA1_NEXT. | +| SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2: Describes extended attributes for the forward-only cursor designated by SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1. | Returns: SQL_CA2_READ_ONLY_CONCURRENCY, SQL_CA2_CRC_EXACT. | +| SQL_GETDATA_EXTENSIONS: Lists supported extensions to SQLGetData. | Returns: SQL_GD_ANY_COLUMN, SQL_GD_ANY_ORDER, SQL_GD_BLOCK, SQL_GD_BOUND. | +| SQL_GROUP_BY: Indicates the relationship between a GROUP BY clause and columns in the SELECT list. | Returns SQL_GB_GROUP_BY_EQUALS_SELECT. | +| SQL_IDENTIFIER_CASE: Indicates case-sensitivity and case-storage of SQL identifiers. | Returns SQL_IC_LOWER. 
| +| SQL_INDEX_KEYWORDS: Indicates support for the CREATE INDEX statement. | Returns SQL_IK_NONE. | +| SQL_INFO_SCHEMA_VIEWS: Lists the views supported in the INFORMATION_SCHEMA. | Returns 0. | +| SQL_INTEGRITY Prev. SQL_ODBC_SQL_OPT_IEF: Indicates server support for referential integrity syntax checking. | Returns N. | +| SQL_INSERT_STATEMENT: Indicates level of support for the INSERT statement. | Returns: SQL_IS_INSERT_LITERALS, SQL_IS_INSERT_SEARCHED, SQL_IS_SELECT_INTO. | +| SQL_KEYSET_CURSOR_ATTRIBUTES1: Describes the first set of keyset cursor attributes supported by the driver. | Returns: SQL_CA1_NEXT, SQL_CA1_ABSOLUTE, SQL_CA1_RELATIVE, SQL_CA1_BOOKMARK, SQL_CA1_LOCK_NO_CHANGE, SQL_CA1_POS_POSITION, SQL_CA1_POS_UPDATE, SQL_CA1_POS_DELETE, SQL_CA1_POS_REFRESH, SQL_CA1_BULK_ADD, SQL_CA1_BULK_UPDATE_BY_BOOKMARK, SQL_CA1_BULK_DELETE_BY_BOOKMARK, SQL_CA1_BULK_FETCH_BY_BOOKMARK. | +| SQL_KEYSET_CURSOR_ATTRIBUTES2: Describes the second set of keyset cursor attributes supported by the driver. | Returns: SQL_CA2_READ_ONLY_CONCURRENCY, SQL_CA2_OPT_ROWVER_CONCURRENCY, SQL_CA2_SENSITIVITY_ADDITIONS, SQL_CA2_SENSITIVITY_DELETIONS, SQL_CA2_SENSITIVITY_UPDATES, SQL_CA2_CRC_EXACT. | +| SQL_KEYWORDS: Identifies the server specific reserved keywords. | Returns “”. There are no server specific reserved keywords. | +| SQL_LIKE_ESCAPE_CLAUSE: Indicates support for an escape character in LIKE predicates. | Returns N. Advanced Server does not support escape characters in LIKE predicates. | +| SQL_LOCK_TYPES: Lists supported lock types (deprecated in ODBC 3.0). | Returns SQL_LCK_NO_CHANGE. | +| SQL_MAX_ASYNC_CONCURRENT_STATEMENTS: The number of active concurrent statements that the driver can support. | This info_type is currently unsupported. | +| SQL_MAX_BINARY_LITERAL_LEN: The maximum length of a binary literal. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_CATALOG_NAME_LEN: The maximum length of a catalog name on the server. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_QUALIFIER_NAME_LEN: The maximum length of a qualifier. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_CHAR_LITERAL_LEN: The maximum number of characters in a character string. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_COLUMN_NAME_LEN: The maximum length of a column name. | Returns 64. Column names cannot exceed 64 characters in length. | +| SQL_MAX_COLUMNS_IN_GROUP_BY: The maximum number of columns allowed in a GROUP BY clause. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_COLUMNS_IN_INDEX: The maximum number of columns allowed in an index. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_COLUMNS_IN_ORDER_BY: The maximum number of columns allowed in an ORDER BY clause. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_COLUMNS_IN_SELECT: The maximum number of columns allowed in a SELECT list. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_COLUMNS_IN_TABLE: The maximum number of columns allowed in a table. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_CONCURRENT_ACTIVITIES prev. SQL_MAX_ACTIVE_STATEMENTS: The maximum number of active SQL statements that the driver can support. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_CURSOR_NAME_LEN: The maximum length of a cursor name. | Returns 32. A cursor name cannot exceed 32 characters in length. | +| SQL_MAX_DRIVER_CONNECTIONS prev. SQL_ACTIVE_CONNECTIONS: The maximum number of active connections the driver can support. | Returns 0. 
There is no specified limit to the number of connections supported. | +| SQL_MAX_IDENTIFIER_LEN: The maximum identifier length allowed by the server. | Returns 64. Identifiers cannot exceed 64 characters in length. | +| SQL_MAX_INDEX_SIZE: The maximum number of bytes allowed in the (combined) fields of an index. | Returns 0. The maximum size is unspecified. | +| SQL_MAX_OWNER_NAME_LEN Now SQL_MAX_SCHEMA_NAME_LEN: The maximum length of an owner name allowed by the server. | Returns 64. The maximum length of an owner name is 64 characters. | +| SQL_MAX_PROCEDURE_NAME_LEN: The maximum length of a procedure name allowed by the server. | Returns 0. The maximum length is unspecified. | +| SQL_MAX_QUALIFIER_NAME_LEN Now SQL_MAX_CATALOG_NAME_LEN: The maximum length of a qualifier name allowed by the server. | Returns 0. The maximum length of a qualifier is unspecified. | +| SQL_MAX_ROW_SIZE: The maximum length of a row. | Returns 0. The maximum row length is unspecified. | +| SQL_MAX_ROW_SIZE_INCLUDES_LONG: Indicates whether the SQL_MAX_ROW_SIZE includes the length of any LONGVARCHAR or LONGVARBINARY columns in the row. | Returns Y. SQL_MAX_ROW_SIZE includes the length of any LONGVARCHAR or LONGVARBINARY columns in the row. | +| SQL_MAX_SCHEMA_NAME_LEN: The maximum length of a schema name allowed by the server. | Returns 64. The maximum length of a schema name is 64 characters. | +| SQL_MAX_STATEMENT_LEN: The maximum length of a SQL statement. | Returns 0. Maximum statement length is limited by available memory. | +| SQL_MAX_TABLE_NAME_LEN: The maximum length of a table name allowed by the server. | Returns 64. The maximum length of a table name is 64 characters. | +| SQL_MAX_TABLES_IN_SELECT: The maximum number of tables allowed in the FROM clause of a SELECT statement. | Returns 0. The maximum number of tables allowed is unspecified. | +| SQL_MAX_USER_NAME_LEN: The maximum length of the user name allowed by the server. | Returns 0. The maximum length of a user name is unspecified. | +| SQL_MULT_RESULT_SETS: Indicates server support for multiple result sets. | Returns Y. Advanced Server supports multiple result sets. | +| SQL_MULTIPLE_ACTIVE_TXN: Indicates if the server supports multiple active transactions. | Returns Y. Advanced Server supports multiple active transactions. | +| SQL_NEED_LONG_DATA_LEN: Indicates if the server needs the length of a LONG data value before receiving the value. | Returns N. Advanced Server does not need the length of a LONG data value before receiving the value. | +| SQL_NON_NULLABLE_COLUMNS: Indicates if the server supports NOT NULL values in columns. | Returns SQL_NNC_NON_NULL. Advanced Server does support NOT NULL values in columns. | +| SQL_NULL_COLLATION: Indicates where NULL values are located in a result set. | Returns SQL_NC_HIGH. The location of NULL values in a data set is determined by the ASC and DESC keywords; NULL values are sorted to the high end of the data set. | +| SQL_NUMERIC_FUNCTIONS: Lists the numeric functions supported by the driver and the server. | Returns: SQL_FN_NUM_ABS, SQL_FN_NUM_ATAN, SQL_FN_NUM_CEILING, SQL_FN_NUM_COS, SQL_FN_NUM_EXP, SQL_FN_NUM_FLOOR, SQL_FN_NUM_LOG, SQL_FN_NUM_MOD, SQL_FN_NUM_SIGN, SQL_FN_NUM_SIN, SQL_FN_NUM_SQRT, SQL_FN_NUM_TAN, SQL_FN_NUM_RAND, SQL_FN_NUM_POWER, SQL_FN_NUM_ROUND. | +| SQL_ODBC_API_CONFORMANCE: Indicates the ODBC 3.0 compliance level | Returns SQL_OAC_LEVEL1. The driver conforms to ODBC Level 1 interface. | +| SQL_ODBC_INTERFACE_CONFORMANCE: Indicates the ODBC interface that the driver adheres to. 
| Returns SQL_OIC_CORE. | +| SQL_ODBC_SAG_CLI_CONFORMANCE: Indicates the SQL Access Group compliance level that the driver adheres to. | Returns SQL_OSCC_NOT_COMPLIANT. The driver is not SAG CLI compliant. | +| SQL_ODBC_SQL_CONFORMANCE: Indicates the SQL grammar level that the driver conforms to. | Returns SQL_OSC_CORE. The driver conforms to the core grammar level. | +| SQL_ODBC_SQL_OPT_IEF Now SQL_INTEGRITY: Indicates server support for referential integrity syntax checking. | Returns N. The server does not support referential integrity syntax checking. | +| SQL_ODBC_VER: The ODBC version supported by the driver manager | Returns 03.52.0000. | +| SQL_OJ_CAPABILITIES: Identifies the outer joins that are supported by the server. | Returns: SQL_OJ_LEFT, SQL_OJ_RIGHT, SQL_OJ_FULL, SQL_OJ_NESTED, SQL_OJ_NOT_ORDERED, SQL_OJ_INNER, SQL_OJ_ALL_COMPARISON_OPS. | +| SQL_OUTER_JOINS: Indicates support for outer joins and the outer join escape sequence. | Returns Y. Outer joins are supported. | +| SQL_OWNER_TERM prev. SQL_SCHEMA_TERM: The term used to describe a schema. | Returns schema. | +| SQL_ORDER_BY_COLUMNS_IN_SELECT: Indicates if the columns in an ORDER BY clause must be included in the SELECT list. | Returns N. Columns in an ORDER BY clause do not have to be in the SELECT list. | +| SQL_OWNER_USAGE prev. SQL_SCHEMA_USAGE: Returns a string that indicates which statements support schema qualifiers. | Returns: SQL_OU_DML_STATEMENTS, SQL_OU_TABLE_DEFINITION, SQL_OU_INDEX_DEFINITION, SQL_OU_PRIVILEGE_DEFINITION. | +| SQL_PARAM_ARRAY_ROW_COUNTS: Indicates if the server will return a single row count or separate row counts for each element in an array when executing a parameterized statement with at least one parameter bound to the array. | Returns SQL_PARC_BATCH, if separate row counts are available for each element in an array. SQL_PARC_NO_BATCH if a single, cumulative row count is available for the entire array. | +| SQL_PARAM_ARRAY_SELECTS: Indicates if the server will return one result set or a separate result set for each element in an array (or if the driver does not allow this feature) when executing a parameterized statement with at least one parameter bound to the array. | Returns SQL_PAS_BATCH. One data set is available for each element in an array. | +| SQL_POS_OPERATION: Lists the options supported by SQLSetPos(). | Returns: SQL_POS_POSITION, SQL_POS_REFRESH, SQL_POS_UPDATE, SQL_POS_DELETE, SQL_POS_ADD. | +| SQL_POSITIONED_STATEMENTS: Lists the supported positioned SQL statements. | Returns: SQL_PS_POSITIONED_DELETE, SQL_PS_POSITIONED_UPDATE, SQL_PS_SELECT_FOR_UPDATE. | +| SQL_PROCEDURE_TERM: The term used to describe a procedure. | Returns procedure. | +| SQL_PROCEDURES: Indicates if the server and the driver support SQL procedures and procedure invocation syntax. | Returns Y. The server and driver support procedures and procedure invocation syntax. | +| SQL_QUALIFIER_LOCATION prev. SQL_CATALOG_LOCATION: Indicates the position of the schema name in a qualified table name. | Returns SQL_CL_START. The catalog portion of a qualified table name is at the beginning of the name. | +| SQL_QUALIFIER_NAME prev. SQL_CATALOG_NAME: Indicates server support for catalog names. | Returns Y. The server supports catalog names. | +| SQL_QUALIFIER_NAME_SEPARATOR prev. SQL_CATALOG_NAME_SEPARATOR: Character separating the qualifier name from the adjacent name element. | Returns '.'. The server expects a '.' character between the qualifier and the table name. | +| SQL_QUALIFIER_TERM prev. 
SQL_CATALOG_TERM: The term used to describe a qualifier. | Returns catalog. | +| SQL_QUALIFIER_USAGE prev. SQL_CATALOG_USAGE: Indicates the SQL statements that may refer to qualifiers. | Returns SQL_CU_DML_STATEMENTS. Catalog names can be used in SELECT, INSERT, UPDATE, DELETE, SELECT FOR UPDATE and positioned UPDATE and DELETE statements. | +| SQL_QUALIFIER_USAGE Now SQL_CATALOG_USAGE: Identifies DML statements that support qualifier names. | Returns SQL_CU_DML_STATEMENTS. Qualifiers can be used in all DML statements (SELECT, INSERT, UPDATE, DELETE, SELECT FOR UPDATE). | +| SQL_QUOTED_IDENTIFIER_CASE: Indicates case sensitivity of quoted identifiers. | Returns SQL_IC_SENSITIVE. Quoted identifiers are case sensitive. | +| SQL_QUALIFIER_NAME_SEPARATOR Now SQL CATALOG_NAME_SEPARATOR: The character that separates the name qualifier from the name element. | Returns . The '.' character is used as a separator in qualified names. | +| SQL_QUALIFIER_TERM: The term used to describe a qualifier. | Returns catalog | +| SQL_QUALIFIER_LOCATION: The position of the qualifier in a qualified table name. | Returns SQL_CL_START. The qualifier precedes the table name in a qualified table name. | +| SQL_ROW_UPDATES: Indicates if keyset-driven or mixed cursors maintain row versions or values. | Returns Y. Cursors maintain values for all fetched rows and can detect updates to the row values. | +| SQL_SCHEMA_TERM: The term used to describe a schema. | Returns schema | +| SQL_SCHEMA_USAGE: Indicates the SQL statements that may refer to schemas. | Returns: SQL_OU_DML_STATEMENTS, SQL_OU_TABLE_DEFINITION, SQL_OU_INDEX_DEFINITION, SQL_OU_PRIVILEGE_DEFINITION. | +| SQL_SCROLL_CONCURRENCY: Indicates the cursor concurrency control options supported by the server. | Returns: SQL_SCCO_READ_ONLY, SQL_SCCO_OPT_ROWVER. | +| SQL_SCROLL_OPTIONS: Indicates the cursor scroll options supported by the server. | Returns: SQL_SO_FORWARD_ONLY, SQL_SO_KEYSET_DRIVEN, SQL_SO_STATIC. | +| SQL_SEARCH_PATTERN_ESCAPE: The escape character that allows use of the wildcard characters % and \_ in search patterns. | Returns . The '' character is used as an escape character for the '%' and '\_' characters in search patterns. | +| SQL_SERVER_NAME: Indicates the name of the host. | The returned value is determined by connection properties. | +| SQL_SPECIAL_CHARACTERS: Indicates any special characters allowed in identifier names. | Returns \_. The underscore character is allowed in identifier names. | +| SQL_SQL_CONFORMANCE: Indicates the level of SQL-92 compliance. | Returns SQL_SC_SQL92_ENTRY. The driver is SQL92 Entry level compliant. | +| SQL_SQL92_DATETIME_FUNCTIONS: Lists the datetime functions supported by the server. | Returns: SQL_SDF_CURRENT_DATE, SQL_SDF_CURRENT_TIME, SQL_SDF_CURRENT_TIMESTAMP. | +| SQL_SQL92_FOREIGN_KEY_DELETE_RULE: Indicates the server-enforced rules for using a foreign key in a DELETE statement. | Returns: SQL_SFKD_CASCADE, SQL_SFKD_NO_ACTION, SQL_SFKD_SET_DEFAULT, SQL_SFKD_SET_NULL. | +| SQL_SQL92_FOREIGN_KEY_UPDATE_RULE: Indicates the server-enforced rules for using a foreign key in an UPDATE statement. | Returns: SQL_SFKU_CASCADE, SQL_SFKU_NO_ACTION, SQL_SFKU_SET_DEFAULT, SQL_SFKU_SET_NULL. | +| SQL_SQL92_GRANT: Indicates the supported GRANT statement clauses. | Returns: SQL_SG_DELETE_TABLE, SQL_SG_INSERT_TABLE, SQL_SG_REFERENCES_TABLE, SQL_SG_SELECT_TABLE, SQL_SG_UPDATE_TABLE. | +| SQL_SQL92_NUMERIC_VALUE_FUNCTIONS: Lists the scalar numeric functions supported by the server and driver. 
| Returns: SQL_SNVF_BIT_LENGTH, SQL_SNVF_CHAR_LENGTH, SQL_SNVF_CHARACTER_LENGTH, SQL_SNVF_EXTRACT, SQL_SNVF_OCTET_LENGTH, SQL_SNVF_POSITION. | +| SQL_SQL92_PREDICATES, Identifies the predicates of a SELECT statement supported by the server. | Returns: SQL_SP_EXISTS, SQL_SP_ISNOTNULL, SQL_SP_ISNULL, SQL_SP_OVERLAPS, SQL_SP_LIKE, SQL_SP_IN, SQL_SP_BETWEEN, SQL_SP_COMPARISON, SQL_SP_QUANTIFIED_COMPARISON. | +| SQL_SQL92_RELATIONAL_JOIN_OPERATORS: Identifies the relational join operators supported by the server. | Returns: SQL_SRJO_CROSS_JOIN, SQL_SRJO_EXCEPT_JOIN, SQL_SRJO_FULL_OUTER_JOIN, SQL_SRJO_INNER_JOIN, SQL_SRJO_INTERSECT_JOIN, SQL_SRJO_LEFT_OUTER_JOIN, SQL_SRJO_NATURAL_JOIN, SQL_SRJO_RIGHT_OUTER_JOIN, SQL_SRJO_UNION_JOIN. | +| SQL_SQL92_REVOKE: Identifies the clauses in a REVOKE statement that are supported by the server. | Returns: SQL_SR_DELETE_TABLE, SQL_SR_INSERT_TABLE, SQL_SR_REFERENCES_TABLE, SQL_SR_SELECT_TABLE, SQL_SR_UPDATE_TABLE. | +| SQL_SQL92_ROW_VALUE_CONSTRUCTOR: Indicates the row value constructor expressions in a SELECT statement that are supported by the server. | Returns: SQL_SRVC_VALUE_EXPRESSION, SQL_SRVC_NULL. | +| SQL_SQL92_STRING_FUNCTIONS: Lists the string scalar functions supported by the server and driver. | Returns: SQL_SSF_CONVERT, SQL_SSF_LOWER, SQL_SSF_UPPER, SQL_SSF_SUBSTRING, SQL_SSF_TRANSLATE, SQL_SSF_TRIM_BOTH, SQL_SSF_TRIM_LEADING, SQL_SSF_TRIM_TRAILING. | +| SQL_SQL92_VALUE_EXPRESSIONS: Indicates the value expressions supported by the server. | Returns: SQL_SVE_CASE, SQL_SVE_CAST, SQL_SVE_COALESCE, SQL_SVE_NULLIF. | +| SQL_STANDARD_CLI_CONFORMANCE: Indicates the CLI standard the driver conforms to. | This info_type is currently unsupported. | +| SQL_STATIC_CURSOR_ATTRIBUTES1: Describes the first set of static cursor attributes supported by the driver. | Returns: SQL_CA1_NEXT, SQL_CA1_ABSOLUTE, SQL_CA1_RELATIVE, SQL_CA1_BOOKMARK, SQL_CA1_LOCK_NO_CHANGE, SQL_CA1_POS_POSITION, SQL_CA1_POS_UPDATE, SQL_CA1_POS_DELETE, SQL_CA1_POS_REFRESH, SQL_CA1_BULK_ADD, SQL_CA1_BULK_UPDATE_BY_BOOKMARK, SQL_CA1_BULK_DELETE_BY_BOOKMARK, SQL_CA1_BULK_FETCH_BY_BOOKMARK. | +| SQL_STATIC_CURSOR_ATTRIBUTES2: Describes the second set of static cursor attributes supported by the driver. | Returns: SQL_CA2_READ_ONLY_CONCURRENCY, SQL_CA2_OPT_ROWVER_CONCURRENCY, SQL_CA2_SENSITIVITY_ADDITIONS, SQL_CA2_SENSITIVITY_DELETIONS, SQL_CA2_SENSITIVITY_UPDATES, SQL_CA2_CRC_EXACT. | +| SQL_STATIC_SENSITIVITY: Indicates whether changes made to a static cursor by SQLSetPos() or UPDATE or DELETE statements are detected by the application. | Returns: SQL_SS_ADDITIONS, SQL_SS_DELETIONS, SQL_SS_UPDATES. | +| SQL_STRING_FUNCTIONS: Lists the scalar string functions supported by the server and driver. | Returns: SQL_FN_STR_CONCAT, SQL_FN_STR_LTRIM, SQL_FN_STR_LENGTH, SQL_FN_STR_LOCATE, SQL_FN_STR_LCASE, SQL_FN_STR_RTRIM, SQL_FN_STR_SUBSTRING, SQL_FN_STR_UCASE. | +| SQL_SUBQUERIES: Identifies the subquery predicates to a SELECT statement supported by the server. | Returns: SQL_SQ_COMPARISON, SQL_SQ_EXISTS, SQL_SQ_IN, SQL_SQ_QUANTIFIED. | +| SQL_SYSTEM_FUNCTIONS: Lists the scalar system functions supported by the server and driver. | Returns 0. | +| SQL_TABLE_TERM: The term used to describe a table. | Returns table. | +| SQL_TIMEDATE_ADD_INTERVALS: Indicates the timestamp intervals supported by the server for the TIMESTAMPADD scalar function. | Returns 0. | +| SQL_TIMEDATE_DIFF_INTERVALS: Indicates the timestamp intervals supported by the server for the TIMESTAMPDIFF scalar function. 
| Returns 0 | +| SQL_TIMEDATE_FUNCTIONS: Indicates the date and time functions supported by the server. | Returns: SQL_FN_TD_NOW, SQL_FN_TD_CURDATE, SQL_FN_TD_CURTIME. | +| SQL_TXN_CAPABLE: Identifies the transaction support offered by the server and driver. | Returns SQL_TC_ALL. Transactions can contain both DML and DDL statements. | +| SQL_TXN_ISOLATION_OPTION: Indicates the transaction isolation level supported by the server. | Returns: SQL_TXN_READ_COMMITTED, SQL_TXN_SERIALIZABLE. | +| SQL_UNION: Indicates server support for the UNION clause. | Returns: SQL_U_UNION, SQL_U_UNION_ALL. | +| SQL_USER_NAME: Identifies the name of the user connected to a database; may be different than the login name. | This value is determined by the connection properties. | +| SQL_XOPEN_CLI_YEAR: The publication year of the X/Open specification that the driver manager complies with. | This info_type is currently unsupported. | + +## Connection Attributes + +You can use the ODBC `SQLGetConnectAttr()` and `SQLSetConnectAttr()` functions to retrieve or set the value of a connection attribute. + +### SQLGetConnectAttr() + +The `SQLGetConnectAttr()` function returns the current value of a connection attribute. The signature is: + +```c++ +SQLRETURN SQLGetConnectAttr +( + SQLHDBC conn_handle, //Input + SQLINTEGER attribute, //Input + SQLPOINTER value_pointer, //Output + SQLINTEGER buffer_length, //Input + SQLINTEGER * string_length_pointer //Output +); +``` + +- `conn_handle` The connection handle. + +- `attribute` identifies the attribute whose value you wish to retrieve. + +- `value_pointer` A pointer to the location in memory that will receive the `attribute` value. + +- `buffer_length` If `attribute` is defined by ODBC and `value_pointer` points to a character string or binary buffer, `buffer_length` is the length of `value_pointer`. If `value_pointer` points to a fixed-size value (such as an integer), `buffer_length` is ignored. + + If EDB-ODBC defines the attribute, `SQLGetConnectAttr()` sets the `buffer_length` parameter. `buffer_length` can be: + + | Value type | Meaning | + | ---------------------- | ----------------------------------------- | + | Character string | The length of the character string | + | Binary buffer | The result of SQL_LEN_BINARY_ATTR(length) | + | Fixed length data type | SQL_IS_INTEGER or SQL_IS_UINTEGER | + | Any other type | SQL_IS_POINTER | + +- `string_length_pointer` A pointer to a `SQLINTEGER` that receives the number of bytes available to return in `value_pointer`. If `value_pointer` is `NULL`, `string_length_pointer` is not returned. + +This function returns `SQL_SUCCESS`, `SQL_SUCCESS_WITH_INFO`, `SQL_NO_DATA`, `SQL_ERROR` or `SQL_INVALID_HANDLE`. + +The following table lists the connection attributes supported by EDB-ODBC. + +| Attribute | Supported? 
| Notes | +| ---------------------------- | ---------- | ----------------------------------------------------- | +| SQL_ATTR_ACCESS_MODE | NO | SQL_MODE_READ_WRITE | +| SQL_ATTR_ASYNC_ENABLE | NO | SQL_ASYNC_ENABLE_OFF | +| SQL_ATTR_AUTO_IPD | NO | | +| SQL_ATTR_AUTOCOMMIT | YES | SQL_AUTOCOMMIT, SQL_AUTOCOMMIT_ON, SQL_AUTOCOMMIT_OFF | +| SQL_ATTR_CONNECTION_TIMEOUT | NO | | +| SQL_ATTR_CURRENT_CATALOG | NO | | +| SQL_ATTR_DISCONNECT_BEHAVIOR | NO | | +| SQL_ATTR_ENLIST_IN_DTC | YES | For win32 and with conditional compilation | +| SQL_ATTR_ENLIST_IN_XA | NO | | +| SQL_ATTR_LOGIN_TIMEOUT | NO | SQL_LOGIN_TIMEOUT | +| SQL_ATTR_ODBC_CURSORS | NO | | +| SQL_ATTR_PACKET_SIZE | NO | | +| SQL_ATTR_QUIET_MODE | NO | | +| SQL_ATTR_TRACE | NO | | +| SQL_ATTR_TRACEFILE | NO | | +| SQL_ATTR_TRANSLATE_LIB | NO | | +| SQL_ATTR_TRANSLATE_OPTION | NO | | +| SQL_ATTR_TXN_ISOLATION | YES | SQL_TXN_ISOLATION, SQL_DEFAULT_TXN_ISOLATION | + +### SQLSetConnectAttr() + +You can use the ODBC `SQLSetConnectAttr()` function to set the values of connection attributes. The signature of the function is: + +```c++ +SQLRETURN SQLSetConnectAttr +( + SQLHDBC conn_handle , // Input + SQLINTEGER attribute , // Input + SQLPOINTER value_pointer , // Input + SQLINTEGER string_length , // Input +); +``` + +`conn_handle` + +The connection handle + +`attribute` + +`attribute` identifies the attribute whose value you wish to set + +`value_pointer` + +A pointer to the value that the `attribute` will assume. + +`string_length` + +If `attribute` is defined by ODBC and `value_pointer` points to a binary buffer or character string, `string_length` is the length of `value_pointer`. If `value_pointer` points to a fixed-length value (such as an integer), `string_length` is ignored. + +If EDB-ODBC defines the attribute, the application sets the `string_length` parameter. Possible `string_length` values are: + +| Value Type | Meaning | +| ---------------------- | --------------------------------------------- | +| Character string | The length of the character string or SQL_NTS | +| Binary buffer | The result of SQL_LEN_BINARY_ATTR(length) | +| Fixed length data type | SQL_IS_INTEGER or SQL_IS_UINTEGER | +| Any other type | SQL_IS_POINTER | + +`SQLSetConnectAttr()` returns `SQL_SUCCESS`, `SQL_SUCCESS_WITH_INFO`, `SQL_ERROR`, `SQL_STILL_EXECUTING` or `SQL_INVALID_HANDLE`. + +You can call `SQLSetConnectAttr()` any time after the connection handle is allocated, until the time that the connection is closed with a call to `SQLFreeHandle()`. All attributes set by the call persist until the call to `SQLFreeHandle()`. + +Connection attributes have a specific time frame in which they can be set. Some attributes must be set before the connection is established, while others can only be set after a connection is established. + +The following table lists the connection attributes and the time frame in which they can be set: + +| Attribute | Set Before or After establishing a connection? 
|
+| --------------------------- | ---------------------------------------------- |
+| SQL_ATTR_ACCESS_MODE        | Before or After                                |
+| SQL_ATTR_ASYNC_ENABLE       | Before or After                                |
+| SQL_ATTR_AUTO_IPD           | Before or After                                |
+| SQL_ATTR_AUTOCOMMIT         | Before or After                                |
+| SQL_ATTR_CONNECTION_TIMEOUT | Before or After                                |
+| SQL_ATTR_CURRENT_CATALOG    | Before or After                                |
+| SQL_ATTR_ENLIST_IN_DTC      | After                                          |
+| SQL_ATTR_ENLIST_IN_XA       | After                                          |
+| SQL_ATTR_LOGIN_TIMEOUT      | Before                                         |
+| SQL_ATTR_ODBC_CURSORS       | Before                                         |
+| SQL_ATTR_PACKET_SIZE        | Before                                         |
+| SQL_ATTR_QUIET_MODE         | Before or After                                |
+| SQL_ATTR_TRACE              | Before or After                                |
+| SQL_ATTR_TRACEFILE          | Before or After                                |
+| SQL_ATTR_TRANSLATE_LIB      | After                                          |
+| SQL_ATTR_TRANSLATE_OPTION   | After                                          |
+| SQL_ATTR_TXN_ISOLATION      | Before or After                                |
+
+## Environment Attributes
+
+You can use the ODBC `SQLGetEnvAttr()` and `SQLSetEnvAttr()` functions to retrieve or set the value of an environment attribute.
+
+### SQLGetEnvAttr()
+
+Use the `SQLGetEnvAttr()` function to find the current value of environment attributes on your system. The signature of the function is:
+
+```c++
+SQLRETURN SQLGetEnvAttr
+(
+  SQLHENV      env_handle,             // Input
+  SQLINTEGER   attribute,              // Input
+  SQLPOINTER   value_pointer,          // Output
+  SQLINTEGER   buffer_length,          // Input
+  SQLINTEGER * string_length_pointer   // Output
+);
+```
+
+`env_handle`
+
+The environment handle.
+
+`attribute`
+
+`attribute` identifies the attribute whose value you wish to retrieve.
+
+`value_pointer`
+
+A pointer to the location in memory that will receive the `attribute` value.
+
+`buffer_length`
+
+If the attribute is a character string, `buffer_length` is the length of `value_pointer`. If the value of the attribute is not a character string, `buffer_length` is unused.
+
+`string_length_pointer`
+
+A pointer to a `SQLINTEGER` that receives the number of bytes available to return in `value_pointer`. If `value_pointer` is NULL, `string_length_pointer` is not returned.
+
+This function returns `SQL_SUCCESS`, `SQL_SUCCESS_WITH_INFO`, `SQL_NO_DATA`, `SQL_ERROR` or `SQL_INVALID_HANDLE`.
+
+The following table lists the environment attributes supported by EDB-ODBC.
+
+| Attribute                   | Supported?                          | Restrictions?                       |
+| --------------------------- | ----------------------------------- | ----------------------------------- |
+| SQL_ATTR_CONNECTION_POOLING | SQL_CP_ONE_PER_DRIVER or SQL_CP_OFF | Determined by connection properties |
+| SQL_ATTR_ODBC_VERSION       | (SQL_OV_ODBC3), (SQL_OV_ODBC2)      | NONE                                |
+| SQL_ATTR_OUTPUT_NTS         | SQL_SUCCESS                         | NONE                                |
+
+### SQLSetEnvAttr()
+
+You can use the `SQLSetEnvAttr()` function to set the values of environment attributes. The signature of the function is:
+
+```c++
+SQLRETURN SQLSetEnvAttr
+(
+  SQLHENV      env_handle,       //Input
+  SQLINTEGER   attribute,        //Input
+  SQLPOINTER   value_pointer,    //Input
+  SQLINTEGER   string_length     //Input
+);
+```
+
+- `env_handle` The environment handle.
+- `attribute` identifies the attribute whose value you wish to set.
+- `value_pointer` A pointer to the value assigned to the `attribute`. The value will be a `NULL` terminated character string or a 32-bit integer value, depending on the specified `attribute`.
+- `string_length` If `value_pointer` is a pointer to a binary buffer or character string, `string_length` is the length of `value_pointer`. If the value being assigned to the attribute is a character string, `string_length` is the length of that character string. If `value_pointer` is NULL, `string_length` is not returned. If `value_pointer` is an integer, `string_length` is ignored.
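+
+For example, a minimal sketch of these two calls might look like the following. This example is illustrative only (it is not part of the EDB-ODBC sample code) and assumes the driver manager's standard `sql.h`/`sqlext.h` headers; the handle names are arbitrary.
+
+```c++
+#include <sql.h>
+#include <sqlext.h>
+#include <stdio.h>
+
+int main(void)
+{
+    SQLHENV env = SQL_NULL_HENV;
+
+    /* Allocate an environment handle. */
+    if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env)))
+        return 1;
+
+    /* SQL_ATTR_ODBC_VERSION is a 32-bit integer attribute, so the value is
+       passed in the pointer argument itself and string_length is ignored. */
+    SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, 0);
+
+    /* Read the attribute back with SQLGetEnvAttr(). */
+    SQLINTEGER version = 0;
+    SQLGetEnvAttr(env, SQL_ATTR_ODBC_VERSION, &version, 0, NULL);
+    printf("ODBC version attribute: %ld\n", (long) version);
+
+    SQLFreeHandle(SQL_HANDLE_ENV, env);
+    return 0;
+}
+```
+
+Because `SQL_ATTR_ODBC_VERSION` is a fixed-length integer attribute, the value travels in the `value_pointer` argument itself and the length argument is ignored, matching the value-type tables above.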
+
+`SQLSetEnvAttr()` returns `SQL_SUCCESS`, `SQL_INVALID_HANDLE`, `SQL_ERROR` or `SQL_SUCCESS_WITH_INFO`. The application must call `SQLSetEnvAttr()` before allocating a connection handle; all values applied to environment attributes will persist until `SQLFreeHandle()` is called for the connection. ODBC version 3.x allows you to allocate multiple environment handles simultaneously.
+
+The following table lists the environment attributes you can set with `SQLSetEnvAttr()`.
+
+| Attribute             | Value_pointer type | Restrictions?                                                                                                                 |
+| --------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| SQL_ATTR_ODBC_VERSION | 32-bit integer     | Set this attribute before the application calls any function that includes an SQLHENV argument.                                |
+| SQL_ATTR_OUTPUT_NTS   | 32-bit integer     | Defaults to SQL_TRUE. Calls that set this attribute to SQL_FALSE return SQL_ERROR/SQLSTATE HYC00 (feature not implemented).    |
+
+## Statement Attributes
+
+You can use the ODBC `SQLGetStmtAttr()` and `SQLSetStmtAttr()` functions to retrieve and set the value of a statement attribute.
+
+### SQLGetStmtAttr()
+
+The `SQLGetStmtAttr()` function returns the current value of a statement attribute. The signature is:
+
+```c++
+SQLRETURN SQLGetStmtAttr
+(
+  SQLHSTMT     stmt_handle,            //Input
+  SQLINTEGER   attribute,              //Input
+  SQLPOINTER   value_pointer,          //Output
+  SQLINTEGER   buffer_length,          //Input
+  SQLINTEGER * string_length_pointer   //Output
+);
+```
+
+- `stmt_handle` The statement handle.
+
+- `attribute` identifies the attribute whose value you wish to retrieve.
+
+- `value_pointer` A pointer to the location in memory that will receive the `attribute` value.
+
+- `buffer_length` If the attribute is defined by ODBC, `buffer_length` is the length of `value_pointer` (if `value_pointer` points to a character string or binary buffer). If `value_pointer` points to an integer, `buffer_length` is ignored.
+
+  If EDB-ODBC defines the attribute, the application sets the `buffer_length` parameter. `buffer_length` can be:
+
+  | Value Type             | Meaning                                   |
+  | ---------------------- | ----------------------------------------- |
+  | Character string       | The length of the character string        |
+  | Binary buffer          | The result of SQL_LEN_BINARY_ATTR(length) |
+  | Fixed length data type | SQL_IS_INTEGER or SQL_IS_UINTEGER         |
+  | Any other type         | SQL_IS_POINTER                            |
+
+- `string_length_pointer` A pointer to an `SQLINTEGER` that receives the number of bytes required to hold the requested value. If `value_pointer` is NULL, `string_length_pointer` is not returned.
+
+  This function returns `SQL_SUCCESS`, `SQL_SUCCESS_WITH_INFO`, `SQL_ERROR` or `SQL_INVALID_HANDLE`.
+
+  The following table lists the statement attributes and indicates whether EDB-ODBC supports each one:
+
+  | Attribute                      | Supported? | Restrictions?           |
+  | ------------------------------ | ---------- | ----------------------- |
+  | SQL_ATTR_APP_PARAM_DESC        | YES        |                         |
+  | SQL_ATTR_APP_ROW_DESC          | YES        |                         |
+  | SQL_ATTR_ASYNC_ENABLE          | NO         |                         |
+  | SQL_ATTR_CONCURRENCY           | YES        | SQL_CONCUR_READ_ONLY    |
+  | SQL_ATTR_CURSOR_SCROLLABLE     | YES        |                         |
+  | SQL_ATTR_CURSOR_TYPE           | YES        | SQL_CURSOR_FORWARD_ONLY |
+  | SQL_ATTR_CURSOR_SENSITIVITY    | YES        | SQL_INSENSITIVE         |
+  | SQL_ATTR_ENABLE_AUTO_IPD       | NO         |                         |
+  | SQL_ATTR_FETCH_BOOKMARK_PTR    | YES        |                         |
+  | SQL_ATTR_IMP_PARAM_DESC        | YES        |                         |
+  | SQL_ATTR_IMP_ROW_DESC          | YES        |                         |
+  | SQL_ATTR_KEYSET_SIZE           | NO         |                         |
+  | SQL_ATTR_MAX_LENGTH            | NO         |                         |
+  | SQL_ATTR_MAX_ROWS              | NO         |                         |
+  | SQL_ATTR_METADATA_ID           | YES        |                         |
+  | SQL_ATTR_NOSCAN                | NO         |                         |
+  | SQL_ATTR_PARAM_BIND_OFFSET_PTR | YES        | ODBC V2.0               |
+  | SQL_ATTR_PARAM_BIND_TYPE       | YES        |                         |
+  | SQL_ATTR_PARAM_OPERATION_PTR   | YES        |                         |
+  | SQL_ATTR_PARAM_STATUS_PTR      | YES        |                         |
+  | SQL_ATTR_PARAMS_PROCESSED_PTR  | YES        |                         |
+  | SQL_ATTR_PARAMSET_SIZE         | YES        |                         |
+  | SQL_ATTR_QUERY_TIMEOUT         | NO         |                         |
+  | SQL_ATTR_RETRIEVE_DATA         | NO         |                         |
+  | SQL_ATTR_ROW_BIND_OFFSET_PTR   | YES        |                         |
+  | SQL_ATTR_ROW_BIND_TYPE         | NO         |                         |
+  | SQL_ATTR_ROW_NUMBER            | YES        |                         |
+  | SQL_ATTR_ROW_OPERATION_PTR     | YES        |                         |
+  | SQL_ATTR_ROW_STATUS_PTR        | YES        |                         |
+  | SQL_ATTR_ROWS_FETCHED_PTR      | YES        |                         |
+  | SQL_ATTR_ROW_ARRAY_SIZE        | YES        |                         |
+  | SQL_ATTR_SIMULATE_CURSOR       | NO         |                         |
+  | SQL_ATTR_USE_BOOKMARKS         | YES        |                         |
+  | SQL_ROWSET_SIZE                | YES        |                         |
+
+### SQLSetStmtAttr()
+
+You can use the `SQLSetStmtAttr()` function to set the values of statement attributes. The signature is:
+
+```c++
+SQLRETURN SQLSetStmtAttr
+(
+  SQLHSTMT     stmt_handle,      //Input
+  SQLINTEGER   attribute,        //Input
+  SQLPOINTER   value_pointer,    //Input
+  SQLINTEGER   string_length     //Input
+);
+```
+
+- `stmt_handle` is the statement handle.
+
+- `attribute` identifies the statement attribute whose value you wish to set.
+
+- `value_pointer` is a pointer to the location in memory that holds the value that will be assigned to the attribute. `value_pointer` can be a pointer to:
+
+  - A null-terminated character string
+  - A binary buffer
+  - A value defined by the driver
+  - A value of the type `SQLLEN`, `SQLULEN` or `SQLUSMALLINT`
+
+  `value_pointer` can also optionally hold one of the following values:
+  - An ODBC descriptor handle
+  - A `SQLUINTEGER` value
+  - A `SQLULEN` value
+  - A signed INTEGER (if attribute is a driver-specific value)
+
+- `string_length` If `attribute` is defined by ODBC and `value_pointer` points to a binary buffer or character string, `string_length` is the length of `value_pointer`. If `value_pointer` points to an integer, `string_length` is ignored.
+  If EDB-ODBC defines the attribute, the application sets the `string_length` parameter. Possible `string_length` values are:
+
+| Value Type             | Meaning                                       |
+| ---------------------- | --------------------------------------------- |
+| Character string       | The length of the character string or SQL_NTS |
+| Binary buffer          | The result of SQL_LEN_BINARY_ATTR(length)     |
+| Fixed length data type | SQL_IS_INTEGER or SQL_IS_UINTEGER             |
+| Any other type         | SQL_IS_POINTER                                |
+
+## Error Handling
+
+Diagnostic information for the ODBC functions mentioned in this guide can be retrieved via the ODBC `SQLGetDiagRec()` function.
+
+### SQLGetDiagRec()
+
+The `SQLGetDiagRec()` function returns status and error information from a diagnostic record written by the ODBC functions that retrieve or set attribute values. The signature is:
+
+```c++
+SQLRETURN SQLGetDiagRec
+(
+  SQLSMALLINT handle_type,           // Input
+  SQLHANDLE   handle,                // Input
+  SQLSMALLINT record_number,         // Input
+  SQLCHAR     *SQLState_pointer,     // Output
+  SQLINTEGER  *native_error_pointer, // Output
+  SQLCHAR     *error_text_pointer,   // Output
+  SQLSMALLINT buffer_length,         // Input
+  SQLSMALLINT *text_length_pointer   // Output
+);
+```
+
+- `handle_type` The handle type of the `handle` argument. `handle_type` must be one of the following:
+
+  - `SQL_HANDLE_ENV` specifies an environment handle.
+  - `SQL_HANDLE_STMT` specifies a statement handle.
+  - `SQL_HANDLE_DBC` specifies a connection handle.
+  - `SQL_HANDLE_DESC` specifies a descriptor handle.
+
+- `handle` The handle associated with the attribute error message.
+
+- `record_number` The status record that the application is seeking information from (must be greater than or equal to 1).
+
+- `SQLState_pointer` Pointer to a memory buffer that receives the `SQLState` error code from the record.
+
+- `native_error_pointer` Pointer to a buffer that receives the native error message for the data source (contained in the `SQL_DIAG_NATIVE` field).
+
+- `error_text_pointer` Pointer to a memory buffer that receives the error text (contained in the `SQL_DIAG_MESSAGE_TEXT` field).
+
+- `buffer_length` The length of the `error_text` buffer.
+
+- `text_length_pointer` Pointer to the buffer that receives the size (in characters) of the `error_text_pointer` field. If the number of characters in the `error_text_pointer` parameter exceeds the number available (in `buffer_length`), `error_text_pointer` will be truncated.
+
+`SQLGetDiagRec()` returns `SQL_SUCCESS`, `SQL_ERROR`, `SQL_INVALID_HANDLE`, `SQL_SUCCESS_WITH_INFO` or `SQL_NO_DATA`.
+
+## Supported ODBC API Functions
+
+The following table lists the ODBC API functions; the right column specifies `Yes` if the API is supported by the EDB-ODBC driver. Use the ODBC `SQLGetFunctions()` function (specifying a function ID of `SQL_API_ODBC3_ALL_FUNCTIONS`) to return a current version of this list.
+
+| ODBC API Function Name | Supported by EDB-ODBC?
| +| ---------------------- | ---------------------- | +| SQLAllocConnect() | Yes | +| SQLAllocEnv() | Yes | +| SQLAllocStmt() | Yes | +| SQLBindCol() | Yes | +| SQLCancel() | Yes | +| SQLColAttributes() | Yes | +| SQLConnect() | Yes | +| SQLDescribeCol() | Yes | +| SQLDisconnect() | Yes | +| SQLError() | Yes | +| SQLExecDirect() | Yes | +| SQLExecute() | Yes | +| SQLFetch() | Yes | +| SQLFreeConnect() | Yes | +| SQLFreeEnv() | Yes | +| SQLFreeStmt() | Yes | +| SQLGetCursorName() | Yes | +| SQLNumResultCols() | Yes | +| SQLPrepare() | Yes | +| SQLRowCount() | Yes | +| SQLSetCursorName() | Yes | +| SQLSetParam() | Yes | +| SQLTransact() | Yes | +| SQLColumns() | Yes | +| SQLDriverConnect() | Yes | +| SQLGetConnectOption() | Yes | +| SQLGetData() | Yes | +| SQLGetFunctions() | Yes | +| SQLGetInfo() | Yes | +| SQLGetStmtOption() | Yes | +| SQLGetTypeInfo() | Yes | +| SQLParamData() | Yes | +| SQLPutData() | Yes | +| SQLSetConnectOption() | Yes | +| SQLSetStmtOption() | Yes | +| SQLSpecialColumns() | Yes | +| SQLStatistics() | Yes | +| SQLTables() | Yes | +| SQLBrowseConnect() | No | +| SQLColumnPrivileges() | No | +| SQLDataSources() | Yes | +| SQLDescribeParam() | No | +| SQLExtendedFetch() | Yes | +| SQLForeignKeys() | Yes | +| SQLMoreResults() | Yes | +| SQLNativeSQL() | Yes | +| SQLNumParams() | Yes | +| SQLParamOptions() | Yes | +| SQLPrimaryKeys() | Yes | +| SQLProcedureColumns() | Yes | +| SQLProcedures() | Yes | +| SQLSetPos() | Yes | +| SQLSetScrollOptions() | No | +| SQLTablePrivileges() | Yes | +| SQLDrivers() | Yes | +| SQLBindParameter() | Yes | +| SQLAllocHandle() | Yes | +| SQLBindParam() | Yes | +| SQLCloseCursor() | Yes | +| SQLColAttribute() | Yes | +| SQLCopyDesc() | Yes | +| SQLendTran() | Yes | +| SQLFetchScroll() | Yes | +| SQLFreeHandle() | Yes | +| SQLGetConnectAttr() | Yes | +| SQLGetDescField() | Yes | +| SQLGetDescRec() | Yes | +| SQLGetDiagField() | Yes | +| SQLGetDiagRec() | Yes | +| SQLGetEnvAttr() | Yes | +| SQLGetStmtAttr() | Yes | +| SQLSetConnectAttr() | Yes | +| SQLSetDescField() | Yes | +| SQLSetDescRec() | No | +| SQLSetEnvAttr() | Yes | +| SQLSetStmtAttr() | Yes | +| SQLBulkOperations() | Yes | + +## Supported Data Types + +EDB-ODBC supports the following ODBC data types: + +| ODBC Data Type | Corresponding Advanced Server Data Type | +| ------------------ | --------------------------------------- | +| SQL_BIGINT | PG_TYPE_INT8 | +| SQL_BINARY | PG_TYPE_BYTEA | +| SQL_BIT | PG_TYPE_BOOL or PG_TYPE_CHAR | +| SQL_CHAR | PG_TYPE_BPCHAR | +| SQL_TYPE_DATE | PG_TYPE_DATE | +| SQL_DECIMAL | PG_TYPE_NUMERIC | +| SQL_DOUBLE | PG_TYPE_FLOAT8 | +| SQL_FLOAT | PG_TYPE_FLOAT8 | +| SQL_INTEGER | PG_TYPE_INT4 | +| SQL_LONGVARBINARY | PG_TYPE_BYTEA | +| SQL_LONGVARCHAR | PG_TYPE_VARCHAR or PG_TYPE_TEXT | +| SQL_NUMERIC | PG_TYPE_NUMERIC | +| SQL_NUMERIC | PG_TYPE_NUMERIC | +| SQL_REAL | PG_TYPE_FLOAT4 | +| SQL_SMALLINT | PG_TYPE_INT2 | +| SQL_TYPE_TIME | PG_TYPE_TIME | +| SQL_TYPE_TIMESTAMP | PG_TYPE_DATETIME | +| SQL_TINYINT | PG_TYPE_INT2 | +| SQL_VARBINARY | PG_TYPE_BYTEA | +| SQL_VARCHAR | PG_TYPE_VARCHAR | + +## prerequisite for ADO users + +You must execute `Command.Prepared = True` before executing `Command.Execute`. + +## Thread Safety + +EDB-ODBC is thread safe. 
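+
+## Example: Checking Function Support and Reading Diagnostics
+
+The following minimal sketch ties together the `SQLGetFunctions()` and `SQLGetDiagRec()` calls described in the sections above. It is illustrative only and is not part of the EDB-ODBC samples: the data source name `EDB`, the user name, and the password are placeholders that you would replace with your own connection details.
+
+```c++
+#include <sql.h>
+#include <sqlext.h>
+#include <stdio.h>
+
+/* Print every diagnostic record attached to a handle via SQLGetDiagRec(). */
+static void print_diagnostics(SQLSMALLINT handle_type, SQLHANDLE handle)
+{
+    SQLCHAR     sqlstate[6];
+    SQLCHAR     message[SQL_MAX_MESSAGE_LENGTH];
+    SQLINTEGER  native_error = 0;
+    SQLSMALLINT length = 0;
+    SQLSMALLINT record = 1;
+
+    while (SQL_SUCCEEDED(SQLGetDiagRec(handle_type, handle, record++, sqlstate,
+                                       &native_error, message,
+                                       (SQLSMALLINT) sizeof(message), &length)))
+        printf("SQLSTATE %s (native %ld): %s\n",
+               (char *) sqlstate, (long) native_error, (char *) message);
+}
+
+int main(void)
+{
+    SQLHENV env = SQL_NULL_HENV;
+    SQLHDBC dbc = SQL_NULL_HDBC;
+
+    SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env);
+    SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, 0);
+    SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc);
+
+    /* "EDB", "enterprisedb" and "password" are placeholders, not defaults. */
+    if (!SQL_SUCCEEDED(SQLConnect(dbc, (SQLCHAR *) "EDB", SQL_NTS,
+                                  (SQLCHAR *) "enterprisedb", SQL_NTS,
+                                  (SQLCHAR *) "password", SQL_NTS)))
+    {
+        print_diagnostics(SQL_HANDLE_DBC, dbc);
+    }
+    else
+    {
+        /* Ask the driver which ODBC 3.x functions it implements. */
+        SQLUSMALLINT supported[SQL_API_ODBC3_ALL_FUNCTIONS_SIZE];
+
+        if (SQL_SUCCEEDED(SQLGetFunctions(dbc, SQL_API_ODBC3_ALL_FUNCTIONS, supported)))
+            printf("SQLSetPos() supported: %s\n",
+                   SQL_FUNC_EXISTS(supported, SQL_API_SQLSETPOS) ? "yes" : "no");
+
+        SQLDisconnect(dbc);
+    }
+
+    SQLFreeHandle(SQL_HANDLE_DBC, dbc);
+    SQLFreeHandle(SQL_HANDLE_ENV, env);
+    return 0;
+}
+```
+
+Looping `record_number` from 1 until `SQLGetDiagRec()` stops returning a success code is the conventional way to drain all diagnostic records from a handle.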
diff --git a/product_docs/docs/odbc_connector/13.0.0.1/07_scram_compatibility.mdx b/product_docs/docs/odbc_connector/13.0.0.1/07_scram_compatibility.mdx new file mode 100644 index 00000000000..44a60d89c9e --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/07_scram_compatibility.mdx @@ -0,0 +1,13 @@ +--- +title: "Scram Compatibility" +legacyRedirects: + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/security_and_encryption.html" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/scram_compatibility.html" +--- + + + +The EDB ODBC Connector provides SCRAM-SHA-256 support for Advanced Server versions 10 and above. This support is available from EDB ODBC 10.01.0000.01 release onwards. diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/EDB_logo.png b/product_docs/docs/odbc_connector/13.0.0.1/images/EDB_logo.png new file mode 100644 index 00000000000..f4a93cf57f5 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/EDB_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 +size 12136 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/advanced_options_1.png b/product_docs/docs/odbc_connector/13.0.0.1/images/advanced_options_1.png new file mode 100755 index 00000000000..ca572ffb228 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/advanced_options_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e6f03b0307da9b9a0fa34ee58c331aa5a64a890dbd236b5b0de4eadd6a6880e +size 66479 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/connection_is_successful.png b/product_docs/docs/odbc_connector/13.0.0.1/images/connection_is_successful.png new file mode 100755 index 00000000000..9a16b5a9568 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/connection_is_successful.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d620de8c559038fcaed267c1933e42d026bcfd8385c0aec50d2665f2a291b06 +size 11904 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/create_new_data_source.png b/product_docs/docs/odbc_connector/13.0.0.1/images/create_new_data_source.png new file mode 100755 index 00000000000..9aef5128b26 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/create_new_data_source.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81fa2a21c9e45ffb0bc49ab00f461eb49fdfde5792acb98bf7d53dae3cac71ff +size 79950 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_names.png b/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_names.png new file mode 100755 index 00000000000..94b86591441 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_names.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff2d2f1794c2d9d1872904ec17228dc6ad2833840deb281486f089f768e1c2ef +size 79437 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_properties_window.png b/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_properties_window.png new file mode 100755 index 00000000000..d56d373ab89 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/data_source_properties_window.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3cbaafeb117554d23d2b2900160a849d5a769961da6c9cd4651999d3c567d01b +size 43846 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/define_the_data_source.png b/product_docs/docs/odbc_connector/13.0.0.1/images/define_the_data_source.png new file mode 100755 index 00000000000..cb0d3378e6d --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/define_the_data_source.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed196314172d2361e74db1dc375bf9665594b508c12f4ce18ed5d6be013dc9a8 +size 28976 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/driver_properties_window.png b/product_docs/docs/odbc_connector/13.0.0.1/images/driver_properties_window.png new file mode 100755 index 00000000000..274bb8a8bf0 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/driver_properties_window.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21b21544592514a735c7c3562313f9c1207a81bbcebba7ea2911bf6ff0c06bf5 +size 19360 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/global_settings.png b/product_docs/docs/odbc_connector/13.0.0.1/images/global_settings.png new file mode 100755 index 00000000000..34d359a1b46 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/global_settings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:702119d63552fd2b78acf0a4b21b53fa6efd6ec2c38324965d8ab4be12fc99db +size 26399 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/installed_edb-odbc_driver.png b/product_docs/docs/odbc_connector/13.0.0.1/images/installed_edb-odbc_driver.png new file mode 100755 index 00000000000..0704d4cc5b7 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/installed_edb-odbc_driver.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e0d273b43ac65b71c974c95e742c6c01c28f7562b7cd590cf2d811b8783e72a +size 79978 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/new_driver_definition.png b/product_docs/docs/odbc_connector/13.0.0.1/images/new_driver_definition.png new file mode 100755 index 00000000000..487f387a0ce --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/new_driver_definition.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:287ef91217e2b8a340a8dcfe66cc89d6c50469888d00f7a2772f96fe9bdc0347 +size 82264 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_advanced_options_2.png b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_advanced_options_2.png new file mode 100755 index 00000000000..6730245f187 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_advanced_options_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adea506cc173b7d5058081dff9c9de1b7e1930d446c347b390ff0e2d32bf9144 +size 72827 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_complete.png b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_complete.png new file mode 100755 index 00000000000..eb5e0210b4c --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:807e25bfa4bb212640947dbf0bb14ec4c72da609b79d0c4272a721803f14451f +size 326390 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_dialog.png b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_dialog.png new file mode 100755 index 
00000000000..f22bef1c1f3 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_dialog.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb88560036129e2920ee996ff79daf46dd84677e7526276d0c109ef1ea79b583 +size 146246 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_wizard.png b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_wizard.png new file mode 100755 index 00000000000..bd5c3de71e4 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/odbc_installation_wizard.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9540ccb5eedbda2d2456d100dc3a618d1948543da81878b205c4ffe80924521b +size 282717 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/ready_to_install.png b/product_docs/docs/odbc_connector/13.0.0.1/images/ready_to_install.png new file mode 100755 index 00000000000..e5d427b9323 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/ready_to_install.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee7b00b4d103563cbf2b7c4b1b9e017f15b1b53d64a1141b8d52ba22eabea76 +size 113014 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/select_driver_named_date_source.png b/product_docs/docs/odbc_connector/13.0.0.1/images/select_driver_named_date_source.png new file mode 100755 index 00000000000..57b41f7eb27 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/select_driver_named_date_source.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe4b534a2b466e7d4de8563e728e74b9cb58a4605c8388d0dd4a7ae0fd4e5a8b +size 27316 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/selecting_the_connectors_installer.png b/product_docs/docs/odbc_connector/13.0.0.1/images/selecting_the_connectors_installer.png new file mode 100644 index 00000000000..457f3f16213 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/selecting_the_connectors_installer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31f84a8f760ae6e7a2a41bc81b8537906939bc81efd03aca21eed29ae163d796 +size 172259 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/starting_stackbuilder_plus.png b/product_docs/docs/odbc_connector/13.0.0.1/images/starting_stackbuilder_plus.png new file mode 100644 index 00000000000..11665300652 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/starting_stackbuilder_plus.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce6bcefb865ca14239fb7e0e2ac5149ed56251cfbc5153869070d039f70857c6 +size 91989 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/unixodbc_data_source_administrator.png b/product_docs/docs/odbc_connector/13.0.0.1/images/unixodbc_data_source_administrator.png new file mode 100755 index 00000000000..cf3893d9575 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/unixodbc_data_source_administrator.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daeab0b9844442fcf1a5484a184cfe9bb38ff5f5cea315ccf2fb1332d5c8f902 +size 69084 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/images/windows_data_source_administrator.png b/product_docs/docs/odbc_connector/13.0.0.1/images/windows_data_source_administrator.png new file mode 100755 index 00000000000..ab29ce6f5d9 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/images/windows_data_source_administrator.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:77ea546d98802b3a3cc12bb1000aef47be802e668da438e591f353b9d7d72693 +size 102851 diff --git a/product_docs/docs/odbc_connector/13.0.0.1/index.mdx b/product_docs/docs/odbc_connector/13.0.0.1/index.mdx new file mode 100644 index 00000000000..d274580d050 --- /dev/null +++ b/product_docs/docs/odbc_connector/13.0.0.1/index.mdx @@ -0,0 +1,23 @@ +--- +title: "EDB ODBC Connector" +directoryDefaults: + description: "EDB ODBC Connector Version 12.2.0.2 Documentation and release notes." + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/index.html" + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/conclusion.html" + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/whats_new.html" + - "/edb-docs/d/edb-postgres-odbc-connector/user-guides/odbc-guide/12.2.0.2/genindex.html" + - "/edb-docs/p/edb-postgres-odbc-connector/12.2.0.2" +--- + +ODBC (Open Database Connectivity) is a programming interface that allows a client application to connect to any database that provides an ODBC driver. The EDB ODBC Connector provides connectivity between EDB Postgres Advanced Server (Advanced Server) and ODBC-compliant applications. + +This guide contains installation information for the EDB ODBC as well as information about creating data source definitions for the EDB ODBC. This guide also contains reference information that details the ODBC functionality supported by the EDB ODBC. + +
+ +whats_new requirements_overview edb-odbc_overview creating_a_data_source edb-odbc_connection_properties edb-odbc_driver_functionality scram_compatibility conclusion + +
diff --git a/yarn.lock b/yarn.lock index 47bb12f7686..940459fbad0 100644 --- a/yarn.lock +++ b/yarn.lock @@ -4886,9 +4886,9 @@ dns-equal@^1.0.0: integrity sha1-s55/HabrCnW6nBcySzR1PEfgZU0= dns-packet@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" - integrity sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg== + version "1.3.4" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.4.tgz#e3455065824a2507ba886c55a89963bb107dec6f" + integrity sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA== dependencies: ip "^1.1.0" safe-buffer "^5.0.1" From 1f657d76bf5b5155cb77ae3e842140cf175c179a Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Fri, 11 Jun 2021 10:11:14 -0600 Subject: [PATCH 4/5] Release: 2021-06-11 (#1455) * OEL to OL changes in FDW Guides * New PDFs generated by Github Actions * DF-237 Removed note from tools and utilities guide * Updated titles * New PDFs generated by Github Actions * Updated title * New PDFs generated by Github Actions * update CNP to 1.5.0 * bump version refs in demo Co-authored-by: Abhilasha Narendra Co-authored-by: drothery-edb <83650384+drothery-edb@users.noreply.github.com> Co-authored-by: sheetal Co-authored-by: Jon Ericson <83660216+jericson-edb@users.noreply.github.com> Former-commit-id: 27453723f0215cf2049a466685fd4a5c6a4deeb3 --- .../cloud_native_postgresql/api_reference.mdx | 270 ++++++++++++------ .../cloud_native_postgresql/architecture.mdx | 1 + .../cloud_native_postgresql/bootstrap.mdx | 268 ++++++++++++++++- .../cloud_native_postgresql/certificates.mdx | 160 +++++++++++ .../cloud_native_postgresql/cnp-plugin.mdx | 6 +- .../cloud_native_postgresql/e2e.mdx | 5 +- .../cloud_native_postgresql/failure_modes.mdx | 2 +- .../cloud_native_postgresql/index.mdx | 1 + .../installation_upgrade.mdx | 4 +- .../interactive_demo.mdx | 4 +- .../cloud_native_postgresql/logging.mdx | 8 +- .../cloud_native_postgresql/monitoring.mdx | 21 +- .../operator_capability_levels.mdx | 10 +- .../postgresql_conf.mdx | 13 +- .../cloud_native_postgresql/quickstart.mdx | 2 + .../cloud_native_postgresql/release_notes.mdx | 50 +++- .../resource_management.mdx | 3 +- .../samples/cluster-clone-basicauth.yaml | 28 ++ .../samples/cluster-clone-tls.yaml | 29 ++ .../samples/cluster-example-full.yaml | 2 +- .../cloud_native_postgresql/security.mdx | 19 +- .../ssl_connections.mdx | 4 +- .../cloud_native_postgresql/storage.mdx | 33 ++- .../interactive_demo.mdx | 4 +- .../12/epas_compat_ora_dev_guide/index.mdx | 2 +- .../epas_compat_tools_guide/02_edb_loader.mdx | 4 +- .../epas/12/epas_compat_tools_guide/index.mdx | 2 +- .../docs/epas/12/epas_rel_notes/index.mdx | 2 +- .../epas/13/edb_pgadmin_linux_qs/index.mdx | 2 +- .../13/epas_compat_ora_dev_guide/index.mdx | 2 +- .../epas_compat_tools_guide/02_edb_loader.mdx | 2 - .../epas/13/epas_compat_tools_guide/index.mdx | 2 +- .../docs/epas/13/epas_qs_windows/index.mdx | 2 - .../docs/epas/13/epas_rel_notes/index.mdx | 2 +- .../2.0.7/02_requirements_overview.mdx | 2 +- .../5.2.8/02_requirements_overview.mdx | 2 +- .../2.5.5/02_requirements_overview.mdx | 2 +- .../2.6.0/02_requirements_overview.mdx | 4 +- 38 files changed, 843 insertions(+), 136 deletions(-) create mode 100644 advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx create mode 100644 
advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml create mode 100644 advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx index 0173c9dc96c..2e9c7e0d537 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx @@ -30,12 +30,18 @@ Below you will find a description of the defined resources: - [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) - [BootstrapConfiguration](#BootstrapConfiguration) - [BootstrapInitDB](#BootstrapInitDB) +- [BootstrapPgBaseBackup](#BootstrapPgBaseBackup) - [BootstrapRecovery](#BootstrapRecovery) +- [CertificatesConfiguration](#CertificatesConfiguration) +- [CertificatesStatus](#CertificatesStatus) - [Cluster](#Cluster) - [ClusterList](#ClusterList) - [ClusterSpec](#ClusterSpec) - [ClusterStatus](#ClusterStatus) +- [ConfigMapKeySelector](#ConfigMapKeySelector) - [DataBackupConfiguration](#DataBackupConfiguration) +- [ExternalCluster](#ExternalCluster) +- [LocalObjectReference](#LocalObjectReference) - [MonitoringConfiguration](#MonitoringConfiguration) - [NodeMaintenanceWindow](#NodeMaintenanceWindow) - [PostgresConfiguration](#PostgresConfiguration) @@ -46,6 +52,7 @@ Below you will find a description of the defined resources: - [ScheduledBackupList](#ScheduledBackupList) - [ScheduledBackupSpec](#ScheduledBackupSpec) - [ScheduledBackupStatus](#ScheduledBackupStatus) +- [SecretKeySelector](#SecretKeySelector) - [SecretsResourceVersion](#SecretsResourceVersion) - [StorageConfiguration](#StorageConfiguration) - [WalBackupConfiguration](#WalBackupConfiguration) @@ -57,11 +64,12 @@ Below you will find a description of the defined resources: AffinityConfiguration contains the info we need to create the affinity rules for Pods -Name | Description | Type ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------- -`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool -`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string -`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string +Name | Description | Type +--------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- +`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool +`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string +`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string +`tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | []corev1.Toleration @@ -71,7 +79,7 @@ Backup is the Schema for the backups API Name | Description | Type -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the backup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupSpec](#BackupSpec) `status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupStatus](#BackupStatus) @@ -93,7 +101,7 @@ BackupList contains a list of Backup Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) +`metadata` | Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) `items ` | List of backups - *mandatory* | [[]Backup](#Backup) @@ -102,9 +110,9 @@ Name | Description BackupSpec defines the desired state of Backup -Name | Description | Type -------- | --------------------- | ---------------------------------------------------------------------------------------------------------------------------- -`cluster` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +Name | Description | Type +------- | --------------------- | --------------------------------------------- +`cluster` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) @@ -121,11 +129,15 @@ Name | Description `encryption ` | Encryption method required to S3 API | string `backupId ` | The ID of the Barman backup | string `phase ` | The last backup status | BackupPhase -`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) -`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) +`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) +`beginWal ` | The starting WAL | string +`endWal ` | The ending WAL | string +`beginLSN ` | The starting xlog | string +`endLSN ` | The ending xlog | string `error ` | The detected error | string -`commandOutput ` | The backup command output | string -`commandError ` | The backup command output | string +`commandOutput ` | Unused. Retained for compatibility with old versions. | string +`commandError ` | The backup command output in case of error | string @@ -148,10 +160,11 @@ Name | Description BootstrapConfiguration contains information about how to create the PostgreSQL cluster. Only a single bootstrap method can be defined among the supported ones. `initdb` will be used as the bootstrap method if left unspecified. Refer to the Bootstrap page of the documentation for more information. -Name | Description | Type --------- | ----------------------------------- | ---------------------------------------- -`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB) -`recovery` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery) +Name | Description | Type +------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------ +`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB) +`recovery ` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery) +`pg_basebackup` | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance | [*BootstrapPgBaseBackup](#BootstrapPgBaseBackup) @@ -159,13 +172,23 @@ Name | Description | Type BootstrapInitDB is the configuration of the bootstrap process when initdb is used Refer to the Bootstrap page of the documentation for more information. 
-Name | Description | Type --------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- -`database` | Name of the database used by the application. Default: `app`. - *mandatory* | string -`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string -`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) -`redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool -`options ` | The list of options that must be passed to initdb when creating the cluster | []string +Name | Description | Type +-------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- +`database` | Name of the database used by the application. Default: `app`. - *mandatory* | string +`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string +`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*LocalObjectReference](#LocalObjectReference) +`redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool +`options ` | The list of options that must be passed to initdb when creating the cluster | []string + + + +## BootstrapPgBaseBackup + +BootstrapPgBaseBackup contains the configuration required to take a physical backup of an existing PostgreSQL cluster + +Name | Description | Type +------ | ----------------------------------------------------------------- | ------ +`source` | The name of the server of which we need to take a physical backup - *mandatory* | string @@ -173,10 +196,45 @@ Name | Description BootstrapRecovery contains the configuration required to restore the backup with the specified name and, after having changed the password with the one chosen for the superuser, will use it to bootstrap a full cluster cloning all the instances from the restored primary. Refer to the Bootstrap page of the documentation for more information. -Name | Description | Type --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- -`backup ` | The backup we need to restore - *mandatory* | [corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) -`recoveryTarget` | By default the recovery will end as soon as a consistent state is reached: in this case that means at the end of a backup. 
This option allows to fine tune the recovery process | [*RecoveryTarget](#RecoveryTarget) +Name | Description | Type +-------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- +`backup ` | The backup we need to restore - *mandatory* | [LocalObjectReference](#LocalObjectReference) +`recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET | [*RecoveryTarget](#RecoveryTarget) + + + +## CertificatesConfiguration + +CertificatesConfiguration contains the needed configurations to handle server certificates. + +Name | Description | Type +----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- +`serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. + +Contains: + +- `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings. +- `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted. | string +`serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string +`serverAltDNSNames` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | []string + + + +## CertificatesStatus + +CertificatesStatus contains configuration certificates and related expiration dates. + +Name | Description | Type +-------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- +`clientCASecret ` | The secret containing the Client CA certificate. This secret contains a self-signed CA and is used to sign TLS certificates used for client authentication. 
+ +Contains: + +- `ca.crt`: CA that should be used to validate the client certificate, used as `ssl_ca_file`. - `ca.key`: key used to sign client SSL certs. | string +`replicationTLSSecret` | The secret of type kubernetes.io/tls containing the TLS client certificate to authenticate as `streaming_replica` user. | string +`expirations ` | Expiration dates for all certificates. | map[string]string @@ -186,7 +244,7 @@ Cluster is the Schema for the PostgreSQL API Name | Description | Type -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterSpec](#ClusterSpec) `status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterStatus](#ClusterStatus) @@ -198,7 +256,7 @@ ClusterList contains a list of Cluster Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) +`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) `items ` | List of clusters - *mandatory* | [[]Cluster](#Cluster) @@ -207,29 +265,31 @@ Name | Description ClusterSpec defines the desired state of Cluster -Name | Description | Type ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- -`description ` | Description of this PostgreSQL cluster | string -`imageName ` | Name of the container image | string -`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 -`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 -`instances ` | Number of instances required in the cluster - *mandatory* | int32 -`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. 
Undefined or 0 allow writes to complete when no standby is available. | int32 -`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 -`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) -`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration) -`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) -`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) -`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) -`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 -`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 -`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) -`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core) -`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy -`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration) -`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow) -`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. 
| string -`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration) +Name | Description | Type +--------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- +`description ` | Description of this PostgreSQL cluster | string +`imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string +`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 +`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 +`instances ` | Number of instances required in the cluster - *mandatory* | int32 +`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 +`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 +`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) +`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration) +`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*LocalObjectReference](#LocalObjectReference) +`certificates ` | The configuration for the CA and related certificates | [*CertificatesConfiguration](#CertificatesConfiguration) +`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]LocalObjectReference](#LocalObjectReference) +`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) +`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 +`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 +`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) +`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core) +`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy +`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration) +`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow) +`licenseKey ` | The license key of the cluster. 
When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string +`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration) +`externalClusters ` | The list of external clusters which are used in the configuration | [[]ExternalCluster](#ExternalCluster) @@ -255,6 +315,17 @@ Name | Description `phase ` | Current phase of the cluster | string `phaseReason ` | Reason for the current phase | string `secretsResourceVersion` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) +`certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus) + + + +## ConfigMapKeySelector + +ConfigMapKeySelector contains enough information to let you locate the key of a ConfigMap + +Name | Description | Type +--- | ----------------- | ------ +`key` | The key to select - *mandatory* | string @@ -269,16 +340,41 @@ Name | Description `immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool `jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | *int32 + + +## ExternalCluster + +ExternalCluster represents the connection parameters of an external server which is used in the cluster configuration + +Name | Description | Type +-------------------- | ---------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- +`name ` | The server name, required - *mandatory* | string +`connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string +`sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) +`sslKey ` | The reference to an SSL private key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) +`sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) +`password ` | The reference to the password to be used to connect to the server | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) + + + +## LocalObjectReference + +LocalObjectReference contains enough information to let you locate a local object with a known type inside the same namespace + +Name | Description | Type +---- | --------------------- | ------ +`name` | Name of the referent. 
- *mandatory* | string + ## MonitoringConfiguration MonitoringConfiguration is the type containing all the monitoring configuration for a certain cluster -Name | Description | Type ----------------------- | ----------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- -`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]corev1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#configmapkeyselector-v1-core) -`customQueriesSecret ` | The list of secrets containing the custom queries | [[]corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) +Name | Description | Type +---------------------- | ----------------------------------------------------- | ----------------------------------------------- +`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]ConfigMapKeySelector](#ConfigMapKeySelector) +`customQueriesSecret ` | The list of secrets containing the custom queries | [[]SecretKeySelector](#SecretKeySelector) @@ -329,7 +425,7 @@ RollingUpdateStatus contains the information about an instance which is being up Name | Description | Type --------- | ----------------------------------- | ------------------------------------------------------------------------------------------------ `imageName` | The image which we put into the Pod - *mandatory* | string -`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) @@ -337,10 +433,10 @@ Name | Description | Type S3Credentials is the type for the credentials to be used to upload files to S3 -Name | Description | Type ---------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- -`accessKeyId ` | The reference to the access key id - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) -`secretAccessKey` | The reference to the secret access key - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) +Name | Description | Type +--------------- | -------------------------------------- | --------------------------------------- +`accessKeyId ` | The reference to the access key id - *mandatory* | [SecretKeySelector](#SecretKeySelector) +`secretAccessKey` | The reference to the secret access key - *mandatory* | [SecretKeySelector](#SecretKeySelector) @@ -350,7 +446,7 @@ ScheduledBackup is the Schema for the scheduledbackups API Name | Description | Type -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) +`metadata` | | 
[metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the ScheduledBackup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupSpec](#ScheduledBackupSpec) `status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupStatus](#ScheduledBackupStatus) @@ -362,7 +458,7 @@ ScheduledBackupList contains a list of ScheduledBackup Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) +`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) `items ` | List of clusters - *mandatory* | [[]ScheduledBackup](#ScheduledBackup) @@ -371,11 +467,11 @@ Name | Description ScheduledBackupSpec defines the desired state of ScheduledBackup -Name | Description | Type --------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- -`suspend ` | If this backup is suspended of not | *bool -`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string -`cluster ` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +Name | Description | Type +-------- | -------------------------------------------------------------------- | --------------------------------------------- +`suspend ` | If this backup is suspended of not | *bool +`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string +`cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) @@ -385,9 +481,19 @@ ScheduledBackupStatus defines the observed state of ScheduledBackup Name | Description | Type ---------------- | -------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- -`lastCheckTime ` | The latest time the schedule | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) -`lastScheduleTime` | Information when was the last time that backup was successfully scheduled. 
| [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) -`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`lastCheckTime ` | The latest time the schedule | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) +`lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) +`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) + + + +## SecretKeySelector + +SecretKeySelector contains enough information to let you locate the key of a Secret + +Name | Description | Type +--- | ----------------- | ------ +`key` | The key to select - *mandatory* | string @@ -395,13 +501,15 @@ Name | Description SecretsResourceVersion is the resource versions of the secrets managed by the operator -Name | Description | Type ------------------------- | ----------------------------------------------------------------- | ------ -`superuserSecretVersion ` | The resource version of the "postgres" user secret - *mandatory* | string -`replicationSecretVersion` | The resource version of the "streaming_replication" user secret - *mandatory* | string -`applicationSecretVersion` | The resource version of the "app" user secret - *mandatory* | string -`caSecretVersion ` | The resource version of the "ca" secret version - *mandatory* | string -`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version - *mandatory* | string +Name | Description | Type +------------------------ | -------------------------------------------------------------------- | ------ +`superuserSecretVersion ` | The resource version of the "postgres" user secret - *mandatory* | string +`replicationSecretVersion` | The resource version of the "streaming_replication" user secret - *mandatory* | string +`applicationSecretVersion` | The resource version of the "app" user secret - *mandatory* | string +`caSecretVersion ` | Unused. Retained for compatibility with old versions. | string +`clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version - *mandatory* | string +`serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version - *mandatory* | string +`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version - *mandatory* | string @@ -414,7 +522,7 @@ Name | Description `storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | *string `size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. 
- *mandatory* | string `resizeInUseVolumes` | Resize existent PVCs, defaults to true | *bool -`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaim-v1-core) +`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#persistentvolumeclaim-v1-core) diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx index 4dccd20f717..814957ab9e9 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx @@ -124,3 +124,4 @@ The `-app` credentials are the ones that should be used by applications connecting to the PostgreSQL cluster. The `-superuser` ones are supposed to be used only for administrative purposes. + diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx index a6288dce3c4..11e1806e562 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx @@ -4,6 +4,11 @@ originalFilePath: 'src/bootstrap.md' product: 'Cloud Native Operator' --- +!!! Note + When referring to "PostgreSQL cluster" in this section, the same + concepts apply to both PostgreSQL and EDB Postgres Advanced, unless + differently stated. + This section describes the options you have to create a new PostgreSQL cluster and the design rationale behind them. @@ -34,9 +39,13 @@ The `initdb` bootstrap method is used. We currently support the following bootstrap methods: -- `initdb`: initialise an empty PostgreSQL cluster -- `recovery`: create a PostgreSQL cluster restoring from an existing backup - and replaying all the available WAL files. +- `initdb`: initialize an empty PostgreSQL cluster +- `recovery`: create a PostgreSQL cluster by restoring from an existing backup + and replaying all the available WAL files or up to a given point in time +- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of the + same major version using `pg_basebackup` via streaming replication protocol - + useful if you want to migrate databases to Cloud Native PostgreSQL, even + from outside Kubernetes. ## initdb @@ -306,3 +315,256 @@ spec: targetName: "maintenance-activity" exclusive: false ``` + +## pg_basebackup + +The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as +an exact physical copy of an existing and **binary compatible** PostgreSQL +instance (*source*), through a valid *streaming replication* connection. +The source instance can be either a primary or a standby PostgreSQL server. + +The primary use case for this method is represented by **migrations** to Cloud Native PostgreSQL, +either from outside Kubernetes or within Kubernetes (e.g., from another operator). + +!!! Warning + The current implementation creates a *snapshot* of the origin PostgreSQL + instance when the cloning process terminates and immediately starts + the created cluster. See ["Current limitations"](#current-limitations) below for details. 
+ +Similar to the case of the `recovery` bootstrap method, once the clone operation +completes, the operator will take ownership of the target cluster, starting from +the first instance. This includes overriding some configuration parameters, as +required by Cloud Native PostgreSQL, resetting the superuser password, creating +the `streaming_replica` user, managing the replicas, and so on. The resulting +cluster will be completely independent of the source instance. + +!!! Important + Configuring the network between the target instance and the source instance + goes beyond the scope of Cloud Native PostgreSQL documentation, as it depends + on the actual context and environment. + +The streaming replication client on the target instance, which will be +transparently managed by `pg_basebackup`, can authenticate itself on the source +instance in any of the following ways: + +1. via [username/password](#usernamepassword-authentication) +2. via [TLS client certificate](#tls-certificate-authentication) + +The latter is the recommended one if you connect to a source managed +by Cloud Native PostgreSQL or configured for TLS authentication. +The first option is, however, the most common form of authentication to a +PostgreSQL server in general, and might be the easiest way if the source +instance is on a traditional environment outside Kubernetes. +Both cases are explained below. + +### Requirements + +The following requirements apply to the `pg_basebackup` bootstrap method: + +- target and source must have the same hardware architecture +- target and source must have the same major PostgreSQL version +- source must not have any tablespace defined (see ["Current limitations"](#current-limitations) below) +- source must be configured with enough `max_wal_senders` to grant + access from the target for this one-off operation by providing at least + one *walsender* for the backup plus one for WAL streaming +- the network between source and target must be configured to enable the target + instance to connect to the PostgreSQL port on the source instance +- source must have a role with `REPLICATION LOGIN` privileges and must accept + connections from the target instance for this role in `pg_hba.conf`, preferably + via TLS (see ["About the replication user"](#about-the-replication-user) below) +- target must be able to successfully connect to the source PostgreSQL instance + using a role with `REPLICATION LOGIN` privileges + +!!! Seealso + For further information, please refer to the + ["Planning" section for Warm Standby](https://www.postgresql.org/docs/current/warm-standby.html#STANDBY-PLANNING), + the + [`pg_basebackup` page](https://www.postgresql.org/docs/current/app-pgbasebackup.html) + and the + ["High Availability, Load Balancing, and Replication" chapter](https://www.postgresql.org/docs/current/high-availability.html) + in the PostgreSQL documentation. + +### About the replication user + +As explained in the requirements section, you need to have a user +with either the `SUPERUSER` or, preferably, just the `REPLICATION` +privilege in the source instance. + +If the source database is created with Cloud Native PostgreSQL, you +can reuse the `streaming_replica` user and take advantage of client +TLS certificates authentication (which, by default, is the only allowed +connection method for `streaming_replica`). + +For all other cases, including outside Kubernetes, please verify that +you already have a user with the `REPLICATION` privilege, or create +a new one by following the instructions below. 
+ +As `postgres` user on the source system, please run: + +```console +createuser -P --replication streaming_replica +``` + +Enter the password at the prompt and save it for later, as you +will need to add it to a secret in the target instance. + +!!! Note + Although the name is not important, we will use `streaming_replica` + for the sake of simplicity. Feel free to change it as you like, + provided you adapt the instructions in the following sections. + +### Username/Password authentication + +The first authentication method supported by Cloud Native PostgreSQL +with the `pg_basebackup` bootstrap is based on username and password matching. + +Make sure you have the following information before you start the procedure: + +- location of the source instance, identified by a hostname or an IP address + and a TCP port +- replication username (`streaming_replica` for simplicity) +- password + +You might need to add a line similar to the following to the `pg_hba.conf` +file on the source PostgreSQL instance: + +``` +# A more restrictive rule for TLS and IP of origin is recommended +host replication streaming_replica all md5 +``` + +The following manifest creates a new PostgreSQL 13.3 cluster, +called `target-db`, using the `pg_basebackup` bootstrap method +to clone an external PostgreSQL cluster defined as `source-db` +(in the `externalClusters` array). As you can see, the `source-db` +definition points to the `source-db.foo.com` host and connects as +the `streaming_replica` user, whose password is stored in the +`password` key of the `source-db-replica-user` secret. + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: target-db +spec: + instances: 3 + imageName: quay.io/enterprisedb/postgresql:13.3 + + bootstrap: + pg_basebackup: + source: source-db + + storage: + size: 1Gi + + externalClusters: + - name: source-db + connectionParameters: + host: source-db.foo.com + user: streaming_replica + password: + name: source-db-replica-user + key: password +``` + +All the requirements must be met for the clone operation to work, including +the same PostgreSQL version (in our case 13.3). + +### TLS certificate authentication + +The second authentication method supported by Cloud Native PostgreSQL +with the `pg_basebackup` bootstrap is based on TLS client certificates. +This is the recommended approach from a security standpoint. + +The following example clones an existing PostgreSQL cluster (`cluster-example`) +in the same Kubernetes cluster. + +!!! Note + This example can be easily adapted to cover an instance that resides + outside the Kubernetes cluster. + +The manifest defines a new PostgreSQL 13.3 cluster called `cluster-clone-tls`, +which is bootstrapped using the `pg_basebackup` method from the `cluster-example` +external cluster. The host is identified by the read/write service +in the same cluster, while the `streaming_replica` user is authenticated +thanks to the provided keys, certificate, and certification authority +information (respectively in the `cluster-example-replication` and +`cluster-example-ca` secrets). 
+ +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-clone-tls +spec: + instances: 3 + imageName: quay.io/enterprisedb/postgresql:13.3 + + bootstrap: + pg_basebackup: + source: cluster-example + + storage: + size: 1Gi + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: streaming_replica + sslmode: verify-full + sslKey: + name: cluster-example-replication + key: tls.key + sslCert: + name: cluster-example-replication + key: tls.crt + sslRootCert: + name: cluster-example-ca + key: ca.crt +``` + +### Current limitations + +#### Missing tablespace support + +Cloud Native PostgreSQL does not currently include full declarative management +of PostgreSQL global objects, namely roles, databases, and tablespaces. +While roles and databases are copied from the source instance to the target +cluster, tablespaces require a capability that this version of +Cloud Native PostgreSQL is missing: definition and management of additional +persistent volumes. When dealing with base backup and tablespaces, PostgreSQL +itself requires that the exact mount points in the source instance +must also exist in the target instance, in our case, the pods in Kubernetes +that Cloud Native PostgreSQL manages. For this reason, you cannot directly +migrate in Cloud Native PostgreSQL a PostgreSQL instance that takes advantage +of tablespaces (you first need to remove them from the source or, if your +organization requires this feature, contact EDB to prioritize it). + +#### Snapshot copy + +The `pg_basebackup` method takes a snapshot of the source instance in the form of +a PostgreSQL base backup. All transactions written from the start of +the backup to the correct termination of the backup will be streamed to the target +instance using a second connection (see the `--wal-method=stream` option for +`pg_basebackup`). + +Once the backup is completed, the new instance will be started on a new timeline +and diverge from the source. +For this reason, it is advised to stop all write operations to the source database +before migrating to the target database in Kubernetes. + +!!! Important + Before you attempt a migration, you must test both the procedure + and the applications. In particular, it is fundamental that you run the migration + procedure as many times as needed to systematically measure the downtime of your + applications in production. Feel free to contact EDB for assistance. + +Future versions of Cloud Native PostgreSQL will enable users to control +PostgreSQL's continuous recovery mechanism via Write-Ahead Log (WAL) shipping +by creating a new cluster that is a replica of another PostgreSQL instance. +This will open up two main use cases: + +- replication over different Kubernetes clusters in Cloud Native PostgreSQL +- *0 cutover time* migrations to Cloud Native PostgreSQL with the `pg_basebackup` + bootstrap method diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx new file mode 100644 index 00000000000..30169f5b0a1 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx @@ -0,0 +1,160 @@ +--- +title: 'Certificates' +originalFilePath: 'src/certificates.md' +product: 'Cloud Native Operator' +--- + +Cloud Native PostgreSQL has been designed to natively support TLS certificates. 
+In order to set up a `Cluster`, the operator requires: + +- a server Certification Authority (CA) certificate +- a server TLS certificate signed by the server Certification Authority +- a client Certification Authority certificate +- a streaming replication client certificate generated by the client Certification Authority + +!!! Note + You can find all the secrets used by the cluster and their expiration dates + in the cluster's status. + +## Operator managed mode + +By default, the operator generates a single Certification Authority and uses it +for both client and server certificates, which are then managed and renewed +automatically. + +### Server CA Secret + +The operator generates a self-signed CA and stores it in a generic secret +containing the following keys: + +- `ca.crt`: CA certificate used to validate the server certificate, used as `sslrootcert` in clients' connection strings. +- `ca.key`: the key used to sign Server SSL certificate automatically + +### Server TLS Secret + +The operator uses the generated self-signed CA to sign a server TLS +certificate, stored in a Secret of type `kubernetes.io/tls` and configured to +be used as `ssl_cert_file` and `ssl_key_file` by the instances so that clients +can verify their identity and connect securely. + +### Server alternative DNS names + +You can specify DNS server alternative names that will be part of the +generated server TLS secret in addition to the default ones. + +## User-provided server certificate mode + +If required, you can also provide the two server certificates, generating them +using a separate component such as [cert-manager](https://cert-manager.io/). In +order to use a custom server TLS certificate for a Cluster, you must specify +the following parameters: + +- `serverTLSSecret`: the name of a Secret of type `kubernetes.io/tls`, + containing the server TLS certificate. It must contain both the standard + `tls.crt` and `tls.key` keys. +- `serverCASecret`: the name of a Secret containing the `ca.crt` key. + +!!! Note + The operator will still create and manage the two secrets related to client + certificates. + +See below for a complete example. + +### Example + +Given the following files: + +- `server-ca.crt`: the certificate of the CA that signed the server TLS certificate. +- `server.crt`: the certificate of the server TLS certificate. +- `server.key`: the private key of the server TLS certificate. + +Create a secret containing the CA certificate: + +``` +kubectl create secret generic my-postgresql-server-ca \ + --from-file=ca.crt=./server-ca.crt +``` + +Create a secret with the TLS certificate: + +``` +kubectl create secret tls my-postgresql-server \ + --cert=./server.crt --key=./server.key +``` + +Create a `Cluster` referencing those secrets: + +```bash +kubectl apply -f - < @@ -196,9 +198,10 @@ Native PostgreSQL's exporter: Similarly, the `pg_version` field of a column definition is not implemented. -# Monitoring the operator +## Monitoring the operator -The operator exposes [Prometheus](https://prometheus.io/) metrics via HTTP on port 8080, named `metrics`. +The operator internally exposes [Prometheus](https://prometheus.io/) metrics +via HTTP on port 8080, named `metrics`. Metrics can be accessed as follows: @@ -209,9 +212,9 @@ curl http://:8080/metrics Currently, the operator exposes default `kubebuilder` metrics, see [kubebuilder documentation](https://book.kubebuilder.io/reference/metrics.html) for more details. 
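As a quick way to inspect this endpoint from a workstation, you can port-forward to the operator pod and query it locally. The following is a minimal sketch: the `postgresql-operator-system` namespace matches the default installation manifest, while the `control-plane=controller-manager` label used to locate the operator pod is an assumption that may differ in your deployment.

```bash
# Sketch: forward the operator's metrics port and query it locally.
# The pod label selector below is an assumption; adjust it to match
# the labels used by your operator deployment.
OPERATOR_POD=$(kubectl get pods -n postgresql-operator-system \
  -l control-plane=controller-manager \
  -o jsonpath='{.items[0].metadata.name}')
kubectl port-forward -n postgresql-operator-system "$OPERATOR_POD" 8080:8080 &

curl http://localhost:8080/metrics
```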
-## Prometheus Operator example +### Prometheus Operator example -The deployment operator can be monitored using the +The operator deployment can be monitored using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) by defining the following [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor) resource: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx index d2d4c9f44fa..14867b69545 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx @@ -58,7 +58,8 @@ Community and published on Quay.io by EnterpriseDB. You can use any compatible image of PostgreSQL supporting the primary/standby architecture directly by setting the `imageName` attribute in the CR. The operator also supports `imagePullSecretsNames` -to access private container registries. +to access private container registries, as well as digests in addition to +tags for finer control of container image immutability. ### Labels and annotations @@ -130,8 +131,11 @@ allocated UID and SELinux context. The operator supports basic pod affinity/anti-affinity rules to deploy PostgreSQL pods on different nodes, based on the selected `topologyKey` (for example `node` or -`zone`). Additionally, it supports node affinity through the `nodeSelector` -configuration attribute, as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). +`zone`). it supports node affinity/anti-affinity through the `nodeSelector` +configuration attribute, to be specified as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) +and tolerations through the `tolerations` configuration attribute, which will be added for all the pods created by the +operator related to a specific Cluster, using kubernetes [standard syntax](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + ### License keys diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx index 613db920160..df0455940ff 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx @@ -71,6 +71,17 @@ The **default parameters for PostgreSQL 10 to 12** are: wal_keep_segments = '32' ``` +!!! Warning + It is your duty to plan for WAL segments retention in your PostgreSQL + cluster and properly configure either `wal_keep_segments` or `wal_keep_size`, + depending on the server version, based on the expected and observed workloads. + Until Cloud Native PostgreSQL supports replication slots, and if you don't have + continuous backup in place, this is the only way at the moment that protects + from the case of a standby falling out of sync and returning error messages like: + `"could not receive data from WAL stream: ERROR: requested WAL segment ************************ has already been removed"`. + This will require you to dedicate a part of your `PGDATA` to keep older + WAL segments for streaming replication purposes. 
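For example, on PostgreSQL 13 the retention can be raised through the `postgresql.parameters` stanza of the `Cluster` resource. The manifest below is a sketch only: the `wal_keep_size` value is illustrative and must be sized for your expected workload (on PostgreSQL 10 to 12, set `wal_keep_segments` instead).

```bash
# Sketch: raise WAL retention for a PostgreSQL 13 cluster.
# The 512MB value is an example only; size it for your workload.
kubectl apply -f - <<EOF
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imageName: quay.io/enterprisedb/postgresql:13.3
  storage:
    size: 1Gi
  postgresql:
    parameters:
      wal_keep_size: "512MB"
EOF
```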
+ The following parameters are **fixed** and exclusively controlled by the operator: ```text @@ -82,7 +93,7 @@ hot_standby = 'true' listen_addresses = '*' port = '5432' ssl = 'on' -ssl_ca_file = '/controller/certificates/ca.crt' +ssl_ca_file = '/controller/certificates/client-ca.crt' ssl_cert_file = '/controller/certificates/server.crt' ssl_key_file = '/controller/certificates/server.key' unix_socket_directories = '/var/run/postgresql' diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx index 3918b32167c..fcd151e0b49 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx @@ -181,3 +181,5 @@ spec: Never use tags like `latest` or `13` in a production environment as it might lead to unpredictable scenarios in terms of update policies and version consistency in the cluster. + For strict deterministic and repeatable deployments, you can add the digests + to the image name, through the `:@sha256:` format. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx index 666072a7322..114a8851414 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx @@ -6,6 +6,55 @@ product: 'Cloud Native Operator' History of user-visible changes for Cloud Native PostgreSQL. +## Version 1.5.0 + +**Release date:** 11 June 2021 + +Features: + +- Introduce the `pg_basebackup` bootstrap method to create a new PostgreSQL + cluster as a copy of an existing PostgreSQL instance of the same major + version, even outside Kubernetes +- Add support for Kubernetes’ tolerations in the `Affinity` section of the + `Cluster` resource, allowing users to distribute PostgreSQL instances on + Kubernetes nodes with the required taint +- Enable specification of a digest to an image name, through the + `:@sha256:` format, for more deterministic and + repeatable deployments + +Security Enhancements: + +- Customize TLS certificates to authenticate the PostgreSQL server by defining + secrets for the server certificate and the related Certification Authority + that signed it +- Raise the `sslmode` for the WAL receiver process of internal and + automatically managed streaming replicas from `require` to `verify-ca` + +Changes: + +- Enhance the `promote` subcommand of the `cnp` plugin for `kubectl` to accept + just the node number rather than the whole name of the pod +- Adopt DNS-1035 validation scheme for cluster names (from which service names + are inherited) +- Enforce streaming replication connection when cloning a standby instance or + when bootstrapping using the `pg_basebackup` method +- Integrate the `Backup` resource with `beginWal`, `endWal`, `beginLSN`, + `endLSN`, `startedAt` and `stoppedAt` regarding the physical base backup +- Documentation improvements: + - Provide a list of ports exposed by the operator and the operand container + - Introduce the `cnp-bench` helm charts and guidelines for benchmarking the + storage and PostgreSQL for database workloads +- E2E tests enhancements: + - Test Kubernetes 1.21 + - Add test for High Availability of the operator + - Add test for node draining +- Minor bug fixes, including: + - Timeout to pg_ctl start during recovery operations too short + - Operator not watching over direct events on PVCs + - Fix handling of 
`immediateCheckpoint` and `jobs` parameter in + `barmanObjectStore` backups + - Empty logs when recovering from a backup + ## Version 1.4.0 **Release date:** 18 May 2021 @@ -139,4 +188,3 @@ Kubernetes with the following main capabilities: - Support for synchronous replicas - Support for node affinity via `nodeSelector` property - Standard output logging of PostgreSQL error messages - diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx index 6257b775957..9fd4381fbe9 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx @@ -54,7 +54,8 @@ while creating a cluster: - Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do in a VM or physical machine scenario - see below). - Set up database server pods on a dedicated node using nodeSelector. - See the ["nodeSelector field of the affinityconfiguration resource on the API reference page"](api_reference.md#affinityconfiguration). + See the "nodeSelector" and "tolerations" fields of the + [“affinityconfiguration"](api_reference.md#affinityconfiguration) resource on the API reference page. You can refer to the following example manifest: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml new file mode 100644 index 00000000000..5783b0c38c1 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml @@ -0,0 +1,28 @@ +# IMPORTANT: this configuration requires an appropriate line +# in the host-based access rules allowing replication connections +# to the postgres user. 
+# +# The following line met the requisites +# - "host replication postgres all md5" +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-clone-basicauth +spec: + instances: 3 + + bootstrap: + pg_basebackup: + source: cluster-example + + storage: + size: 1Gi + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: postgres + password: + name: cluster-example-superuser + key: password \ No newline at end of file diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml new file mode 100644 index 00000000000..2b509e63c7f --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml @@ -0,0 +1,29 @@ +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-clone-tls +spec: + instances: 3 + + bootstrap: + pg_basebackup: + source: cluster-example + + storage: + size: 1Gi + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: streaming_replica + sslmode: verify-full + sslKey: + name: cluster-example-replication + key: tls.key + sslCert: + name: cluster-example-replication + key: tls.crt + sslRootCert: + name: cluster-example-ca + key: ca.crt diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml index 71e497e2baf..ce7e649538f 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml @@ -33,7 +33,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:13.2 + imageName: quay.io/enterprisedb/postgresql:13.3 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx index c867b96398c..f811f77de14 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx @@ -113,13 +113,28 @@ to enable/disable inbound and outbound network access at IP and TCP level. !!! Important The operator needs to communicate to each instance on TCP port 8000 - to get information about the status of the PostgreSQL server. Make sure - you keep this in mind in case you add any network policy. + to get information about the status of the PostgreSQL server. Please + make sure you keep this in mind in case you add any network policy, + and refer to the "Exposed Ports" section below for a list of ports used by + Cloud Native PostgreSQL for finer control. Network policies are beyond the scope of this document. Please refer to the ["Network policies"](https://kubernetes.io/docs/concepts/services-networking/network-policies/) section of the Kubernetes documentation for further information. 
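If you do introduce network policies, make sure the operator can still reach the status port of every instance. The policy below is a sketch only: both the `postgresql: cluster-example` pod label and the `app: postgresql-operator` namespace label are assumptions and must be replaced with the labels actually present in your environment.

```bash
# Sketch of a NetworkPolicy that keeps port 8000 (status) reachable
# from the operator. The pod and namespace selectors are assumptions;
# adapt them to the labels used in your environment.
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-operator-to-instances
spec:
  podSelector:
    matchLabels:
      postgresql: cluster-example
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              app: postgresql-operator
      ports:
        - protocol: TCP
          port: 8000
EOF
```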
+#### Exposed Ports + +Cloud Native PostgreSQL exposes ports at operator, instance manager and operand +levels, as listed in the table below: + +System | Port number | Exposing | Name | Certificates | Authentication +:--------------- | :----------- | :------------------ | :------------------ | :------------ | :-------------- +operator | 9443 | webhook server | `webhook-server` | TLS | Yes +operator | 8080 | metrics | `metrics` | no TLS | No +instance manager | 9187 | metrics | `metrics` | no TLS | No +instance manager | 8000 | status | `status` | no TLS | No +operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes + ### PostgreSQL The current implementation of Cloud Native PostgreSQL automatically creates diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx index bda8a53b6bb..3e422be50b8 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx @@ -84,7 +84,7 @@ spec: app: webtest spec: containers: - - image: leonardoce/webtest:1.0.0 + - image: quay.io/leonardoce/webtest:1.3.0 name: cert-test volumeMounts: - name: secret-volume-root-ca @@ -163,7 +163,7 @@ Output : version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 13.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx index c3028831f95..a26ed1b545c 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx @@ -35,11 +35,37 @@ guarantees higher and more predictable performance. !!! Warning Before you deploy a PostgreSQL cluster with Cloud Native PostgreSQL, - make sure that the storage you are using is recommended for database + ensure that the storage you are using is recommended for database workloads. Our advice is to clearly set performance expectations by first benchmarking the storage using tools such as [fio](https://fio.readthedocs.io/en/latest/fio_doc.html), and then the database using [pgbench](https://www.postgresql.org/docs/current/pgbench.html). +## Benchmarking Cloud Native PostgreSQL + +EDB maintains [cnp-bench](https://github.com/EnterpriseDB/cnp-bench), +an open source set of guidelines and Helm charts for benchmarking Cloud Native PostgreSQL +in a controlled Kubernetes environment, before deploying the database in production. + +Briefly, `cnp-bench` is designed to operate at two levels: + +- measuring the performance of the underlying storage using `fio`, with relevant + metrics for database workloads such as throughput for sequential reads, sequential + writes, random reads and random writes +- measuring the performance of the database using the default benchmarking tool + distributed along with PostgreSQL: `pgbench` + +!!! Important + Measuring both the storage and database performance is an activity that + must be done **before the database goes in production**. 
However, such results + are extremely valuable not only in the planning phase (e.g., capacity planning), + but also in the production lifecycle, especially in emergency situations + (when we don't have the luxury anymore to run this kind of tests). Databases indeed + change and evolve over time, so does the distribution of data, potentially affecting + performance: knowing the theoretical maximum throughput of sequential reads or + writes will turn out to be extremely useful in those situations. Especially in + shared-nothing contexts, where results do not vary due to the influence of external workloads. + **Know your system, benchmark it.** + ## Persistent Volume Claim The operator creates a persistent volume claim (PVC) for each PostgreSQL @@ -77,6 +103,11 @@ spec: size: 1Gi ``` +!!! Important + Cloud Native PostgreSQL has been designed to be storage class agnostic. + As usual, our recommendation is to properly benchmark the storage class + in a controlled environment, before hitting production. + ## Configuration via a PVC template To further customize the generated PVCs, you can provide a PVC template inside the Custom Resource, diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx index f943440f93b..b9d1f93f494 100644 --- a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx +++ b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -65,7 +65,7 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.4.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.5.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -245,7 +245,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.4.0 for v1.4.0/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.5.0 for v1.5.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` diff --git a/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx b/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx index 565ac395002..bc8068886c5 100644 --- a/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx +++ b/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: User Guide -title: "Database Compatibility for Oracle Developer's Guide" +title: "Database Compatibility for Oracle Developers Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/13/index.html" diff --git a/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx b/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx index 8014fa788f5..d47d62c7dd1 100644 --- a/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx +++ b/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx @@ -24,9 +24,7 @@ These features are explained in detail in the following sections. !!! Note The following are important version compatibility restrictions between the EDB\*Loader client and the database server. -- When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. **We strongly recommend that the version 12 EDB\*Loader client (the edbldr program supplied with Advanced Server 12) be used to load data only into version 12 of the database server. In general, the EDB\*Loader client and database server should be the same version.** - -- Use of a version 12, 11, 10, or 9.6 EDB\*Loader client is not supported for Advanced Server with version 9.2 or earlier. +When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. **We strongly recommend that the version 12 EDB\*Loader client (the edbldr program supplied with Advanced Server 12) be used to load data only into version 12 of the database server. In general, the EDB\*Loader client and database server should be the same version.** diff --git a/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx b/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx index 42126bf0504..06f493cd0e4 100644 --- a/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx +++ b/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Tools and Utilities Guide -title: "Database Compatibility for Oracle Developer’s Tools and Utilities Guide" +title: "Database Compatibility for Oracle Developers Tools and Utilities Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/13/index.html" diff --git a/product_docs/docs/epas/12/epas_rel_notes/index.mdx b/product_docs/docs/epas/12/epas_rel_notes/index.mdx index b1688971515..f289efea3d8 100644 --- a/product_docs/docs/epas/12/epas_rel_notes/index.mdx +++ b/product_docs/docs/epas/12/epas_rel_notes/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Release Notes -title: "EDB Postgres Advanced Server 12 Release Notes" +title: "EDB Postgres Advanced Server Release Notes" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
diff --git a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx index 3f65778bdba..890ce29fbef 100644 --- a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx +++ b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx @@ -1,5 +1,5 @@ --- -title: "EDB pgAdmin4 Quickstart Linux Guide for EPAS" +title: "EDB pgAdmin4 Quickstart Linux Guide" legacyRedirects: - "/edb-docs/d/pgadmin-4/quick-start/quick-start-guide/4.26/index.html" --- diff --git a/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx b/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx index d340bafe379..0911a75f66c 100644 --- a/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx +++ b/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: User Guide -title: "Database Compatibility for Oracle Developer's Guide" +title: "Database Compatibility for Oracle Developers Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/13/index.html" diff --git a/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx b/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx index ba46c1c30da..31b6f3cfc58 100644 --- a/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx +++ b/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx @@ -34,8 +34,6 @@ These features are explained in detail in the following sections. psycopg2 copy_from ``` -- Use of a version 13, 12, 11, 10, or 9.6 EDB\*Loader client is not supported for Advanced Server with version 9.2 or earlier. - ## Data Loading Methods diff --git a/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx b/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx index feb3a2d897f..b0b83983c5a 100644 --- a/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx +++ b/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Tools and Utilities Guide -title: "Database Compatibility for Oracle Developer’s Tools and Utilities Guide" +title: "Database Compatibility for Oracle Developers Tools and Utilities Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/13/index.html" diff --git a/product_docs/docs/epas/13/epas_qs_windows/index.mdx b/product_docs/docs/epas/13/epas_qs_windows/index.mdx index 9b64d41c4d8..7bc8729be2e 100644 --- a/product_docs/docs/epas/13/epas_qs_windows/index.mdx +++ b/product_docs/docs/epas/13/epas_qs_windows/index.mdx @@ -30,8 +30,6 @@ Among the components that make up an Advanced Server deployment are: **Supporting Functions, Procedures, Data Types, Index Types, Operators, Utilities, and Aggregates** - Advanced Server includes a number of features that help you manage your data. -Please note: The `data` directory of a production database should not be stored on an NFS file system. 
- **Installation Prerequisites** **User Privileges** diff --git a/product_docs/docs/epas/13/epas_rel_notes/index.mdx b/product_docs/docs/epas/13/epas_rel_notes/index.mdx index 254f0438167..d68086306a3 100644 --- a/product_docs/docs/epas/13/epas_rel_notes/index.mdx +++ b/product_docs/docs/epas/13/epas_rel_notes/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Release Notes -title: "EDB Postgres Advanced Server 13 Release Notes" +title: "EDB Postgres Advanced Server Release Notes" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx index 4c9969fe4e6..afaf5d7b204 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The Hadoop Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x and 7.x > - CentOS 8.x and 7.x -> - OEL 8.x and 7.x +> - OL 8.x and 7.x > - Ubuntu 20.04 and 18.04 LTS > - Debian 10.x and 9.x diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx index 810cc135c95..6078203da1c 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MongoDB Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x/7.x > - CentOS 8.x/7.x -> - OEL 8.x/7.x +> - OL 8.x/7.x > - Ubuntu 20.04/18.04 LTS > - Debian 10.x/9.x diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx index 3b8ed138c80..298bdbc1073 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MySQL Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x/7.x > - CentOS 8.x/7.x -> - OEL 8.x/7.x +> - OL 8.x/7.x > - Ubuntu 20.04/18.04 LTS > - Debian 10.x/9.x diff --git a/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx b/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx index c5d8ce0b2f9..76e502543e2 100644 --- a/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9. - RHEL 8.x/7.x - CentOS 8.x/7.x -- OEL 8.x/7.x +- OL 8.x/7.x - Ubuntu 20.04/18.04 LTS - Debian 10.x/9.x @@ -24,7 +24,7 @@ The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9. - RHEL 7.x - CentOS 7.x -- OEL 7.x +- OL 7.x - Ubuntu 18.04 LTS - Debian 10.x/9.x From 1a2d9855092171e20710b3bf20d547b80f24dd12 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Fri, 11 Jun 2021 10:20:19 -0600 Subject: [PATCH 5/5] Merge pull request #1358 from EnterpriseDB/release/2021-05-11 (#1456) This reverts commit 1f657d76bf5b5155cb77ae3e842140cf175c179a [formerly 27453723f0215cf2049a466685fd4a5c6a4deeb3]. 
Former-commit-id: 039daa7d6bfcf4696262e7476a8e3916a85385f9 --- .../cloud_native_postgresql/api_reference.mdx | 270 ++++++------------ .../cloud_native_postgresql/architecture.mdx | 1 - .../cloud_native_postgresql/bootstrap.mdx | 268 +---------------- .../cloud_native_postgresql/certificates.mdx | 160 ----------- .../cloud_native_postgresql/cnp-plugin.mdx | 6 +- .../cloud_native_postgresql/e2e.mdx | 5 +- .../cloud_native_postgresql/failure_modes.mdx | 2 +- .../cloud_native_postgresql/index.mdx | 1 - .../installation_upgrade.mdx | 4 +- .../interactive_demo.mdx | 4 +- .../cloud_native_postgresql/logging.mdx | 8 +- .../cloud_native_postgresql/monitoring.mdx | 21 +- .../operator_capability_levels.mdx | 10 +- .../postgresql_conf.mdx | 13 +- .../cloud_native_postgresql/quickstart.mdx | 2 - .../cloud_native_postgresql/release_notes.mdx | 50 +--- .../resource_management.mdx | 3 +- .../samples/cluster-clone-basicauth.yaml | 28 -- .../samples/cluster-clone-tls.yaml | 29 -- .../samples/cluster-example-full.yaml | 2 +- .../cloud_native_postgresql/security.mdx | 19 +- .../ssl_connections.mdx | 4 +- .../cloud_native_postgresql/storage.mdx | 33 +-- .../interactive_demo.mdx | 4 +- .../12/epas_compat_ora_dev_guide/index.mdx | 2 +- .../epas_compat_tools_guide/02_edb_loader.mdx | 4 +- .../epas/12/epas_compat_tools_guide/index.mdx | 2 +- .../docs/epas/12/epas_rel_notes/index.mdx | 2 +- .../epas/13/edb_pgadmin_linux_qs/index.mdx | 2 +- .../13/epas_compat_ora_dev_guide/index.mdx | 2 +- .../epas_compat_tools_guide/02_edb_loader.mdx | 2 + .../epas/13/epas_compat_tools_guide/index.mdx | 2 +- .../docs/epas/13/epas_qs_windows/index.mdx | 2 + .../docs/epas/13/epas_rel_notes/index.mdx | 2 +- .../2.0.7/02_requirements_overview.mdx | 2 +- .../5.2.8/02_requirements_overview.mdx | 2 +- .../2.5.5/02_requirements_overview.mdx | 2 +- .../2.6.0/02_requirements_overview.mdx | 4 +- 38 files changed, 136 insertions(+), 843 deletions(-) delete mode 100644 advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx delete mode 100644 advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml delete mode 100644 advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx index 2e9c7e0d537..0173c9dc96c 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx @@ -30,18 +30,12 @@ Below you will find a description of the defined resources: - [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) - [BootstrapConfiguration](#BootstrapConfiguration) - [BootstrapInitDB](#BootstrapInitDB) -- [BootstrapPgBaseBackup](#BootstrapPgBaseBackup) - [BootstrapRecovery](#BootstrapRecovery) -- [CertificatesConfiguration](#CertificatesConfiguration) -- [CertificatesStatus](#CertificatesStatus) - [Cluster](#Cluster) - [ClusterList](#ClusterList) - [ClusterSpec](#ClusterSpec) - [ClusterStatus](#ClusterStatus) -- [ConfigMapKeySelector](#ConfigMapKeySelector) - [DataBackupConfiguration](#DataBackupConfiguration) -- [ExternalCluster](#ExternalCluster) -- [LocalObjectReference](#LocalObjectReference) - [MonitoringConfiguration](#MonitoringConfiguration) - [NodeMaintenanceWindow](#NodeMaintenanceWindow) - [PostgresConfiguration](#PostgresConfiguration) @@ -52,7 +46,6 @@ Below you will find a description of the defined resources: - 
[ScheduledBackupList](#ScheduledBackupList) - [ScheduledBackupSpec](#ScheduledBackupSpec) - [ScheduledBackupStatus](#ScheduledBackupStatus) -- [SecretKeySelector](#SecretKeySelector) - [SecretsResourceVersion](#SecretsResourceVersion) - [StorageConfiguration](#StorageConfiguration) - [WalBackupConfiguration](#WalBackupConfiguration) @@ -64,12 +57,11 @@ Below you will find a description of the defined resources: AffinityConfiguration contains the info we need to create the affinity rules for Pods -Name | Description | Type ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- -`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool -`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string -`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string -`tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | []corev1.Toleration +Name | Description | Type +--------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------- +`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool +`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string +`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string @@ -79,7 +71,7 @@ Backup is the Schema for the backups API Name | Description | Type -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the backup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupSpec](#BackupSpec) `status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupStatus](#BackupStatus) @@ -101,7 +93,7 @@ BackupList contains a list of Backup Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) +`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) `items ` | List of backups - *mandatory* | [[]Backup](#Backup) @@ -110,9 +102,9 @@ Name | Description BackupSpec defines the desired state of Backup -Name | Description | Type -------- | --------------------- | --------------------------------------------- -`cluster` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) +Name | Description | Type +------- | --------------------- | ---------------------------------------------------------------------------------------------------------------------------- +`cluster` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) @@ -129,15 +121,11 @@ Name | Description `encryption ` | Encryption method required to S3 API | string `backupId ` | The ID of the Barman backup | string `phase ` | The last backup status | BackupPhase -`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) -`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) -`beginWal ` | The starting WAL | string -`endWal ` | The ending WAL | string -`beginLSN ` | The starting xlog | string -`endLSN ` | The ending xlog | string +`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) `error ` | The detected error | string -`commandOutput ` | Unused. Retained for compatibility with old versions. | string -`commandError ` | The backup command output in case of error | string +`commandOutput ` | The backup command output | string +`commandError ` | The backup command output | string @@ -160,11 +148,10 @@ Name | Description BootstrapConfiguration contains information about how to create the PostgreSQL cluster. Only a single bootstrap method can be defined among the supported ones. `initdb` will be used as the bootstrap method if left unspecified. Refer to the Bootstrap page of the documentation for more information. 
-Name | Description | Type -------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------ -`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB) -`recovery ` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery) -`pg_basebackup` | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance | [*BootstrapPgBaseBackup](#BootstrapPgBaseBackup) +Name | Description | Type +-------- | ----------------------------------- | ---------------------------------------- +`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB) +`recovery` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery) @@ -172,23 +159,13 @@ Name | Description BootstrapInitDB is the configuration of the bootstrap process when initdb is used Refer to the Bootstrap page of the documentation for more information. -Name | Description | Type --------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- -`database` | Name of the database used by the application. Default: `app`. - *mandatory* | string -`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string -`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*LocalObjectReference](#LocalObjectReference) -`redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool -`options ` | The list of options that must be passed to initdb when creating the cluster | []string - - - -## BootstrapPgBaseBackup - -BootstrapPgBaseBackup contains the configuration required to take a physical backup of an existing PostgreSQL cluster - -Name | Description | Type ------- | ----------------------------------------------------------------- | ------ -`source` | The name of the server of which we need to take a physical backup - *mandatory* | string +Name | Description | Type +-------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- +`database` | Name of the database used by the application. Default: `app`. - *mandatory* | string +`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string +`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`redwood ` | If we need to enable/disable Redwood compatibility. 
Requires EPAS and for EPAS defaults to true | *bool +`options ` | The list of options that must be passed to initdb when creating the cluster | []string @@ -196,45 +173,10 @@ Name | Description | Typ BootstrapRecovery contains the configuration required to restore the backup with the specified name and, after having changed the password with the one chosen for the superuser, will use it to bootstrap a full cluster cloning all the instances from the restored primary. Refer to the Bootstrap page of the documentation for more information. -Name | Description | Type --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- -`backup ` | The backup we need to restore - *mandatory* | [LocalObjectReference](#LocalObjectReference) -`recoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET | [*RecoveryTarget](#RecoveryTarget) - - - -## CertificatesConfiguration - -CertificatesConfiguration contains the needed configurations to handle server certificates. - -Name | Description | Type ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- -`serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. - -Contains: - -- `ca.crt`: CA that should be used to validate the server certificate, - used as `sslrootcert` in client connection strings. -- `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, - this can be omitted. | string -`serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string -`serverAltDNSNames` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | []string - - - -## CertificatesStatus - -CertificatesStatus contains configuration certificates and related expiration dates. 
- -Name | Description | Type --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- -`clientCASecret ` | The secret containing the Client CA certificate. This secret contains a self-signed CA and is used to sign TLS certificates used for client authentication. - -Contains: - -- `ca.crt`: CA that should be used to validate the client certificate, used as `ssl_ca_file`. - `ca.key`: key used to sign client SSL certs. | string -`replicationTLSSecret` | The secret of type kubernetes.io/tls containing the TLS client certificate to authenticate as `streaming_replica` user. | string -`expirations ` | Expiration dates for all certificates. | map[string]string +Name | Description | Type +-------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- +`backup ` | The backup we need to restore - *mandatory* | [corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`recoveryTarget` | By default the recovery will end as soon as a consistent state is reached: in this case that means at the end of a backup. This option allows to fine tune the recovery process | [*RecoveryTarget](#RecoveryTarget) @@ -244,7 +186,7 @@ Cluster is the Schema for the PostgreSQL API Name | Description | Type -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterSpec](#ClusterSpec) `status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterStatus](#ClusterStatus) @@ -256,7 +198,7 @@ ClusterList contains a list of Cluster Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) +`metadata` | Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) `items ` | List of clusters - *mandatory* | [[]Cluster](#Cluster) @@ -265,31 +207,29 @@ Name | Description ClusterSpec defines the desired state of Cluster -Name | Description | Type ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- -`description ` | Description of this PostgreSQL cluster | string -`imageName ` | Name of the container image, supporting both tags (`:`) and digests for deterministic and repeatable deployments (`:@sha256:`) | string -`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 -`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 -`instances ` | Number of instances required in the cluster - *mandatory* | int32 -`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 -`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 -`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) -`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration) -`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*LocalObjectReference](#LocalObjectReference) -`certificates ` | The configuration for the CA and related certificates | [*CertificatesConfiguration](#CertificatesConfiguration) -`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]LocalObjectReference](#LocalObjectReference) -`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) -`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 -`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 -`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) -`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. 
| [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#resourcerequirements-v1-core) -`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy -`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration) -`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow) -`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string -`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration) -`externalClusters ` | The list of external clusters which are used in the configuration | [[]ExternalCluster](#ExternalCluster) +Name | Description | Type +--------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- +`description ` | Description of this PostgreSQL cluster | string +`imageName ` | Name of the container image | string +`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 +`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 +`instances ` | Number of instances required in the cluster - *mandatory* | int32 +`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 +`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 +`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) +`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration) +`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. 
| [[]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) +`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 +`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 +`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) +`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core) +`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy +`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration) +`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow) +`licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string +`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration) @@ -315,17 +255,6 @@ Name | Description `phase ` | Current phase of the cluster | string `phaseReason ` | Reason for the current phase | string `secretsResourceVersion` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) -`certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus) - - - -## ConfigMapKeySelector - -ConfigMapKeySelector contains enough information to let you locate the key of a ConfigMap - -Name | Description | Type ---- | ----------------- | ------ -`key` | The key to select - *mandatory* | string @@ -340,41 +269,16 @@ Name | Description `immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. 
| bool `jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | *int32 - - -## ExternalCluster - -ExternalCluster represents the connection parameters of an external server which is used in the cluster configuration - -Name | Description | Type --------------------- | ---------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- -`name ` | The server name, required - *mandatory* | string -`connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string -`sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) -`sslKey ` | The reference to an SSL private key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) -`sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) -`password ` | The reference to the password to be used to connect to the server | [*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#secretkeyselector-v1-core) - - - -## LocalObjectReference - -LocalObjectReference contains enough information to let you locate a local object with a known type inside the same namespace - -Name | Description | Type ----- | --------------------- | ------ -`name` | Name of the referent. 
- *mandatory* | string - ## MonitoringConfiguration MonitoringConfiguration is the type containing all the monitoring configuration for a certain cluster -Name | Description | Type ----------------------- | ----------------------------------------------------- | ----------------------------------------------- -`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]ConfigMapKeySelector](#ConfigMapKeySelector) -`customQueriesSecret ` | The list of secrets containing the custom queries | [[]SecretKeySelector](#SecretKeySelector) +Name | Description | Type +---------------------- | ----------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- +`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]corev1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#configmapkeyselector-v1-core) +`customQueriesSecret ` | The list of secrets containing the custom queries | [[]corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) @@ -425,7 +329,7 @@ RollingUpdateStatus contains the information about an instance which is being up Name | Description | Type --------- | ----------------------------------- | ------------------------------------------------------------------------------------------------ `imageName` | The image which we put into the Pod - *mandatory* | string -`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) +`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) @@ -433,10 +337,10 @@ Name | Description | Type S3Credentials is the type for the credentials to be used to upload files to S3 -Name | Description | Type ---------------- | -------------------------------------- | --------------------------------------- -`accessKeyId ` | The reference to the access key id - *mandatory* | [SecretKeySelector](#SecretKeySelector) -`secretAccessKey` | The reference to the secret access key - *mandatory* | [SecretKeySelector](#SecretKeySelector) +Name | Description | Type +--------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- +`accessKeyId ` | The reference to the access key id - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) +`secretAccessKey` | The reference to the secret access key - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) @@ -446,7 +350,7 @@ ScheduledBackup is the Schema for the scheduledbackups API Name | Description | Type -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ -`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#objectmeta-v1-meta) +`metadata` | | 
[metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) `spec ` | Specification of the desired behavior of the ScheduledBackup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupSpec](#ScheduledBackupSpec) `status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupStatus](#ScheduledBackupStatus) @@ -458,7 +362,7 @@ ScheduledBackupList contains a list of ScheduledBackup Name | Description | Type -------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- -`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#listmeta-v1-meta) +`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) `items ` | List of clusters - *mandatory* | [[]ScheduledBackup](#ScheduledBackup) @@ -467,11 +371,11 @@ Name | Description ScheduledBackupSpec defines the desired state of ScheduledBackup -Name | Description | Type --------- | -------------------------------------------------------------------- | --------------------------------------------- -`suspend ` | If this backup is suspended of not | *bool -`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string -`cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) +Name | Description | Type +-------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- +`suspend ` | If this backup is suspended of not | *bool +`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string +`cluster ` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) @@ -481,19 +385,9 @@ ScheduledBackupStatus defines the observed state of ScheduledBackup Name | Description | Type ---------------- | -------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- -`lastCheckTime ` | The latest time the schedule | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) -`lastScheduleTime` | Information when was the last time that backup was successfully scheduled. 
| [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) -`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#time-v1-meta) - - - -## SecretKeySelector - -SecretKeySelector contains enough information to let you locate the key of a Secret - -Name | Description | Type ---- | ----------------- | ------ -`key` | The key to select - *mandatory* | string +`lastCheckTime ` | The latest time the schedule | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) @@ -501,15 +395,13 @@ Name | Description | Type SecretsResourceVersion is the resource versions of the secrets managed by the operator -Name | Description | Type ------------------------- | -------------------------------------------------------------------- | ------ -`superuserSecretVersion ` | The resource version of the "postgres" user secret - *mandatory* | string -`replicationSecretVersion` | The resource version of the "streaming_replication" user secret - *mandatory* | string -`applicationSecretVersion` | The resource version of the "app" user secret - *mandatory* | string -`caSecretVersion ` | Unused. Retained for compatibility with old versions. | string -`clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version - *mandatory* | string -`serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version - *mandatory* | string -`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version - *mandatory* | string +Name | Description | Type +------------------------ | ----------------------------------------------------------------- | ------ +`superuserSecretVersion ` | The resource version of the "postgres" user secret - *mandatory* | string +`replicationSecretVersion` | The resource version of the "streaming_replication" user secret - *mandatory* | string +`applicationSecretVersion` | The resource version of the "app" user secret - *mandatory* | string +`caSecretVersion ` | The resource version of the "ca" secret version - *mandatory* | string +`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version - *mandatory* | string @@ -522,7 +414,7 @@ Name | Description `storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | *string `size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. 
- *mandatory* | string `resizeInUseVolumes` | Resize existent PVCs, defaults to true | *bool -`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#persistentvolumeclaim-v1-core) +`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaim-v1-core) diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx index 814957ab9e9..4dccd20f717 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/architecture.mdx @@ -124,4 +124,3 @@ The `-app` credentials are the ones that should be used by applications connecting to the PostgreSQL cluster. The `-superuser` ones are supposed to be used only for administrative purposes. - diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx index 11e1806e562..a6288dce3c4 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/bootstrap.mdx @@ -4,11 +4,6 @@ originalFilePath: 'src/bootstrap.md' product: 'Cloud Native Operator' --- -!!! Note - When referring to "PostgreSQL cluster" in this section, the same - concepts apply to both PostgreSQL and EDB Postgres Advanced, unless - differently stated. - This section describes the options you have to create a new PostgreSQL cluster and the design rationale behind them. @@ -39,13 +34,9 @@ The `initdb` bootstrap method is used. We currently support the following bootstrap methods: -- `initdb`: initialize an empty PostgreSQL cluster -- `recovery`: create a PostgreSQL cluster by restoring from an existing backup - and replaying all the available WAL files or up to a given point in time -- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of the - same major version using `pg_basebackup` via streaming replication protocol - - useful if you want to migrate databases to Cloud Native PostgreSQL, even - from outside Kubernetes. +- `initdb`: initialise an empty PostgreSQL cluster +- `recovery`: create a PostgreSQL cluster restoring from an existing backup + and replaying all the available WAL files. ## initdb @@ -315,256 +306,3 @@ spec: targetName: "maintenance-activity" exclusive: false ``` - -## pg_basebackup - -The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as -an exact physical copy of an existing and **binary compatible** PostgreSQL -instance (*source*), through a valid *streaming replication* connection. -The source instance can be either a primary or a standby PostgreSQL server. - -The primary use case for this method is represented by **migrations** to Cloud Native PostgreSQL, -either from outside Kubernetes or within Kubernetes (e.g., from another operator). - -!!! Warning - The current implementation creates a *snapshot* of the origin PostgreSQL - instance when the cloning process terminates and immediately starts - the created cluster. See ["Current limitations"](#current-limitations) below for details. 
- -Similar to the case of the `recovery` bootstrap method, once the clone operation -completes, the operator will take ownership of the target cluster, starting from -the first instance. This includes overriding some configuration parameters, as -required by Cloud Native PostgreSQL, resetting the superuser password, creating -the `streaming_replica` user, managing the replicas, and so on. The resulting -cluster will be completely independent of the source instance. - -!!! Important - Configuring the network between the target instance and the source instance - goes beyond the scope of Cloud Native PostgreSQL documentation, as it depends - on the actual context and environment. - -The streaming replication client on the target instance, which will be -transparently managed by `pg_basebackup`, can authenticate itself on the source -instance in any of the following ways: - -1. via [username/password](#usernamepassword-authentication) -2. via [TLS client certificate](#tls-certificate-authentication) - -The latter is the recommended one if you connect to a source managed -by Cloud Native PostgreSQL or configured for TLS authentication. -The first option is, however, the most common form of authentication to a -PostgreSQL server in general, and might be the easiest way if the source -instance is on a traditional environment outside Kubernetes. -Both cases are explained below. - -### Requirements - -The following requirements apply to the `pg_basebackup` bootstrap method: - -- target and source must have the same hardware architecture -- target and source must have the same major PostgreSQL version -- source must not have any tablespace defined (see ["Current limitations"](#current-limitations) below) -- source must be configured with enough `max_wal_senders` to grant - access from the target for this one-off operation by providing at least - one *walsender* for the backup plus one for WAL streaming -- the network between source and target must be configured to enable the target - instance to connect to the PostgreSQL port on the source instance -- source must have a role with `REPLICATION LOGIN` privileges and must accept - connections from the target instance for this role in `pg_hba.conf`, preferably - via TLS (see ["About the replication user"](#about-the-replication-user) below) -- target must be able to successfully connect to the source PostgreSQL instance - using a role with `REPLICATION LOGIN` privileges - -!!! Seealso - For further information, please refer to the - ["Planning" section for Warm Standby](https://www.postgresql.org/docs/current/warm-standby.html#STANDBY-PLANNING), - the - [`pg_basebackup` page](https://www.postgresql.org/docs/current/app-pgbasebackup.html) - and the - ["High Availability, Load Balancing, and Replication" chapter](https://www.postgresql.org/docs/current/high-availability.html) - in the PostgreSQL documentation. - -### About the replication user - -As explained in the requirements section, you need to have a user -with either the `SUPERUSER` or, preferably, just the `REPLICATION` -privilege in the source instance. - -If the source database is created with Cloud Native PostgreSQL, you -can reuse the `streaming_replica` user and take advantage of client -TLS certificates authentication (which, by default, is the only allowed -connection method for `streaming_replica`). - -For all other cases, including outside Kubernetes, please verify that -you already have a user with the `REPLICATION` privilege, or create -a new one by following the instructions below. 
- -As `postgres` user on the source system, please run: - -```console -createuser -P --replication streaming_replica -``` - -Enter the password at the prompt and save it for later, as you -will need to add it to a secret in the target instance. - -!!! Note - Although the name is not important, we will use `streaming_replica` - for the sake of simplicity. Feel free to change it as you like, - provided you adapt the instructions in the following sections. - -### Username/Password authentication - -The first authentication method supported by Cloud Native PostgreSQL -with the `pg_basebackup` bootstrap is based on username and password matching. - -Make sure you have the following information before you start the procedure: - -- location of the source instance, identified by a hostname or an IP address - and a TCP port -- replication username (`streaming_replica` for simplicity) -- password - -You might need to add a line similar to the following to the `pg_hba.conf` -file on the source PostgreSQL instance: - -``` -# A more restrictive rule for TLS and IP of origin is recommended -host replication streaming_replica all md5 -``` - -The following manifest creates a new PostgreSQL 13.3 cluster, -called `target-db`, using the `pg_basebackup` bootstrap method -to clone an external PostgreSQL cluster defined as `source-db` -(in the `externalClusters` array). As you can see, the `source-db` -definition points to the `source-db.foo.com` host and connects as -the `streaming_replica` user, whose password is stored in the -`password` key of the `source-db-replica-user` secret. - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: target-db -spec: - instances: 3 - imageName: quay.io/enterprisedb/postgresql:13.3 - - bootstrap: - pg_basebackup: - source: source-db - - storage: - size: 1Gi - - externalClusters: - - name: source-db - connectionParameters: - host: source-db.foo.com - user: streaming_replica - password: - name: source-db-replica-user - key: password -``` - -All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 13.3). - -### TLS certificate authentication - -The second authentication method supported by Cloud Native PostgreSQL -with the `pg_basebackup` bootstrap is based on TLS client certificates. -This is the recommended approach from a security standpoint. - -The following example clones an existing PostgreSQL cluster (`cluster-example`) -in the same Kubernetes cluster. - -!!! Note - This example can be easily adapted to cover an instance that resides - outside the Kubernetes cluster. - -The manifest defines a new PostgreSQL 13.3 cluster called `cluster-clone-tls`, -which is bootstrapped using the `pg_basebackup` method from the `cluster-example` -external cluster. The host is identified by the read/write service -in the same cluster, while the `streaming_replica` user is authenticated -thanks to the provided keys, certificate, and certification authority -information (respectively in the `cluster-example-replication` and -`cluster-example-ca` secrets). 
- -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-clone-tls -spec: - instances: 3 - imageName: quay.io/enterprisedb/postgresql:13.3 - - bootstrap: - pg_basebackup: - source: cluster-example - - storage: - size: 1Gi - - externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: streaming_replica - sslmode: verify-full - sslKey: - name: cluster-example-replication - key: tls.key - sslCert: - name: cluster-example-replication - key: tls.crt - sslRootCert: - name: cluster-example-ca - key: ca.crt -``` - -### Current limitations - -#### Missing tablespace support - -Cloud Native PostgreSQL does not currently include full declarative management -of PostgreSQL global objects, namely roles, databases, and tablespaces. -While roles and databases are copied from the source instance to the target -cluster, tablespaces require a capability that this version of -Cloud Native PostgreSQL is missing: definition and management of additional -persistent volumes. When dealing with base backup and tablespaces, PostgreSQL -itself requires that the exact mount points in the source instance -must also exist in the target instance, in our case, the pods in Kubernetes -that Cloud Native PostgreSQL manages. For this reason, you cannot directly -migrate in Cloud Native PostgreSQL a PostgreSQL instance that takes advantage -of tablespaces (you first need to remove them from the source or, if your -organization requires this feature, contact EDB to prioritize it). - -#### Snapshot copy - -The `pg_basebackup` method takes a snapshot of the source instance in the form of -a PostgreSQL base backup. All transactions written from the start of -the backup to the correct termination of the backup will be streamed to the target -instance using a second connection (see the `--wal-method=stream` option for -`pg_basebackup`). - -Once the backup is completed, the new instance will be started on a new timeline -and diverge from the source. -For this reason, it is advised to stop all write operations to the source database -before migrating to the target database in Kubernetes. - -!!! Important - Before you attempt a migration, you must test both the procedure - and the applications. In particular, it is fundamental that you run the migration - procedure as many times as needed to systematically measure the downtime of your - applications in production. Feel free to contact EDB for assistance. - -Future versions of Cloud Native PostgreSQL will enable users to control -PostgreSQL's continuous recovery mechanism via Write-Ahead Log (WAL) shipping -by creating a new cluster that is a replica of another PostgreSQL instance. -This will open up two main use cases: - -- replication over different Kubernetes clusters in Cloud Native PostgreSQL -- *0 cutover time* migrations to Cloud Native PostgreSQL with the `pg_basebackup` - bootstrap method diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx deleted file mode 100644 index 30169f5b0a1..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: 'Certificates' -originalFilePath: 'src/certificates.md' -product: 'Cloud Native Operator' ---- - -Cloud Native PostgreSQL has been designed to natively support TLS certificates. 
-In order to set up a `Cluster`, the operator requires: - -- a server Certification Authority (CA) certificate -- a server TLS certificate signed by the server Certification Authority -- a client Certification Authority certificate -- a streaming replication client certificate generated by the client Certification Authority - -!!! Note - You can find all the secrets used by the cluster and their expiration dates - in the cluster's status. - -## Operator managed mode - -By default, the operator generates a single Certification Authority and uses it -for both client and server certificates, which are then managed and renewed -automatically. - -### Server CA Secret - -The operator generates a self-signed CA and stores it in a generic secret -containing the following keys: - -- `ca.crt`: CA certificate used to validate the server certificate, used as `sslrootcert` in clients' connection strings. -- `ca.key`: the key used to sign Server SSL certificate automatically - -### Server TLS Secret - -The operator uses the generated self-signed CA to sign a server TLS -certificate, stored in a Secret of type `kubernetes.io/tls` and configured to -be used as `ssl_cert_file` and `ssl_key_file` by the instances so that clients -can verify their identity and connect securely. - -### Server alternative DNS names - -You can specify DNS server alternative names that will be part of the -generated server TLS secret in addition to the default ones. - -## User-provided server certificate mode - -If required, you can also provide the two server certificates, generating them -using a separate component such as [cert-manager](https://cert-manager.io/). In -order to use a custom server TLS certificate for a Cluster, you must specify -the following parameters: - -- `serverTLSSecret`: the name of a Secret of type `kubernetes.io/tls`, - containing the server TLS certificate. It must contain both the standard - `tls.crt` and `tls.key` keys. -- `serverCASecret`: the name of a Secret containing the `ca.crt` key. - -!!! Note - The operator will still create and manage the two secrets related to client - certificates. - -See below for a complete example. - -### Example - -Given the following files: - -- `server-ca.crt`: the certificate of the CA that signed the server TLS certificate. -- `server.crt`: the certificate of the server TLS certificate. -- `server.key`: the private key of the server TLS certificate. - -Create a secret containing the CA certificate: - -``` -kubectl create secret generic my-postgresql-server-ca \ - --from-file=ca.crt=./server-ca.crt -``` - -Create a secret with the TLS certificate: - -``` -kubectl create secret tls my-postgresql-server \ - --cert=./server.crt --key=./server.key -``` - -Create a `Cluster` referencing those secrets: - -```bash -kubectl apply -f - < @@ -198,10 +196,9 @@ Native PostgreSQL's exporter: Similarly, the `pg_version` field of a column definition is not implemented. -## Monitoring the operator +# Monitoring the operator -The operator internally exposes [Prometheus](https://prometheus.io/) metrics -via HTTP on port 8080, named `metrics`. +The operator exposes [Prometheus](https://prometheus.io/) metrics via HTTP on port 8080, named `metrics`. Metrics can be accessed as follows: @@ -212,9 +209,9 @@ curl http://:8080/metrics Currently, the operator exposes default `kubebuilder` metrics, see [kubebuilder documentation](https://book.kubebuilder.io/reference/metrics.html) for more details. 
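Outside of a Prometheus deployment, the endpoint can also be checked by hand through a port-forward. A minimal sketch, assuming the operator was installed in the default `postgresql-operator-system` namespace with the standard kubebuilder-style deployment name (both are assumptions; adjust them to your installation):

```console
kubectl -n postgresql-operator-system port-forward \
  deployment/postgresql-operator-controller-manager 8080:8080 &
curl -s http://127.0.0.1:8080/metrics | head
```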
-### Prometheus Operator example +## Prometheus Operator example -The operator deployment can be monitored using the +The deployment operator can be monitored using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) by defining the following [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor) resource: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx index 14867b69545..d2d4c9f44fa 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx @@ -58,8 +58,7 @@ Community and published on Quay.io by EnterpriseDB. You can use any compatible image of PostgreSQL supporting the primary/standby architecture directly by setting the `imageName` attribute in the CR. The operator also supports `imagePullSecretsNames` -to access private container registries, as well as digests in addition to -tags for finer control of container image immutability. +to access private container registries. ### Labels and annotations @@ -131,11 +130,8 @@ allocated UID and SELinux context. The operator supports basic pod affinity/anti-affinity rules to deploy PostgreSQL pods on different nodes, based on the selected `topologyKey` (for example `node` or -`zone`). it supports node affinity/anti-affinity through the `nodeSelector` -configuration attribute, to be specified as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) -and tolerations through the `tolerations` configuration attribute, which will be added for all the pods created by the -operator related to a specific Cluster, using kubernetes [standard syntax](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). - +`zone`). Additionally, it supports node affinity through the `nodeSelector` +configuration attribute, as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). ### License keys diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx index df0455940ff..613db920160 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx @@ -71,17 +71,6 @@ The **default parameters for PostgreSQL 10 to 12** are: wal_keep_segments = '32' ``` -!!! Warning - It is your duty to plan for WAL segments retention in your PostgreSQL - cluster and properly configure either `wal_keep_segments` or `wal_keep_size`, - depending on the server version, based on the expected and observed workloads. - Until Cloud Native PostgreSQL supports replication slots, and if you don't have - continuous backup in place, this is the only way at the moment that protects - from the case of a standby falling out of sync and returning error messages like: - `"could not receive data from WAL stream: ERROR: requested WAL segment ************************ has already been removed"`. - This will require you to dedicate a part of your `PGDATA` to keep older - WAL segments for streaming replication purposes. 
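Where such WAL retention tuning is still required, the value can be raised declaratively through the cluster specification instead of editing `postgresql.conf` by hand. A sketch only, assuming a cluster named `cluster-example`, that the `cluster` resource name resolves to the operator's Cluster CRD, and PostgreSQL 12 or earlier (use `wal_keep_size` on PostgreSQL 13 and later):

```console
kubectl patch cluster cluster-example --type merge \
  -p '{"spec":{"postgresql":{"parameters":{"wal_keep_segments":"64"}}}}'
```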
- The following parameters are **fixed** and exclusively controlled by the operator: ```text @@ -93,7 +82,7 @@ hot_standby = 'true' listen_addresses = '*' port = '5432' ssl = 'on' -ssl_ca_file = '/controller/certificates/client-ca.crt' +ssl_ca_file = '/controller/certificates/ca.crt' ssl_cert_file = '/controller/certificates/server.crt' ssl_key_file = '/controller/certificates/server.key' unix_socket_directories = '/var/run/postgresql' diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx index fcd151e0b49..3918b32167c 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx @@ -181,5 +181,3 @@ spec: Never use tags like `latest` or `13` in a production environment as it might lead to unpredictable scenarios in terms of update policies and version consistency in the cluster. - For strict deterministic and repeatable deployments, you can add the digests - to the image name, through the `:@sha256:` format. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx index 114a8851414..666072a7322 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx @@ -6,55 +6,6 @@ product: 'Cloud Native Operator' History of user-visible changes for Cloud Native PostgreSQL. -## Version 1.5.0 - -**Release date:** 11 June 2021 - -Features: - -- Introduce the `pg_basebackup` bootstrap method to create a new PostgreSQL - cluster as a copy of an existing PostgreSQL instance of the same major - version, even outside Kubernetes -- Add support for Kubernetes’ tolerations in the `Affinity` section of the - `Cluster` resource, allowing users to distribute PostgreSQL instances on - Kubernetes nodes with the required taint -- Enable specification of a digest to an image name, through the - `:@sha256:` format, for more deterministic and - repeatable deployments - -Security Enhancements: - -- Customize TLS certificates to authenticate the PostgreSQL server by defining - secrets for the server certificate and the related Certification Authority - that signed it -- Raise the `sslmode` for the WAL receiver process of internal and - automatically managed streaming replicas from `require` to `verify-ca` - -Changes: - -- Enhance the `promote` subcommand of the `cnp` plugin for `kubectl` to accept - just the node number rather than the whole name of the pod -- Adopt DNS-1035 validation scheme for cluster names (from which service names - are inherited) -- Enforce streaming replication connection when cloning a standby instance or - when bootstrapping using the `pg_basebackup` method -- Integrate the `Backup` resource with `beginWal`, `endWal`, `beginLSN`, - `endLSN`, `startedAt` and `stoppedAt` regarding the physical base backup -- Documentation improvements: - - Provide a list of ports exposed by the operator and the operand container - - Introduce the `cnp-bench` helm charts and guidelines for benchmarking the - storage and PostgreSQL for database workloads -- E2E tests enhancements: - - Test Kubernetes 1.21 - - Add test for High Availability of the operator - - Add test for node draining -- Minor bug fixes, including: - - Timeout to pg_ctl start during recovery operations too short - - Operator not watching over direct events on PVCs - - Fix handling of 
`immediateCheckpoint` and `jobs` parameter in - `barmanObjectStore` backups - - Empty logs when recovering from a backup - ## Version 1.4.0 **Release date:** 18 May 2021 @@ -188,3 +139,4 @@ Kubernetes with the following main capabilities: - Support for synchronous replicas - Support for node affinity via `nodeSelector` property - Standard output logging of PostgreSQL error messages + diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx index 9fd4381fbe9..6257b775957 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/resource_management.mdx @@ -54,8 +54,7 @@ while creating a cluster: - Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do in a VM or physical machine scenario - see below). - Set up database server pods on a dedicated node using nodeSelector. - See the "nodeSelector" and "tolerations" fields of the - [“affinityconfiguration"](api_reference.md#affinityconfiguration) resource on the API reference page. + See the ["nodeSelector field of the affinityconfiguration resource on the API reference page"](api_reference.md#affinityconfiguration). You can refer to the following example manifest: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml deleted file mode 100644 index 5783b0c38c1..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# IMPORTANT: this configuration requires an appropriate line -# in the host-based access rules allowing replication connections -# to the postgres user. 
-# -# The following line met the requisites -# - "host replication postgres all md5" -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-clone-basicauth -spec: - instances: 3 - - bootstrap: - pg_basebackup: - source: cluster-example - - storage: - size: 1Gi - - externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: postgres - password: - name: cluster-example-superuser - key: password \ No newline at end of file diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml deleted file mode 100644 index 2b509e63c7f..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-clone-tls -spec: - instances: 3 - - bootstrap: - pg_basebackup: - source: cluster-example - - storage: - size: 1Gi - - externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: streaming_replica - sslmode: verify-full - sslKey: - name: cluster-example-replication - key: tls.key - sslCert: - name: cluster-example-replication - key: tls.crt - sslRootCert: - name: cluster-example-ca - key: ca.crt diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml index ce7e649538f..71e497e2baf 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml @@ -33,7 +33,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:13.3 + imageName: quay.io/enterprisedb/postgresql:13.2 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx index f811f77de14..c867b96398c 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx @@ -113,28 +113,13 @@ to enable/disable inbound and outbound network access at IP and TCP level. !!! Important The operator needs to communicate to each instance on TCP port 8000 - to get information about the status of the PostgreSQL server. Please - make sure you keep this in mind in case you add any network policy, - and refer to the "Exposed Ports" section below for a list of ports used by - Cloud Native PostgreSQL for finer control. + to get information about the status of the PostgreSQL server. Make sure + you keep this in mind in case you add any network policy. Network policies are beyond the scope of this document. Please refer to the ["Network policies"](https://kubernetes.io/docs/concepts/services-networking/network-policies/) section of the Kubernetes documentation for further information. 
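As a starting point for such a policy, the following sketch allows the operator to reach the instance status port while leaving PostgreSQL traffic rules to a separate policy. All names and labels are assumptions to adapt: instance pods are selected here by a `postgresql: cluster-example` label, the operator is assumed to run in `postgresql-operator-system`, and the `kubernetes.io/metadata.name` namespace label requires a recent Kubernetes release.

```bash
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-operator-status-probe
spec:
  podSelector:
    matchLabels:
      postgresql: cluster-example    # label assumed; verify it on your instance pods
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: postgresql-operator-system
      ports:
        - protocol: TCP
          port: 8000
EOF
```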
-#### Exposed Ports - -Cloud Native PostgreSQL exposes ports at operator, instance manager and operand -levels, as listed in the table below: - -System | Port number | Exposing | Name | Certificates | Authentication -:--------------- | :----------- | :------------------ | :------------------ | :------------ | :-------------- -operator | 9443 | webhook server | `webhook-server` | TLS | Yes -operator | 8080 | metrics | `metrics` | no TLS | No -instance manager | 9187 | metrics | `metrics` | no TLS | No -instance manager | 8000 | status | `status` | no TLS | No -operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes - ### PostgreSQL The current implementation of Cloud Native PostgreSQL automatically creates diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx index 3e422be50b8..bda8a53b6bb 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx @@ -84,7 +84,7 @@ spec: app: webtest spec: containers: - - image: quay.io/leonardoce/webtest:1.3.0 + - image: leonardoce/webtest:1.0.0 name: cert-test volumeMounts: - name: secret-volume-root-ca @@ -163,7 +163,7 @@ Output : version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 13.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx index a26ed1b545c..c3028831f95 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx @@ -35,37 +35,11 @@ guarantees higher and more predictable performance. !!! Warning Before you deploy a PostgreSQL cluster with Cloud Native PostgreSQL, - ensure that the storage you are using is recommended for database + make sure that the storage you are using is recommended for database workloads. Our advice is to clearly set performance expectations by first benchmarking the storage using tools such as [fio](https://fio.readthedocs.io/en/latest/fio_doc.html), and then the database using [pgbench](https://www.postgresql.org/docs/current/pgbench.html). -## Benchmarking Cloud Native PostgreSQL - -EDB maintains [cnp-bench](https://github.com/EnterpriseDB/cnp-bench), -an open source set of guidelines and Helm charts for benchmarking Cloud Native PostgreSQL -in a controlled Kubernetes environment, before deploying the database in production. - -Briefly, `cnp-bench` is designed to operate at two levels: - -- measuring the performance of the underlying storage using `fio`, with relevant - metrics for database workloads such as throughput for sequential reads, sequential - writes, random reads and random writes -- measuring the performance of the database using the default benchmarking tool - distributed along with PostgreSQL: `pgbench` - -!!! Important - Measuring both the storage and database performance is an activity that - must be done **before the database goes in production**. 
However, such results - are extremely valuable not only in the planning phase (e.g., capacity planning), - but also in the production lifecycle, especially in emergency situations - (when we don't have the luxury anymore to run this kind of tests). Databases indeed - change and evolve over time, so does the distribution of data, potentially affecting - performance: knowing the theoretical maximum throughput of sequential reads or - writes will turn out to be extremely useful in those situations. Especially in - shared-nothing contexts, where results do not vary due to the influence of external workloads. - **Know your system, benchmark it.** - ## Persistent Volume Claim The operator creates a persistent volume claim (PVC) for each PostgreSQL @@ -103,11 +77,6 @@ spec: size: 1Gi ``` -!!! Important - Cloud Native PostgreSQL has been designed to be storage class agnostic. - As usual, our recommendation is to properly benchmark the storage class - in a controlled environment, before hitting production. - ## Configuration via a PVC template To further customize the generated PVCs, you can provide a PVC template inside the Custom Resource, diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx index b9d1f93f494..f943440f93b 100644 --- a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx +++ b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -65,7 +65,7 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.5.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.4.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -245,7 +245,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.5.0 for v1.5.0/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.4.0 for v1.4.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` diff --git a/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx b/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx index bc8068886c5..565ac395002 100644 --- a/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx +++ b/product_docs/docs/epas/12/epas_compat_ora_dev_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: User Guide -title: "Database Compatibility for Oracle Developers Guide" +title: "Database Compatibility for Oracle Developer's Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/13/index.html" diff --git a/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx b/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx index d47d62c7dd1..8014fa788f5 100644 --- a/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx +++ b/product_docs/docs/epas/12/epas_compat_tools_guide/02_edb_loader.mdx @@ -24,7 +24,9 @@ These features are explained in detail in the following sections. !!! Note The following are important version compatibility restrictions between the EDB\*Loader client and the database server. -When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. **We strongly recommend that the version 12 EDB\*Loader client (the edbldr program supplied with Advanced Server 12) be used to load data only into version 12 of the database server. In general, the EDB\*Loader client and database server should be the same version.** +- When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. **We strongly recommend that the version 12 EDB\*Loader client (the edbldr program supplied with Advanced Server 12) be used to load data only into version 12 of the database server. In general, the EDB\*Loader client and database server should be the same version.** + +- Use of a version 12, 11, 10, or 9.6 EDB\*Loader client is not supported for Advanced Server with version 9.2 or earlier. diff --git a/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx b/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx index 06f493cd0e4..42126bf0504 100644 --- a/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx +++ b/product_docs/docs/epas/12/epas_compat_tools_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Tools and Utilities Guide -title: "Database Compatibility for Oracle Developers Tools and Utilities Guide" +title: "Database Compatibility for Oracle Developer’s Tools and Utilities Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/13/index.html" diff --git a/product_docs/docs/epas/12/epas_rel_notes/index.mdx b/product_docs/docs/epas/12/epas_rel_notes/index.mdx index f289efea3d8..b1688971515 100644 --- a/product_docs/docs/epas/12/epas_rel_notes/index.mdx +++ b/product_docs/docs/epas/12/epas_rel_notes/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Release Notes -title: "EDB Postgres Advanced Server Release Notes" +title: "EDB Postgres Advanced Server 12 Release Notes" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
diff --git a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx index 890ce29fbef..3f65778bdba 100644 --- a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx +++ b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/index.mdx @@ -1,5 +1,5 @@ --- -title: "EDB pgAdmin4 Quickstart Linux Guide" +title: "EDB pgAdmin4 Quickstart Linux Guide for EPAS" legacyRedirects: - "/edb-docs/d/pgadmin-4/quick-start/quick-start-guide/4.26/index.html" --- diff --git a/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx b/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx index 0911a75f66c..d340bafe379 100644 --- a/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx +++ b/product_docs/docs/epas/13/epas_compat_ora_dev_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: User Guide -title: "Database Compatibility for Oracle Developers Guide" +title: "Database Compatibility for Oracle Developer's Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/13/index.html" diff --git a/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx b/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx index 31b6f3cfc58..ba46c1c30da 100644 --- a/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx +++ b/product_docs/docs/epas/13/epas_compat_tools_guide/02_edb_loader.mdx @@ -34,6 +34,8 @@ These features are explained in detail in the following sections. psycopg2 copy_from ``` +- Use of a version 13, 12, 11, 10, or 9.6 EDB\*Loader client is not supported for Advanced Server with version 9.2 or earlier. + ## Data Loading Methods diff --git a/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx b/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx index b0b83983c5a..feb3a2d897f 100644 --- a/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx +++ b/product_docs/docs/epas/13/epas_compat_tools_guide/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Tools and Utilities Guide -title: "Database Compatibility for Oracle Developers Tools and Utilities Guide" +title: "Database Compatibility for Oracle Developer’s Tools and Utilities Guide" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-tools-and-utilities-guide/13/index.html" diff --git a/product_docs/docs/epas/13/epas_qs_windows/index.mdx b/product_docs/docs/epas/13/epas_qs_windows/index.mdx index 7bc8729be2e..9b64d41c4d8 100644 --- a/product_docs/docs/epas/13/epas_qs_windows/index.mdx +++ b/product_docs/docs/epas/13/epas_qs_windows/index.mdx @@ -30,6 +30,8 @@ Among the components that make up an Advanced Server deployment are: **Supporting Functions, Procedures, Data Types, Index Types, Operators, Utilities, and Aggregates** - Advanced Server includes a number of features that help you manage your data. +Please note: The `data` directory of a production database should not be stored on an NFS file system. 
+ **Installation Prerequisites** **User Privileges** diff --git a/product_docs/docs/epas/13/epas_rel_notes/index.mdx b/product_docs/docs/epas/13/epas_rel_notes/index.mdx index d68086306a3..254f0438167 100644 --- a/product_docs/docs/epas/13/epas_rel_notes/index.mdx +++ b/product_docs/docs/epas/13/epas_rel_notes/index.mdx @@ -1,6 +1,6 @@ --- navTitle: Release Notes -title: "EDB Postgres Advanced Server Release Notes" +title: "EDB Postgres Advanced Server 13 Release Notes" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. diff --git a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx index afaf5d7b204..4c9969fe4e6 100644 --- a/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx +++ b/product_docs/docs/hadoop_data_adapter/2.0.7/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The Hadoop Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x and 7.x > - CentOS 8.x and 7.x -> - OL 8.x and 7.x +> - OEL 8.x and 7.x > - Ubuntu 20.04 and 18.04 LTS > - Debian 10.x and 9.x diff --git a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx index 6078203da1c..810cc135c95 100644 --- a/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx +++ b/product_docs/docs/mongo_data_adapter/5.2.8/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MongoDB Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x/7.x > - CentOS 8.x/7.x -> - OL 8.x/7.x +> - OEL 8.x/7.x > - Ubuntu 20.04/18.04 LTS > - Debian 10.x/9.x diff --git a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx index 298bdbc1073..3b8ed138c80 100644 --- a/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.5.5/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MySQL Foreign Data Wrapper is supported on the following platforms: > - RHEL 8.x/7.x > - CentOS 8.x/7.x -> - OL 8.x/7.x +> - OEL 8.x/7.x > - Ubuntu 20.04/18.04 LTS > - Debian 10.x/9.x diff --git a/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx b/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx index 76e502543e2..c5d8ce0b2f9 100644 --- a/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx +++ b/product_docs/docs/mysql_data_adapter/2.6.0/02_requirements_overview.mdx @@ -14,7 +14,7 @@ The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9. - RHEL 8.x/7.x - CentOS 8.x/7.x -- OL 8.x/7.x +- OEL 8.x/7.x - Ubuntu 20.04/18.04 LTS - Debian 10.x/9.x @@ -24,7 +24,7 @@ The MySQL Foreign Data Wrapper is certified with EDB Postgres Advanced Server 9. - RHEL 7.x - CentOS 7.x -- OL 7.x +- OEL 7.x - Ubuntu 18.04 LTS - Debian 10.x/9.x