diff --git a/.github/workflows/deploy-develop.yml b/.github/workflows/deploy-develop.yml
index 803a9d07ac0..1986c4459a7 100644
--- a/.github/workflows/deploy-develop.yml
+++ b/.github/workflows/deploy-develop.yml
@@ -42,6 +42,16 @@ jobs:
run: |
npm rebuild
+ - name: Checking Gatsby cache
+ id: gatsby-cache-build
+ uses: actions/cache@v3
+ with:
+ path: |
+ public/*
+ !public/pdfs
+ .cache
+ key: ${{ runner.os }}-gatsby-build-develop
+
- uses: actions/setup-python@v4
with:
python-version: "3.x"
diff --git a/.github/workflows/deploy-draft.yml b/.github/workflows/deploy-draft.yml
index 5e499e6a8b3..a343c3137ac 100644
--- a/.github/workflows/deploy-draft.yml
+++ b/.github/workflows/deploy-draft.yml
@@ -56,9 +56,12 @@ jobs:
uses: actions/cache@v3
with:
path: |
- public
+ public/*
+ !public/pdfs
.cache
key: ${{ runner.os }}-gatsby-build-draft-${{ github.head_ref }}
+ restore-keys: |
+ ${{ runner.os }}-gatsby-build-develop
- name: Fix mtimes
run: npm run fix-mtimes
diff --git a/.husky/post-checkout b/.husky/post-checkout
index c37815e2b56..ca7fcb40088 100755
--- a/.husky/post-checkout
+++ b/.husky/post-checkout
@@ -1,3 +1,3 @@
#!/bin/sh
-command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/post-checkout'.\n"; exit 2; }
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-checkout "$@"
diff --git a/.husky/post-commit b/.husky/post-commit
index e5230c305f9..52b339cb3f4 100755
--- a/.husky/post-commit
+++ b/.husky/post-commit
@@ -1,3 +1,3 @@
#!/bin/sh
-command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/post-commit'.\n"; exit 2; }
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-commit "$@"
diff --git a/.husky/post-merge b/.husky/post-merge
index c99b752a527..a912e667aa3 100755
--- a/.husky/post-merge
+++ b/.husky/post-merge
@@ -1,3 +1,3 @@
#!/bin/sh
-command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/post-merge'.\n"; exit 2; }
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs post-merge "$@"
diff --git a/.husky/pre-push b/.husky/pre-push
index 216e91527e6..0f0089bc25d 100755
--- a/.husky/pre-push
+++ b/.husky/pre-push
@@ -1,3 +1,3 @@
#!/bin/sh
-command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/pre-push'.\n"; exit 2; }
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\n"; exit 2; }
git lfs pre-push "$@"
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 00000000000..907737c5090
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,5 @@
+# First pass basic codeowners file
+
+product_docs/docs/pgd/ @djw-m
+product_docs/docs/epas/ @nidhibhammar
+product_docs/docs/biganimal/ @drothery-edb
diff --git a/README.md b/README.md
index 569ae835259..047b5a9ca1d 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ At this point you have a couple options.
1. Run `npm run start`. The application will start in the background and take a few minutes to load.
-1. You can view logs and monitor the startup process by running `npm run logs`. Once it's finished it can be accessed at `http://localhost:8000/`.
+1. You can view logs and monitor the startup process by running `npm run logs`. Once it's finished it can be accessed at [http://localhost:8000/](http://localhost:8000).
#### Additional Commands and Options for the Docker Environment
@@ -63,7 +63,7 @@ At this point you have a couple options.
If you find that the container crashes frequently or see that your container has exited with code 137, increasing the Docker memory should help. Allocating at least 4GB is recommended.
-1. open Docker Desktop
+1. Open Docker Desktop
1. Go to Preferences (gear icon in the top right corner)
@@ -283,7 +283,7 @@ If you need to setup a redirect from Docs 1.0 to Docs 2.0 manually, this is the
# MDX Format
-Documentation must be formatted as an [MDX file](https://www.gatsbyjs.com/docs/mdx/writing-pages/) with the `.mdx` extension. MDX is a superset of [Markdown](https://www.markdownguide.org/).
+Documentation must be formatted as an [MDX file](https://www.gatsbyjs.com/docs/how-to/routing/mdx/) with the `.mdx` extension. MDX is a superset of [Markdown](https://www.markdownguide.org/). See [What is MDX?](https://mdxjs.com/docs/what-is-mdx/) for a detailed explanation of the format.
## Frontmatter
@@ -367,18 +367,18 @@ Content is indexed for search when the production site builds.
To contribute content to this site submit as a pull request (PR). There are two options for this:
-Option 1: locally
+Option 1: Making changes locally
1. [Clone](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/cloning-a-repository) this repository.
2. [Make a new branch.](https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging)
3. Add commits to branch and [push to GitHub](https://git-scm.com/book/en/v2/Git-Branching-Remote-Branches).
4. Create a new PR on GitHub.
-Option 2: on GitHub
+Option 2: Editing files on GitHub
1. Edit a file on GitHub.
2. Submit changes as a PR on a new branch.
### Style Guide for EDB contributors
-See [EDB documentation style guide](https://enterprisedb.atlassian.net/wiki/spaces/DCBC/pages/2387870239/Documentation+Style+Guide).
+See [EDB documentation style guide](https://www.enterprisedb.com/docs/community/contributing/styleguide/).
diff --git a/advocacy_docs/community/contributing/index.mdx b/advocacy_docs/community/contributing/index.mdx
index 7c09679cf3e..1144730282a 100644
--- a/advocacy_docs/community/contributing/index.mdx
+++ b/advocacy_docs/community/contributing/index.mdx
@@ -1,25 +1,27 @@
---
-title: We love feedback!
-navTitle: Feedback
+title: Contributing Feedback and More
+navTitle: Contributing
description: Guidelines for offering feedback to EDB Docs.
iconName: IdeaSharing
---
All feedback is welcome:
-1. an idea for something new
-2. you feel there's something missing
-3. a mistake
+Is it a simple edit for a typo? Use the **Suggest Edits** button to propose a change to the docs, which we'll review before including it in the next docs update.
-Github Issues is the fastest way to get that feedback in front of us.
+If, on the other hand, you:
-[Open an issue on Github →](https://github.com/rocketinsights/edb_docs_advocacy/issues)
+1. have an idea for something new
+2. feel there's something missing
+3. can see a mistake that's more than a typo
----
+Then GitHub Issues is the fastest way to get that feedback in front of us.
-### Have code or content to contribute?
+[Open an issue on GitHub →](https://github.com/EnterpriseDB/docs/issues)
-The only things we love more than feedback are pull requests. Check out [the README on Github](https://github.com/EnterpriseDB/docs) to get started.
+### Have code or content to contribute?
+The only things we love more than feedback are pull requests.
+Check out [the README on Github](https://github.com/EnterpriseDB/docs) to get started.
diff --git a/advocacy_docs/community/contributing/repo.mdx b/advocacy_docs/community/contributing/repo.mdx
index 879e228bd52..36c5d4ad8be 100644
--- a/advocacy_docs/community/contributing/repo.mdx
+++ b/advocacy_docs/community/contributing/repo.mdx
@@ -88,4 +88,4 @@ You'll make edits and additions via your IDE (VS Code). We recommend using [Gith
## Further reading
-- [Format & Style](/community/contribute/style)
+- [Format & Style](/community/contributing/styleguide)
diff --git a/advocacy_docs/community/contributing/styleguide.mdx b/advocacy_docs/community/contributing/styleguide.mdx
new file mode 100644
index 00000000000..6ea7ba2c3d9
--- /dev/null
+++ b/advocacy_docs/community/contributing/styleguide.mdx
@@ -0,0 +1,530 @@
+---
+title: Documentation Style Guide
+navTitle: Documentation Style Guide
+iconName: Docs
+description: Our style guide to help you get your writing right
+indexdepth: 3
+rootisheading: true
+deepToC: true
+---
+
+
+## Introduction
+
+EDB docs follow the 5 Cs of technical writing:
+
+* Clear
+
+* Correct
+
+* Concise
+
+* Complete
+
+* Consistent
+
+
+Follow these guidelines to ensure consistency.
+
+Included in this guide:
+
+* 1 [Language and tone](#language-and-tone)
+ * 1.1 [Tense and voice](#tense-and-voice)
+ * 1.2 [Person](#person)
+ * 1.3 [Sentence length](#sentence-length)
+ * 1.4 [Contractions](#contractions)
+ * 1.5 [Latin abbreviations](#latin-abbreviations)
+ * 1.6 [Em-dashes and en-dashes](#em-dashes-and-en-dashes)
+ * 1.7 [Numbers](#numbers)
+* 2 [Capitalization and punctuation](#capitalization-and-punctuation)
+* 3 [Topic structure](#topic-structure)
+* 4 [Verbiage](#verbiage)
+ * 4.1 [Future and conditional tenses](#future-and-conditional-tenses)
+ * 4.2 [Empty phrases](#empty-phrases)
+ * 4.3 [Weak sentence starters](#weak-sentence-starters)
+ * 4.4 [“This” without a noun](#this-without-a-noun)
+ * 4.5 [Misplaced modifiers](#misplaced-modifiers)
+ * 4.6 [Hyphen use](#hyphen-use)
+ * 4.6.1 [With a prefix](#with-a-prefix)
+ * 4.6.2 [With compound adjectives](#with-compound-adjectives)
+ * 4.7 [Directing users up and down through a topic](#directing-users-up-and-down-through-a-topic)
+ * 4.8 [Select versus click](#select-versus-click)
+* 5 [Common errors/words to avoid](#common-errorswords-to-avoid)
+ * 5.1 [Login and log in](#login-and-log-in)
+ * 5.2 [Setup and set up](#setup-and-set-up)
+ * 5.3 [Words to avoid](#words-to-avoid)
+* 6 [Headings](#headings)
+* 7 [Font treatments](#font-treatments)
+    * 7.1 [Bold (\*\*text\*\*)](#bold-text)
+ * 7.2 [Courier aka code or monospace ('text')](#courier-aka-code-or-monospace-text)
+ * 7.3 [Italics (*text*)](#italics-text)
+ * 7.4 [Underline](#underline)
+* 8 [Links](#links)
+* 9 [Admonitions: notes, tips, and warnings](#admonitions-notes-tips-and-warnings)
+ * 9.1 [Notes](#notes)
+ * 9.2 [Tips](#tips)
+ * 9.3 [Warnings](#warnings)
+* 10 [Code](#code)
+    * 10.1 [Inline code](#inline-code)
+    * 10.2 [Code blocks](#code-blocks)
+* 11 [Tables](#tables)
+    * 11.1 [Markdown](#markdown)
+* 12 [Lists](#lists)
+* 13 [Images](#images)
+* 14 [Dates](#dates)
+
+## Language and tone
+
+EDB docs are helpful, humble, positive, and friendly. To achieve this, write topics that are factual and free of hyperbole and wordiness.
+
+Where possible, use active voice instead of passive.
+
+### Tense and voice
+
+For reference and general task-based docs, use the second-person [imperative present tense](https://en.wikipedia.org/wiki/Imperative_mood), also known as "imperative mood." These docs should be straightforward and conventional.
+
+**Example:** Use the following command to create a user:
+
+`CREATE USER john IDENTIFIED BY abc;`
+
+For tutorials, the docs can be more casual and conversational but must also be straightforward and clear.
+
+**Example:** In this lab, start with a fresh cluster. Make sure to stop and clean up the cluster from the previous labs.
+
+### Person
+
+Use second person (you) when referring to the user. Don’t use “the user,” which is third person, unless you are talking about the customer’s user.
+
+Use first person plural (we) to refer to EDB. For example, use:
+
+We recommend that you restart your server.
+
+Instead of
+
+EDB recommends that you restart your server.
+
+However, don’t use first person plural when talking about how the software works or in an example. For example:
+
+Instead of:
+
+Next, we process the instruction.
+
+Use:
+
+Next, Barman processes the instruction.
+
+Instead of:
+
+Next, we enter the following the information:
+
+Use:
+
+Enter the following information:
+
+### Sentence length
+
+Use simple and direct language and keep your sentences short. Avoid combining sentences, which makes the content complicated. Maximum sentence length is 26 words when possible.
+
+### Contractions
+
+In keeping with the casual and friendly tone, use contractions. However, use common contractions (isn’t, can’t, don’t). Don't use contractions that are unclear or difficult to pronounce (there’ll).
+
+### Latin abbreviations
+
+Don’t use the Latin abbreviations i.e. and e.g. Use “that is” and “for example” instead.
+
+### Em-dashes and en-dashes
+
+Avoid using em-dashes to set off phrases within a sentence, which creates a complicated sentence structure and can be difficult to translate. You can use em-dashes for definition lists such as:
+
+* Autonomous — Use to create autonomous calls to the server.
+
+
+Use spaces around em-dashes in a definition list. Otherwise, don't put spaces around em-dashes.
+
+To create an em-dash, use the character entity `&mdash;`.
+
+Use en-dashes to mean “through,” for example, items 1–10. Don’t use en-dashes otherwise. (There's only one other use of en-dashes, and it doesn't typically come up in technical writing.) To create an en-dash, use the character entity `&ndash;`.
+
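+For example, the Markdown source for the definition-list entry and the numeric range above might look like this (a minimal sketch):
+
+```
+* Autonomous &mdash; Use to create autonomous calls to the server.
+
+See items 1&ndash;10 for details.
+```
+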
+### Numbers
+
+Spell out numbers zero through nine. Use digits for numbers 10 and greater. Spell out any number that starts a sentence. For this reason, avoid starting a sentence with a long or complex number.
+
+## Capitalization and punctuation
+
+Capitalization rules:
+
+* Use sentence-case for headings (including column headings in tables).
+
+* Capitalize the first letter in each list item except for function and command names that are naturally lowercase.
+
+* Capitalize link labels to match the case of the topic you're linking to.
+
+* Capitalize proper nouns and match the case of UI features:
+
+ **Examples:** EDB, the Overview dashboard, the SQL Queries graph
+
+* Don’t capitalize the words that make up an initialism unless they're part of a proper noun. For example, single sign-on is not a proper noun even though it is usually written as the initialism SSO.
+
+
+Punctuation rules:
+
+* Avoid semicolons. Instead, use two sentences.
+
+* Don’t join related sentences using a comma. This syntax is incorrect.
+
+* Don't end headings with a period or colon.
+
+* Use periods at the end of list items that are a sentence or that complete a sentence. If one item in a list uses a period, use a period for all the items in that list.
+
+* Use the [Oxford (a.k.a. serial) comma](https://en.wikipedia.org/wiki/Serial_comma).
+
+
+## Topic structure
+
+* Procedure headings use gerunds, for example, _Modifying your cluster_.
+
+* Use a stem sentence to introduce a procedure only if multiple paragraphs of text fall between the heading and the start of the procedure. The stem sentence helps to reorient the user when the heading might have scrolled off the screen. A stem sentence starts with “To” and ends with a colon:
+ To modify your cluster:
+
+* In general, include text between a heading and any subheadings. However, if such text is superfluous, a subhead can directly follow the head.
+
+
+See also [Headings](#headings).
+
+## Verbiage
+
+Use language that's precise and informative.
+
+### Future and conditional tenses
+
+Avoid future tense (will) and conditional tenses (would, could, should). These tenses lack precision and can create passive voice. You can use future tense when an action occurs in the future, for example, “This feature will be removed in a future release.”
+
+### Empty phrases
+
+Phrases like, “This section tells you about how to \[do something\]” are empty and don’t impart any real information. The title of the chapter or section tells you what the section is about. These phrases describe the documentation (documenting the documentation) rather than the product or user actions.
+
+Replace these empty phrases with wording that focuses on the product or process. So instead of:
+
+This chapter is divided into five sections. Each section tells you about part of the process.
+
+Write:
+
+To complete the process, perform these five steps:
+
+You can then link to each of the sections.
+
+### Weak sentence starters
+
+“There is” and “there are” are [weak sentence starters](http://www2.ensc.sfu.ca/~whitmore/courses/style/templates/sentence.html). Avoid starting sentences this way.
+
+### “This” without a noun
+
+Avoid using “this” without a noun following. Doing so can lead to ambiguity. For example, instead of:
+
+This happens when…
+
+Write:
+
+This error happens when…
+
+### Misplaced modifiers
+
+Make sure the word “only” precedes the word or expression you mean to modify. For example, instead of:
+
+This condition only happens after you select **Okay**.
+
+Write:
+
+This condition happens only after you select **Okay**.
+
+### Hyphen use
+
+#### With a prefix
+
+Don't use hyphens with prefixes such as re, non, multi, and pre unless needed for readability or to eliminate ambiguity. Often, when two vowels end up together, a hyphen is needed, for example, multi-instance. However, preexisting is a legitimate word; don’t hyphenate it. Re-create (create again) requires a hyphen to avoid confusion with recreate (play). You can check many words using a spell checker. For example, nonexistent is not flagged by the spell checker.
+
+If you're unsure whether to include a hyphen, check with your editor or google the word without the hyphen.
+
+#### With compound adjectives
+
+A compound adjective is formed when two words together describe a noun, for example, _red-bellied warbler_. Don’t use a hyphen when an adverb and a verb together describe a noun. The adverb describes the verb and doesn’t need a hyphen to create the relationship between the words. An example is _finely tuned settings_.
+
+### Directing users up and down through a topic
+
+Don’t use words like “below” and “above” to refer to previous and following sections. Link to the section instead.
+
+It also isn't necessary to use the words “the following” to refer to list items. These words are empty. So, for example, instead of:
+
+The palette includes the following colors:
+
+Write:
+
+The color palette includes:
+
+### Select versus click
+
+House style is to use “select” instead of “click” to allow for mobile-device use.
+
+## Common errors/words to avoid
+
+Avoid these common errors and wording issues.
+
+### Login and log in
+
+The verb form is “log in”:
+
+To log in to the system…
+
+The adjective form is “login”:
+
+At the login screen, enter your username.
+
+### Setup and set up
+
+The verb form is “set up”:
+
+To set up your environment…
+
+The noun form is “setup”:
+
+Check your setup for errors.
+
+### Words to avoid
+
+Don't use:
+
+* Please
+
+* Note that
+
+* In order to (just use “to”)
+
+
+## Headings
+
+Use headings to create a hierarchy that readers can navigate to find information more easily.
+
+In Markdown, headings are denoted by number signs (`#`) followed by one space. Enter a line break between a heading and its content. EDB docs use Heading 2 (`##`), Heading 3 (`###`) and Heading 4 (`####`). Use Heading 4 sparingly.
+
+Heading 1 is reserved for page titles. You can denote anything below Heading 4 using bold text or other layout options. (Consider redesigning the material.)
+
+Examples:
+
+* `## This is heading 2`
+
+* `### This is heading 3`
+
+* `## Step 2. This is a step in a tutorial`
+
+
+## Font treatments
+
+Don’t use any font treatments for:
+
+* Roles
+
+* User names (for example, edb_admin)
+
+* Permissions
+
+* Window or dialog box names
+
+
+### Bold (\*\*text\*\*)
+
+Use for UI elements. For menu items, include a greater-than sign: Select **File > Save**.
+
+### Courier aka code or monospace (`'text'`)
+
+Use for text entered in text boxes, parameters, commands, text in configuration files, and file paths. Don’t use for utility names.
+
+If you need to enter a `value in a field`, type the `ls` or `dd` command, add a setting to a `configuration=file`, or just refer to `/etc/passwd`, then this is the font treatment to use.
+
+See [Code](#code) for more information.
+
+### Italics (\*text\*)
+
+Use for book titles and the first instance of terms. Don't use italics for keywords.
+
+### Underline
+
+Don't use underlined text in EDB docs.
+
+## Links
+
+Whenever an EDB feature is referenced, provide a link to the relevant documentation. For example, “BDR (Bi-Directional Replication) dashboards and probes to monitor status and activities for Admin, Nodes, and Groups. See [Monitoring BDR Nodes](https://www.enterprisedb.com/docs/pem/latest/pem_ent_feat/17_monitoring_BDR_nodes/).”
+
+Avoid using the URL as the label. For example,
+
+**Best practice:** For information about the platforms and versions supported by PEM, see [Product Compatibility](https://www.enterprisedb.com/services-support/edb-supported-products-and-platforms) on the EnterpriseDB website.
+
+**Avoid:** For information about the platforms and versions supported by PEM, visit the EnterpriseDB website at: [https://www.enterprisedb.com/services-support/edb-supported-products-and-platforms](https://www.enterprisedb.com/services-support/edb-supported-products-and-platforms).
+
+You can also provide links to external resources, but only if the resource is vetted and no EDB documentation covers the topic. For example: “Information about managing authentication is also available in the [Postgres core documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html).”
+
+If you're referring to a guide on Docs 2.0, the label is the name of the guide and in italics. For example, “For information about modifying the `pg_hba.conf` file, see the [_PEM Administrator's Guide_](https://www.enterprisedb.com/docs/pem/latest/pem_admin/).”
+
+Link capitalization can be either title- or sentence-case:
+
+* **Use title-case** and _italics_ when referring to the linked doc by name. For example, “For information about modifying the `pg_hba.conf` file, see the [_PEM Administrator's Guide_](https://www.enterprisedb.com/docs/pem/latest/pem_admin/).”
+
+* **Use sentence-case** when linking in the middle of a sentence. For example, “\[…\] follow the identifier rules when creating \[…\].”
+
+
+Addresses are relative. In these examples of links to topics, “folder” means the folder in the repo such as the product folder or the guide folder. For the destination topic, use the name of the file without the .mdx extension. If the destination includes a topic\_identifier (sub-section of a file), include the topic\_identifier prefixed with a # sign, such as in “/09\_controlling\_logging/#enabling_syslog.”
+
+| Link type | Syntax | Example | Source path | Destination path |
+| --- | --- | --- | --- | --- |
+| Another topic in the same folder | `[here](file_name)` | `[Using the EFM Utility](07_using_efm_utility/#using_efm_utility)` | /efm/4.2/efm\_user/07\_using_efm.mdx | /efm/4.2/efm\_user/07\_using\_efm\_utility.mdx |
+| Another topic in a different folder at the same level | `[here](../dest_folder_name/file_name)` | \[The ERD Tool\](../pem\_ent\_feat/04\_pem\_erd_tool/) | /pem/8/pem\_rel\_notes/08\_810\_rel_notes.mdx | /pem/8/pem\_ent\_feat/04\_pem\_erd_tool.mdx |
+| Another topic in a different folder at a different level | `[here](../../folder_name/file_name)` | \[Enabling syslog Log File Entries\](../../09\_controlling\_logging/#enabling_syslog) | /efm/4.2/efm\_user/04\_configuring\_efm/01\_cluster_properties/index.mdx | /efm/4.2/efm\_user/09\_controlling\_logging.mdx#enabling\_syslog |
+
+* To link to a specific heading on another page, use the name of the file plus the heading.
+
+ **Example:** `[xyz](file_name#heading-on-page)`
+
+* To link to a specific heading on the current page, use just the heading.
+
+ **Example:** `[xyz](#heading-on-page)`
+
+* To link to a specific location on a page that isn't a heading (for example, a specific command-line flag in a table), add a manual anchor and use the `id` parameter:
+
+ **Example:**
+
+    Anchor: ``<a id="flags-max-offset"></a>`--max-offset` ``
+
+    Link: `[--max-offset](#flags-max-offset)`
+
+
+## Admonitions: notes, tips, and warnings
+
+Our docs currently use notes, tips, and warnings.
+
+See [https://github.com/EnterpriseDB/docs/blob/develop/README.md](https://github.com/EnterpriseDB/docs/blob/develop/README.md) for more information on admonitions.
+
+For multiple, consecutive admonitions, use separate admonitions. If there are more than two consecutive admonitions, consider adding a subsection called **Additional notes** or **Additional information**. Admonitions can contain bullets and code, but consider instead adding a subsection (or whether an admonition is the appropriate mechanism) to keep the formatting simple.
+
+### Notes
+
+Use notes to call attention to a piece of clarifying information. This information isn't crucial to accomplishing the task in the document.
+
+For example, you might use a note to let users know that the `DELETE` command deletes only rows and that to delete columns you must use `ALTER TABLE`. This information helps to clarify the command's purpose and point users to the right place.
+
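+In the source, a note like that one might be written with the `!!!` admonition syntax described in the README (a minimal sketch):
+
+```
+!!! Note
+    The `DELETE` command deletes only rows. To delete columns, use `ALTER TABLE`.
+```
+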
+### Tips
+
+Use tips to indicate something new added in a particular release when working in the major version of the documentation for products that adhere to semantic versioning. See the PEM documentation for examples. You can also use tips for information that might improve productivity.
+
+### Warnings
+
+Use warnings to express that a piece of information is critical to preventing unexpected things from happening.
+
+For example, you might include a warning that using `CASCADE` in `DROP INDEX` drops dependent objects without warning. This is critical to prevent users from unexpectedly losing constraints or additional indexes.
+
+## Code
+
+Code can be shown inline or as a code block.
+
+### Inline code
+
+Inline `code` has backticks (`` ` ``) around it and is used when referring to code, commands, or other technical syntax within a sentence.
+
+Example: The `CREATE TABLE` statement creates a new table in a database.
+
+### Code blocks
+
+Code blocks provide executable code samples, marked with an opening and closing set of three backticks (` ``` `). Code blocks can support syntax highlighting if you add the language name immediately after the opening set of backticks. Use one empty line before and after a code block for better Markdown readability. For example:
+
+```
+This is a sample line of text.
+
+~~~shell
+$ go get -u github.com/lib/pq
+~~~
+
+This is more sample text.
+```
+
+Use syntax highlighting for configuration file, shell, and SQL commands, where appropriate, as follows.
+
+**Shell code samples**
+
+Start shell code samples with ` ```shell` followed by a line break. Use the terminal marker `$` as the first character of the next line. For multi-line shell commands, use a backslash (`\`) at the end of each line to indicate a line break.
+
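+A minimal sketch following these rules (the connection values are illustrative only):
+
+```shell
+$ psql -h localhost \
+  -U enterprisedb \
+  -d edb
+```
+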
+**SQL code samples**
+
+SQL code samples are broken into two sections: commands and responses.
+
+* **Commands** (for example, `SELECT`, `CREATE TABLE`) begin with ` ```sql` followed by a line break. The first character of the next line should be the terminal marker `>`. Capitalize commands properly. Use only one command per code sample.
+
+* **Responses** (for example, retrieved tables): Add `__OUTPUT__` on a line between the command and the output, and you'll get highlighting for the code but not the output. For example:
+
+```sql
+SELECT slot_name, slot_type, database, active FROM pg_replication_slots ORDER BY 1;
+__OUTPUT__
+ slot_name   | slot_type | database  | active
+-------------+-----------+-----------+--------
+ xdb_47877_5 | logical   | MMRnode_a | t
+ xdb_47878_5 | logical   | MMRnode_b | t
+ xdb_47879_5 | logical   | MMRnode_c | t
+(3 rows)
+```
+
+
+**Configuration file samples**
+
+For files that have key-value pairs, use ` ```ini`. For example:
+
+```ini
+promotable=false
+auto.reconfigure=false
+```
+
+## Tables
+
+Use tables to display structured information in an easy-to-read format. There are two types of tables we use: Markdown and HTML.
+
+### Markdown
+
+If table formatting can be kept simple (for example, basic text formatting and using `<br/>` tags for paragraph breaks), create a table using Markdown. This is the preferred table format.
+
+To create a table, use pipes (`|`) between columns and at least 3 dashes (`-`) separating the header cells from the body cells. A return denotes the start of the next row. The text in each column doesn't need to align to render correctly, and you can use inline Markdown or HTML.
+
+We don’t use outer pipes.
+
+Example:
+
+```
+Term      | Description                 | Example
+----------|-----------------------------|----------------
+`term_1`  | This is a description.      | `3.14`
+`term_2`  | This is also a description. | `"lola mcdog"`
+```
+
+## Lists
+
+EDB docs uses two types of lists:
+
+* **Numbered** (ordered) — Use to list information that should appear in order, like tutorial steps.
+
+* **Bulleted** (unordered) — Use to list related information in an easy-to-read way.
+
+Introduce lists with a sentence and a colon. Use periods at the end of list items that are a sentence or that complete a sentence.
+
+
+For each item of a **numbered list**, use `1.` followed by a space, for example, `1. This is a numbered list`. Markdown renders the steps in the correct order.
+
+For each item of a **bulleted list**, use one dash followed by one space to denote a list item, for example, `- This is a bulleted list`.
+
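+For example, a numbered list in which every item is written with `1.` still renders as 1, 2, 3 (a minimal sketch):
+
+```
+1. Install the server.
+1. Initialize the cluster.
+1. Start the service.
+```
+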
+## Images
+
+Use images to clarify a topic, but use them only as needed. Images are either:
+
+* **Screenshots** — Provide a UI visual. Don’t use to show dialog boxes and parts of the UI a user can see for themselves, as these are hard to maintain and don’t provide useful information. If a screenshot needs an annotation, use a red box.
+
+* **Diagrams** — Provide a visual of a complicated theory. Diagrams must be simple and easy to read.
+
+
+**Syntax:**
+
+`![Alternate_text](<path_to_image_file>/<image_filename.png>)`
+
+**Example:**
+
+`![PEM Architecture](../images/pem_architecture.png)`
+
+## Dates
+
+When specifying dates for human readability, use the DD mmm YYYY format with a short month name in English. Where the date is used in a column in a table, use a leading 0 on the day of month, for example, 01 Jan 2024, for easier alignment.
+
+When specifying dates solely as numbers, use the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format: YYYY-MM-DD. This is the internationally accepted, unambiguous format. Use it where you expect the date to be read by automated systems.
+
diff --git a/advocacy_docs/community/index.mdx b/advocacy_docs/community/index.mdx
new file mode 100644
index 00000000000..6d5c00feeab
--- /dev/null
+++ b/advocacy_docs/community/index.mdx
@@ -0,0 +1,12 @@
+---
+title: Community
+navTitle: Community
+description: The EDB Docs Community
+iconName: IdeaSharing
+---
+
+All guides, notes, and documentation for anyone working in the community of EDB documenters.
+
+* [Contributing Documentation](contributing)
+
+
diff --git a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/02-PartnerInformation.mdx
index 1e7fc5e6322..60ad95e9d22 100644
--- a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Cohesity DataProtect for PostgreSQL'
+description: 'Details of the partner'
---
| | |
diff --git a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/04-ConfiguringCohesityDataProtectforPostgreSQL.mdx b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/04-ConfiguringCohesityDataProtectforPostgreSQL.mdx
index f463b877357..a649e353e4a 100644
--- a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/04-ConfiguringCohesityDataProtectforPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/04-ConfiguringCohesityDataProtectforPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Cohesity DataProtect for PostgreSQL'
+description: 'Walkthrough of configuring the integration'
---
Implementing Cohesity DataProtect for PostgreSQL with EDB Postgres Advanced Server requires the following components:
diff --git a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/05-UsingCohesityDataProtectforPostgreSQL.mdx b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/05-UsingCohesityDataProtectforPostgreSQL.mdx
index 0ee222986b2..ae4d3ca210c 100644
--- a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/05-UsingCohesityDataProtectforPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/05-UsingCohesityDataProtectforPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Cohesity DataProtect for PostgreSQL usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
These use cases show how backups are taken and restored from EDB Postgres Advanced Server using Cohesity DataProtect for PostgreSQL.
diff --git a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/06-CertificationEnvironment.mdx
index 59344126b31..6a78be85955 100644
--- a/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/CohesityDataProtectforPostgreSQL/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Cohesity DataProtect for PostgreSQL'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/02-PartnerInformation.mdx
index 9ec0f28b705..a1ca60ece76 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/02-PartnerInformation.mdx
@@ -1,12 +1,12 @@
---
title: 'Partner information'
-description: 'Details for Commvault Backup & Recovery'
+description: 'Details of the partner'
---
| | |
| ----------- | ----------- |
-| **Partner Name** | Commvault |
+| **Partner name** | Commvault |
| **Partner product** | Commvault Backup & Recovery |
| **Website** | https://www.commvault.com/ |
-| **Version** | Commvault Backup & Recovery 11.24 |
-| **Product description** | Wherever your data resides, ensure availability by way of a single interface with Commvault Backup & Recovery. It offers domprehensive workload coverage, files, apps, and databases, including EDB Postgres Advanced Server and EDB Postgres Extended Server, from a single extensible platform and user interface. Commvault Backup & Recovery provides a comprehensive backup and archiving solution for your trusted recovery, ransomware protection, and security. |
+| **Version** | Commvault Backup & Recovery 11.32 |
+| **Product description** | Wherever your data resides, ensure availability by way of a single interface with Commvault Backup & Recovery. It offers comprehensive workload coverage, files, apps, and databases, including EDB Postgres Advanced Server, EDB Postgres Extended Server, and PostgreSQL, from a single extensible platform and user interface. Commvault Backup & Recovery provides a comprehensive backup and archiving solution for your trusted recovery, ransomware protection, and security. |
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/03-SolutionSummary.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/03-SolutionSummary.mdx
index 4c81ede5ab6..130c61bd9d6 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/03-SolutionSummary.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/03-SolutionSummary.mdx
@@ -1,11 +1,11 @@
---
title: 'Solution summary'
-description: 'Brief explanation of the solution and its purpose'
+description: 'Explanation of the solution and its purpose'
---
Commvault enables your business to streamline management of its continuously evolving data environment, whether the data is on premises or in the cloud.
Commvault PostgreSQL iDataAgent provides the flexibility to back up PostgreSQL, EDB Postgres Advanced Server, and EDB Postgres Extended Server databases in different modes and restore them when needed. You can perform a full/log backup or restore of database servers, individual databases, and archive logs at any time and have full control over the process.
-Managing your data means knowing that it's protected and being able to effectively report on success or failure. Through an easy-to-use interface, you can quickly check on the progress of your jobs to ensure things are moving as expected. To keep you in the know, you can also use prebuilt reports either on demand or on a scheduled basis.
+Managing your data means knowing that it's protected and being able to effectively report on success or failure. Through an easy-to-use interface, you can quickly check on the progress of your jobs to ensure things are moving as expected. To keep you in the know, you can also use prebuilt reports either on demand or on a scheduled basis.
![Commvault Architecture](Images/Final-SolutionSummaryImage.png)
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/04-ConfiguringCommvaultBackupandRecovery.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/04-ConfiguringCommvaultBackupandRecovery.mdx
index 5e629a4d158..2d350d56b2b 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/04-ConfiguringCommvaultBackupandRecovery.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/04-ConfiguringCommvaultBackupandRecovery.mdx
@@ -1,28 +1,31 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Commvault Backup & Recovery'
+description: 'Walkthrough of configuring the integration'
redirects:
- /partner_docs/CommVaultGuide/04-ConfiguringCommvaultBackupandRecovery/
---
-Implementing Commvault Backup & Recovery with an EDB database requires the following components:
+Implementing Commvault Backup & Recovery with an EDB Postgres Advanced Server, EDB Postgres Extended Server, or PostgreSQL Server database requires the following components:
-- EDB Postgres Advanced Server or EDB Postgres Extended Server
+- EDB Postgres Advanced Server, EDB Postgres Extended Server, or PostgreSQL Server
- Commvault Backup & Recovery software
+!!! Note
+ We refer to the EDB Postgres Advanced Server, EDB Postgres Extended Server, and PostgreSQL Server products as the Postgres distribution. The specific distribution type depends on your needs and preferences.
+
## Prerequisites
-- A running EDB Postgres Advanced Server or EDB Postgres Extended Server instance
+- A running Postgres distribution
- Commvault Backup & Recovery installed
-- EDB Postgres Advanced Server or EDB Postgres Extended Server application path and library directory path, for example, `c:\Program Files\edb\as13\bin` and `c:\Program Files\edb\as13\lib`
-- The login credentials used to access the EDB Postgres Advanced Server or EDB Postgres Extended Server database
-- EDB Postgres Advanced Server or EDB Postgres Extended Server archive log directory configured
+- Postgres distribution application path and library directory path, for example, `c:\Program Files\edb\as15\bin` and `c:\Program Files\edb\as15\lib`
+- The login credentials used to access the Postgres distribution database
+- Postgres distribution archive log directory configured
-## Configure Commvault Backup & Recovery for EDB Postgres Advanced Server or EDB Postgres Extended Server
+## Configure Commvault Backup & Recovery for Postgres distribution
### Set up a disk storage pool
-1. From the machine where Commvault Backup & Recovery is installed, run the Core Setup wizard from the Commvault Backup & Recovery's Command Center. The wizard helps to set up a disk storage pool and to modify the server backup plan according to your requirements.
+1. From the machine where Commvault Backup & Recovery is installed, run the Core Setup wizard from the Commvault Backup & Recovery Command Center. The wizard helps to set up a disk storage pool and to modify the server backup plan according to your requirements.
2. Set up storage pool/disk storage. From the Welcome page, select **Let's get started**. On the **Disk** tab, in the **Name** box, enter a name for the storage pool.
3. In the **MediaAgent** box, accept the default value.
4. For **Type**, select **Local**.
@@ -42,11 +45,11 @@ Next, create a server backup plan in Core Setup. To create a server backup plan
![Creating a Server Backup Plan in Core Setup](Images/BackupPlanConf.png)
-### Install a client on an EDB database
+### Install a client on a Postgres distribution database
-1. From the Navigation pane of the Commvault Backup & Recovery's Command Center, select **Protect > Databases**.
+1. From the Navigation pane of the Commvault Backup & Recovery Command Center, select **Protect > Databases**.
2. Select **Add server**.
-3. Select the database type for EDB database, which in this case is **PostgreSQL**.
+3. Select the database type for the Postgres distribution database, which in this case is **PostgreSQL**.
![Creating a Server Backup Plan in Core Setup](Images/ServerAdd1.png)
@@ -66,7 +69,7 @@ Next, create a server backup plan in Core Setup. To create a server backup plan
![Creating a Server Backup Plan in Core Setup](Images/ServerAdd6.png)
-### Configure the EDB database instances to back up and protect
+### Configure the Postgres distribution instances to back up and protect
1. From the navigation pane, select **Protect > Databases > DB Instances**.
2. Select **Add instance**, and then select **PostgreSQL**.
@@ -74,18 +77,18 @@ Next, create a server backup plan in Core Setup. To create a server backup plan
![Creating a Server Backup Plan in Core Setup](Images/CreateInstance1.png)
3. From the **Server name** list, select the server where you want to create the new instance.
-4. In the **Instance Name** box, enter the EDB database instance name.
-5. From the **Plan** list, select the server plan you set up for use with your EDB database.
+4. In the **Instance Name** box, enter the Postgres distribution database instance name.
+5. From the **Plan** list, select the server plan you set up for use with your Postgres distribution database.
6. Under **Connection details**, enter the following information.
- - In the **Database user** box, enter the user name to access the EDB database instance.
- - In the **Password** box, enter the EDB database user account password.
- - In the **Port** box, enter the port to open the communication between the EDB database and the clients.
+ - In the **Database user** box, enter the user name to access the Postgres distribution database instance.
+ - In the **Password** box, enter the Postgres distribution database user account password.
+ - In the **Port** box, enter the port to open the communication between the Postgres distribution and the clients.
- In the **Maintenance DB** box, enter the name of a system database that's used as a maintenance database.
- In the **PostgreSQL** section, enter paths for **Binary Directory**, **Lib Directory**, and **Archive Log Directory**.
![Creating a Server Backup Plan in Core Setup](Images/CreateInstance2.png)
- Your database instance to back up is now created. You can view its configuration.
+ Your Postgres distribution database instance to back up is now created. You can view its configuration.
![Creating a Server Backup Plan in Core Setup](Images/CreateInstance3.png)
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/05-UsingCommvaultBackupandRecovery.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/05-UsingCommvaultBackupandRecovery.mdx
index e24f6da2ed0..5305d86d7ff 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/05-UsingCommvaultBackupandRecovery.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/05-UsingCommvaultBackupandRecovery.mdx
@@ -1,29 +1,27 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Commvault Backup & Recovery usage scenarios'
-redirects:
- - /partner_docs/CommVaultGuide/05-UsingCommvaultBackupandRecovery/
+description: 'Walkthrough of example usage scenarios'
---
-You can back up and restore an EDB database using Commvault Backup & Recovery.
+You can back up and restore a Postgres distribution database using Commvault Backup & Recovery.
## Using Commvault Backup & Recovery
-Commvault provides two methods of taking the backup from an EDB database and restoring it:
+Commvault provides two methods of taking the backup from a Postgres distribution database and restoring it:
- DumpBasedBackupSet backup and restore
- FSBasedBackupSet backup and restore
!!! Note
- At this time there's a known issue with FSBased Restore that doesn't allow for proper restoration of the database. See [Known issues](#known-issues) for more information.
+ At this time, there's a known issue with FSBased Incremental/PITR Restore that doesn't allow for proper restoration of the database using the defined process. See [Known issues](#known-issues) for more information and workarounds.
### DumpBasedBackupSet backup and restore
Dump-based backup uses the pg_dump utility to take the backup.
-#### Taking DumpBasedBackupSet backup
+## Taking a DumpBasedBackupSet backup
-1. Open the Commvault Backup & Recovery's Command Center. From the navigation pane, select **Protect > Databases**.
+1. Open the Commvault Backup & Recovery Command Center. From the navigation pane, select **Protect > Databases**.
![Instances Page](Images/Dumpbackup1.png)
@@ -41,7 +39,7 @@ Dump-based backup uses the pg_dump utility to take the backup.
![Backup Section](Images/Dumpbackup4.png)
-6. in the Select Backup Level screen, select **Full**.
+6. In the Select Backup Level screen, select **Full**.
![Backup Level Screen](Images/Dumpbackup5.png)
@@ -53,7 +51,7 @@ Dump-based backup uses the pg_dump utility to take the backup.
![Backup Job](Images/Dumpbackup7.png)
-#### Restoring DumpBasedBackupSet backup
+## Restoring a DumpBasedBackupSet backup
You can use DumpBasedBackupSet to restore the individual databases.
@@ -64,7 +62,7 @@ You can use DumpBasedBackupSet to restore the individual databases.
![Recovery Points Calendar](Images/Dumprestore1.png)
4. Select a date from the calendar, and then select **Restore**.
-5. The Backup Content screen displays the databases to restore. Select the required database to restore or select all of them to restore all.
+5. The Backup Content screen displays the databases to restore. Select the required database to restore, or select all of them to restore all.
![Backup Content Screen](Images/Dumprestore2.png)
@@ -79,7 +77,7 @@ You can use DumpBasedBackupSet to restore the individual databases.
![Restore Job](Images/Dumprestore5.png)
-8. When the Restore has completed successfully, log in to the EDB database and check that the restore operation recovered the data. This example connected to an EDB Postgres Advanced Server instance:
+8. When the restore has completed successfully, log in to the Postgres distribution database and check that the restore operation recovered the data. This example connected to an EDB Postgres Advanced Server instance:
```bash
edb=#
@@ -160,5 +158,79 @@ epas13_test=#
```
+### FSBasedBackupSet backup and restore
+A file system backup backs up data that resides on a Windows or UNIX computer.
+
+## Taking an FSBasedBackupSet backup
+1. Open the Commvault Backup & Recovery Command Center. From the navigation pane, go to **Protect > Databases**.
+
+2. Select the required instance.
+
+3. In the **Backup sets** section, select the **FSBasedBackupSet** backup set.
+
+4. In the **Database groups** section, select the database group that you want to back up. In this case, it's **default**.
+
+5. In the **Backup** section, select **Back up**.
+
+6. In the **Select Backup Level** screen, choose a full or an incremental backup. This example uses an incremental backup.
+
+ ![Select FSBased Backup Type](Images/SelectFSBasedBackupType.png)
+
+7. A job is created to take the backup. View the job by selecting the **Job ID** for that backup.
+
+ ![FSBased Backup Job](Images/FSBasedBackupJob.png)
+
+8. When the job is complete, you can find the information about the job on the job's page.
+
+ ![FSBased Backup Job Details](Images/FSBasedBackupJobDetails.png)
+
+## Restoring an FSBasedBackupSet full backup
+You can restore backed-up data for a subclient or an entire backup set using an in-place restore, an out-of-place restore, or, for a Linux file server, a restore to a standalone file server.
+
+1. Ensure that your Postgres distribution instance is stopped.
+
+2. Delete or rename the WAL directory and data directory.
+
+3. From the Commvault navigation pane, go to **Protect > Databases**.
+
+4. Select the instance you want to restore.
+
+5. In the **Recovery Points** calendar, select **FSBasedBackupSet**.
+
+6. Select a date when a full FSBasedBackupSet backup was taken, and select **Restore**.
+
+7. The Backup Content screen displays the databases to restore. Select the required database to restore, or select all of them to restore all.
+
+ ![FSBased Backup Restore Databases](Images/FSBasedFullRestoreDatabases.png)
+
+8. Select **Restore**.
+
+9. From the Restore Options screen, select where you want the restore to complete: in-place, out-of-place, or restore to disk for a Linux server.
+
+ ![FSBased Backup Restore Details](Images/FSBasedFullRestoreOptions.png)
+
+10. Select the **Destination Server** and **Destination Instance**, and select **Submit**.
+
+11. A job is created to restore the backup.
+
+12. When the restore has completed successfully, Commvault starts the server using the pg_ctl utility.
+
+    You can check that the server was started successfully with a command like `"C:\Program Files\edb\as15\bin\pg_ctl" -U enterprisedb status -D "C:\Program Files\edb\as15\data"`. It returns a message that the server is running.
+
+```bash
+C:\Program Files\edb\as15>"C:\Program Files\edb\as15\bin\pg_ctl" -U enterprisedb status -D "C:\Program Files\edb\as15\data"
+pg_ctl: server is running (PID: 20896)
+C:/Program Files/edb/as15/bin/edb-postgres.exe "-D" "C:\Program Files\edb\as15\data"
+```
+
+13. If you want the server to be controlled by a Services utility, you need to stop it with the pg_ctl utility and start it again in your Services utility, for example, Services (Local) in Windows or systemctl in UNIX.
+
+14. After you start the service using your utility of choice, log in to your Postgres distribution database and check that the restore operation recovered the data.
+
## Known issues
-FSBaseBackupSet restore has an issue if the default edb directory, for example, `*:\Program files\edb`, was lost or deleted. If this occurs, then after a restore is performed, the permissions on the restored directories aren't recovered. Instead, the directory inherits the permissions from the parent directory, which doesn't allow EDB Postgres Advanced Server services to start on the restored directory. We're working with Commvault to resolve the issue.
+
+FSBasedBackupSet incremental or point-in-time (PITR) restore has issues with PostgreSQL server, EDB Postgres Advanced Server, and EDB Postgres Extended versions 13 and later. The behavior is due to a change made in Postgres version 13. From the release notes: `Generate an error if recovery does not reach the specified recovery target`. While Commvault provides a time target for the recovery, the recovery is performed entirely by Postgres, and Postgres can't reach that time target when there are no transactions between backup jobs, so the restore fails.
+
+A workaround for this issue is to always introduce a transaction, such as creating and dropping a database, before taking a log backup. Another workaround is to use `recovery_target_lsn` in the `postgresql.conf` file instead of `recovery_target_time`.
+
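+For example, a hypothetical heartbeat you could run before each log backup might look like this (the database name is illustrative only):
+
+```sql
+CREATE DATABASE commvault_heartbeat;
+DROP DATABASE commvault_heartbeat;
+```
+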
+Commvault is aware of this error and is working on a fix.
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/06-CertificationEnvironment.mdx
index 5624b0b1107..237e72146d0 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/06-CertificationEnvironment.mdx
@@ -1,11 +1,11 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Commvault Backup & Recovery'
+description: 'Overview of the certification environment'
---
| | |
| ----------- | ----------- |
-| **Certification test date** | June 16, 2022 |
-| **EDB Postgres Advanced Server** | 11, 12, 13, 14 |
-| **EDB Postgres Extended Server** | 11, 12, 13 |
-| **Commvault Backup & Recovery** | 11.24 |
+| **Certification test date** | August 22, 2023 |
+| **EDB Postgres Advanced Server** | 12, 13, 14, 15 |
+| **EDB Postgres Extended Server** | 12, 13 |
+| **Commvault Backup & Recovery** | 11.32 |
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/07-SupportandLogging.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/07-SupportandLogging.mdx
new file mode 100644
index 00000000000..65dec90fb94
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/07-SupportandLogging.mdx
@@ -0,0 +1,46 @@
+---
+title: 'Support and logging details'
+description: 'Details of the support process and logging information'
+---
+
+## Support
+
+Technical support for the use of these products is provided by both EDB and Commvault. A support contract must be in place at both EDB and Commvault. You can open a support ticket with either company to start the process. If it's determined through the support ticket that resources from the other vendor are required, open a support ticket with that vendor through normal support channels. This approach allows both companies to work together to help you as needed.
+
+## Logging
+
+The following logs are available.
+
+### EDB Postgres Advanced Server logs
+
+Navigate to the `Data` directory in your chosen EDB Postgres Advanced Server instance. From there, you can navigate to `log` or `current_logfiles`. Or, you can navigate to the `postgresql.conf` file where you can customize logging options or enable `edb_audit` logs.
+
+### EDB Postgres Extended Server logs
+
+Navigate to the `Data` directory in your chosen EDB Postgres Extended Server instance. From there you can navigate to `log`, or you can navigate to the `postgresql.conf` file where you can customize logging options. An example of the full path to view EDB Postgres Extended logs is `/var/lib/edb-pge/15/data/log`.
+
+### PostgreSQL Server logs
+
+The default log directories for PostgreSQL logs vary depending on the operating system:
+
+- Debian-based system: `/var/log/postgresql/postgresql-x.x.main.log`, where `x.x` is the PostgreSQL version
+
+- Red Hat-based system: `/var/lib/pgsql/data/pg_log`
+
+- Windows: `C:\Program Files\PostgreSQL\9.3\data\pg_log`
+
+### Commvault logs
+
+You can find Commvault logs using either of these methods:
+
+- Look in the Content Store directory/folder in the Commvault folder on your system. For example, on a Windows system, the log file path is `C:\Program Files\Commvault\ContentStore\LogFiles`. There you can see all of the different types of logs Commvault collects.
+
+- Gather some job-specific logs in the Commvault interface:
+
+ 1. Open the Commvault Backup & Recovery Command Center. From the navigation pane, go to **Jobs > Job History**.
+
+ 2. Find the specific job you want to view logs for, and select **...** next to that job.
+
+ 3. Select **View Logs**.
+
+ ![View Job Logs](Images/CommvaultViewLogsForJob.png)
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/CommvaultViewLogsForJob.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/CommvaultViewLogsForJob.png
new file mode 100644
index 00000000000..ed1093daafa
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/CommvaultViewLogsForJob.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db6a73932cd532dad2bef06a09ba99f4e7c8184ae74ce204e2fab6747e270333
+size 1146108
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJob.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJob.png
new file mode 100644
index 00000000000..e4995df8065
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJob.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d43db9e5db0f0b0421541273f52d87736ae12e6879aee84bc8700a568d105691
+size 129298
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJobDetails.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJobDetails.png
new file mode 100644
index 00000000000..2676007aa44
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedBackupJobDetails.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5da67b5742886735b8f41fc1c46f4d67483a70f405eb4f80c2e5b0740a6e7c9a
+size 140432
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreDatabases.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreDatabases.png
new file mode 100644
index 00000000000..e537f6ca6af
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreDatabases.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:913d3eb2f127a84517ddab7039323da6ff2ac3eb67ec05a84a94cb993c40f6bf
+size 87505
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreOptions.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreOptions.png
new file mode 100644
index 00000000000..ae4a2a7023d
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/FSBasedFullRestoreOptions.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2827c237d4db8cb80a94b996c367c34e26df54a0d9447b1334055bf9ca04791
+size 755537
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/SelectFSBasedBackupType.png b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/SelectFSBasedBackupType.png
new file mode 100644
index 00000000000..5f3f1924352
--- /dev/null
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/Images/SelectFSBasedBackupType.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68bc5a3f22590ee1e7068157679cb258506d757aa1f8cf6c14a1431925c2593c
+size 117372
diff --git a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/index.mdx b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/index.mdx
index c4c8905d918..87c365f7f4b 100644
--- a/advocacy_docs/partner_docs/CommvaultBackupandRecovery/index.mdx
+++ b/advocacy_docs/partner_docs/CommvaultBackupandRecovery/index.mdx
@@ -9,3 +9,5 @@ directoryDefaults:
This document is intended to augment each vendor’s product documentation to guide you in getting the products working together. It isn't intended to show the optimal configuration for the certified integration.
\ No newline at end of file
diff --git a/advocacy_docs/partner_docs/DBeaverPRO/07-SupportandLogging.mdx b/advocacy_docs/partner_docs/DBeaverPRO/07-SupportandLogging.mdx
index 5f476add649..f78487e87ea 100644
--- a/advocacy_docs/partner_docs/DBeaverPRO/07-SupportandLogging.mdx
+++ b/advocacy_docs/partner_docs/DBeaverPRO/07-SupportandLogging.mdx
@@ -1,23 +1,25 @@
---
-title: 'Support and Logging Details'
+title: 'Support and logging details'
description: 'Details of the support process and logging information'
---
## Support
-Technical support for the use of these products is provided by both EDB and DBeaver. A proper support contract is required to be in place at both EDB and DBeaver. A support ticket can be opened on either side to start the process. If it is determined through the support ticket that resources from the other vendor is required, the customer should open a support ticket with that vendor through normal support channels. This will allow both companies to work together to help the customer as needed.
+Technical support for the use of these products is provided by both EDB and DBeaver. A support contract must be in place at both EDB and DBeaver. You can open a support ticket with either company to start the process. If it's determined through the support ticket that resources from the other vendor are required, open a support ticket with that vendor through normal support channels. This approach allows both companies to work together to help you as needed.
## Logging
-**EDB Postgres Advanced Server Logs:**
+The following logs are available.
-Navigate to the `Data` directory in your chosen EDB Postgres Advanced Server instance and from here you can navigate to `log`, `current_logfiles` or you can navigate to the `postgresql.conf` file where you can customize logging options or enable `edb_audit` logs.
+### EDB Postgres Advanced Server logs
-**EDB Postgres Extended Server Logs**
+Navigate to the `Data` directory in your chosen EDB Postgres Advanced Server instance. From there, you can navigate to `log` or `current_logfiles`. Or, you can navigate to the `postgresql.conf` file where you can customize logging options or enable `edb_audit` logs.
-Navigate to the `Data` directory in your chosen EDB Postgres Extended Server instance and from here you can navigate to `log`, or you can navigate to the `postgresql.conf` file where you can customize logging options. An example of the full path to view EDB Postgres Extended logs: `/var/lib/edb-pge/15/data/log`.
+### EDB Postgres Extended Server logs
-**PostgreSQL Server Logs**
+Navigate to the `Data` directory in your chosen EDB Postgres Extended Server instance. From there you can navigate to `log`, or you can navigate to the `postgresql.conf` file where you can customize logging options. An example of the full path to view EDB Postgres Extended logs is `/var/lib/edb-pge/15/data/log`.
+
+### PostgreSQL Server logs
The default log directories for PostgreSQL logs vary depending on the operating system:
@@ -27,15 +29,13 @@ The default log directories for PostgreSQL logs vary depending on the operating
- Windows: `C:\Program Files\PostgreSQL\9.3\data\pg_log`
-** DBeaver Logs**
-
-If you are experiencing errors during the DBeaver runtime you can gather those within the DBeaver Pro interface.
+### DBeaver logs
-1. On the top menu bar navigate to `Window` and select it.
+If you experience errors during the DBeaver runtime, you can gather them in the DBeaver Pro interface:
-2. Scroll down to `Show View`.
+1. At the top menu bar, select **Window**.
-3. In the menu options that come up select `Error Log` and this will contain all the errors that occur during the DBeaver runtime.
+2. Select **Show View > Error Log**. The log contains all the errors that occur during the DBeaver runtime.
![DBeaver Log Viewer](Images/DBeaverLogViewer.png)
-4. You can save and attach these logs to a support ticket or bug report to DBeaver support.
+3. You can save these logs and attach them to a support ticket or bug report for DBeaver support.
diff --git a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/02-PartnerInformation.mdx
index 38fd7c9672d..6c1803e05c1 100644
--- a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Esri ArcGIS Pro and Esri ArcGIS Enterprise'
+description: 'Details of the partner'
---
| | |
diff --git a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/04-ConfiguringEsriArcGISProandEsriArcGISEnterprise.mdx b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/04-ConfiguringEsriArcGISProandEsriArcGISEnterprise.mdx
index 1562aced751..ac0448ffbc8 100644
--- a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/04-ConfiguringEsriArcGISProandEsriArcGISEnterprise.mdx
+++ b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/04-ConfiguringEsriArcGISProandEsriArcGISEnterprise.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Esri ArcGIS Pro and Esri ArcGIS Enterprise'
+description: 'Walkthrough of configuring the integration'
---
Implementing Esri ArcGIS Pro and Esri ArcGIS Enterprise with EDB Postgres Advanced Server requires the following components:
diff --git a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/05-UsingEsriArcGISProandEsriArcGISEnterprise.mdx b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/05-UsingEsriArcGISProandEsriArcGISEnterprise.mdx
index 20727db6af6..1df16dbc962 100644
--- a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/05-UsingEsriArcGISProandEsriArcGISEnterprise.mdx
+++ b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/05-UsingEsriArcGISProandEsriArcGISEnterprise.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Esri ArcGIS Pro and Esri ArcGIS Enterprise usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
## Create file geodatabase
diff --git a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/06-CertificationEnvironment.mdx
index 4eb20c5cecc..6c7b3d68f34 100644
--- a/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/EsriArcGISProandEsriArcGISEnterprise/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Esri ArcGIS Pro and Esri ArcGIS Enterprise'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/HashicorpVault/index.mdx b/advocacy_docs/partner_docs/HashicorpVault/index.mdx
index 69c4276def1..e32eabb75b0 100644
--- a/advocacy_docs/partner_docs/HashicorpVault/index.mdx
+++ b/advocacy_docs/partner_docs/HashicorpVault/index.mdx
@@ -6,7 +6,7 @@ directoryDefaults:
---
-[Partner Program Logo](Images/PartnerProgram.jpg.png)
+![Partner Program Logo](Images/PartnerProgram.jpg.png)
diff --git a/advocacy_docs/partner_docs/HashicorpVaultTransitSecretsEngine/04-ConfiguringTransitSecretsEngine.mdx b/advocacy_docs/partner_docs/HashicorpVaultTransitSecretsEngine/04-ConfiguringTransitSecretsEngine.mdx
index 5d6357b42ca..5475fd3cf5b 100644
--- a/advocacy_docs/partner_docs/HashicorpVaultTransitSecretsEngine/04-ConfiguringTransitSecretsEngine.mdx
+++ b/advocacy_docs/partner_docs/HashicorpVaultTransitSecretsEngine/04-ConfiguringTransitSecretsEngine.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough on configuring the integration'
+description: 'Walkthrough of configuring the integration'
---
Implementing Hashicorp Vault with EDB Postgres Advanced Server version 15.2 and later or EDB Postgres Extended Server version 15.2 and later requires the following components:
diff --git a/advocacy_docs/partner_docs/ImpervaDataSecurityFabric/index.mdx b/advocacy_docs/partner_docs/ImpervaDataSecurityFabric/index.mdx
index ae4269e96ea..7ef8c02bb31 100644
--- a/advocacy_docs/partner_docs/ImpervaDataSecurityFabric/index.mdx
+++ b/advocacy_docs/partner_docs/ImpervaDataSecurityFabric/index.mdx
@@ -5,7 +5,7 @@ directoryDefaults:
iconName: handshake
---
-[Partner Program Logo](Images/PartnerProgram.jpg.png)
+![Partner Program Logo](Images/PartnerProgram.jpg.png)
diff --git a/advocacy_docs/partner_docs/LiquibasePro/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/LiquibasePro/02-PartnerInformation.mdx
index 24f8aa73b99..2f5b2ece766 100644
--- a/advocacy_docs/partner_docs/LiquibasePro/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/LiquibasePro/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Liquibase Pro'
+description: 'Details of the partner'
---
| | |
diff --git a/advocacy_docs/partner_docs/LiquibasePro/04-ConfiguringLiquibasePro.mdx b/advocacy_docs/partner_docs/LiquibasePro/04-ConfiguringLiquibasePro.mdx
index e7b6d8e6e62..dac57dc6cf0 100644
--- a/advocacy_docs/partner_docs/LiquibasePro/04-ConfiguringLiquibasePro.mdx
+++ b/advocacy_docs/partner_docs/LiquibasePro/04-ConfiguringLiquibasePro.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Liquibase Pro'
+description: 'Walkthrough of configuring the integration'
---
Implementing Liquibase with EDB Postgres Advanced Server requires the following components:
diff --git a/advocacy_docs/partner_docs/LiquibasePro/05-UsingLiquibasePro.mdx b/advocacy_docs/partner_docs/LiquibasePro/05-UsingLiquibasePro.mdx
index 30b0d520f29..b44a91a5246 100644
--- a/advocacy_docs/partner_docs/LiquibasePro/05-UsingLiquibasePro.mdx
+++ b/advocacy_docs/partner_docs/LiquibasePro/05-UsingLiquibasePro.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Liquibase Pro usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
Liquibase is a development tool that allows you to apply changes to the EDB database using the Liquibase CLI and view them on the Liquibase Hub.
diff --git a/advocacy_docs/partner_docs/LiquibasePro/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/LiquibasePro/06-CertificationEnvironment.mdx
index 4192e3e5828..a8960b444a0 100644
--- a/advocacy_docs/partner_docs/LiquibasePro/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/LiquibasePro/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Liquibase Pro'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/PreciselyConnectCDC/04-Configuratingpreciselyconnectcdc.mdx b/advocacy_docs/partner_docs/PreciselyConnectCDC/04-Configuratingpreciselyconnectcdc.mdx
index 5d900d58895..719aaf20945 100644
--- a/advocacy_docs/partner_docs/PreciselyConnectCDC/04-Configuratingpreciselyconnectcdc.mdx
+++ b/advocacy_docs/partner_docs/PreciselyConnectCDC/04-Configuratingpreciselyconnectcdc.mdx
@@ -1,5 +1,5 @@
---
-title: 'ConfiguringC'
+title: 'Configuring'
description: 'Walkthrough of configuring the integration'
---
diff --git a/advocacy_docs/partner_docs/QuestToadEdge/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/QuestToadEdge/02-PartnerInformation.mdx
index 273342c48a9..48874d8d86e 100644
--- a/advocacy_docs/partner_docs/QuestToadEdge/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/QuestToadEdge/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Quest Toad Edge'
+description: 'Details of the partner'
---
| | |
diff --git a/advocacy_docs/partner_docs/QuestToadEdge/04-ConfiguringQuestToadEdge.mdx b/advocacy_docs/partner_docs/QuestToadEdge/04-ConfiguringQuestToadEdge.mdx
index 1c2cd26138f..44128048aa3 100644
--- a/advocacy_docs/partner_docs/QuestToadEdge/04-ConfiguringQuestToadEdge.mdx
+++ b/advocacy_docs/partner_docs/QuestToadEdge/04-ConfiguringQuestToadEdge.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Quest Toad Edge'
+description: 'Walkthrough of configuring the integration'
---
Implementing Quest Toad Edge with EDB Postgres Advanced Server or EDB Postgres Extended Server requires the following components:
diff --git a/advocacy_docs/partner_docs/QuestToadEdge/05-UsingQuestToadEdge.mdx b/advocacy_docs/partner_docs/QuestToadEdge/05-UsingQuestToadEdge.mdx
index 2f4b607cefe..b420540c62b 100644
--- a/advocacy_docs/partner_docs/QuestToadEdge/05-UsingQuestToadEdge.mdx
+++ b/advocacy_docs/partner_docs/QuestToadEdge/05-UsingQuestToadEdge.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Quest Toad Edge usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
After you connect an instance of EDB Postgres Advanced Server or EDB Postgres Extended to Quest Toad Edge, you can access the capabilities of Toad Edge.
diff --git a/advocacy_docs/partner_docs/QuestToadEdge/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/QuestToadEdge/06-CertificationEnvironment.mdx
index a70d85b3bf1..484542b7533 100644
--- a/advocacy_docs/partner_docs/QuestToadEdge/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/QuestToadEdge/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Quest Toad Edge'
+description: 'Overview of the certification environment'
redirects:
- /partner_docs/ToadEdgeGuide/06-CertificationEnvironment/
---
diff --git a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/02-PartnerInformation.mdx
index 19f05b129e3..ecb1da7c521 100644
--- a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Repostor Data Protector for PostgreSQL'
+description: 'Details of the partner'
redirects:
- /partner_docs/RepostorGuide/02-PartnerInformation/
---
diff --git a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/04-ConfiguringRepostorDataProtectorforPostgreSQL.mdx b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/04-ConfiguringRepostorDataProtectorforPostgreSQL.mdx
index 6e386a32fc9..948bc82ef22 100644
--- a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/04-ConfiguringRepostorDataProtectorforPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/04-ConfiguringRepostorDataProtectorforPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Repostor Data Protector for PostgreSQL'
+description: 'Walkthrough of configuring the integration'
---
## Prerequisites
diff --git a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/05-UsingRepostorDataProtectorforPostgreSQL.mdx b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/05-UsingRepostorDataProtectorforPostgreSQL.mdx
index 45ef071fca7..0f3d8d49404 100644
--- a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/05-UsingRepostorDataProtectorforPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/05-UsingRepostorDataProtectorforPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Repostor Data Protector for PostgreSQL usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
The current RDP version is a command-line client. You run three commands:
`postgresbackup`, `postgresquery`, and `postgresrestore`. The logwriter and logreader tools are called by PostgreSQL during execution of `archive_command` and `restore_command`.
diff --git a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/06-CertificationEnvironment.mdx
index dd700ca1c67..78bc46cc22c 100644
--- a/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/RepostorDataProtectorforPostgreSQL/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used for certifying Repostor Data Protector for PostgreSQL'
+description: 'Overview of the certification environment'
redirects:
- /partner_docs/RepostorGuide/06-CertificationEnvironment/
---
diff --git a/advocacy_docs/partner_docs/SIBVisionsVisionX/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/SIBVisionsVisionX/02-PartnerInformation.mdx
index 2f238a1e9fc..be58e3e5eb5 100644
--- a/advocacy_docs/partner_docs/SIBVisionsVisionX/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/SIBVisionsVisionX/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for SIB Visions VisionX'
+description: 'Details of the partner'
---
| | |
diff --git a/advocacy_docs/partner_docs/SIBVisionsVisionX/04-ConfiguringSIBVisionsVisionX.mdx b/advocacy_docs/partner_docs/SIBVisionsVisionX/04-ConfiguringSIBVisionsVisionX.mdx
index 559f6487c74..9a6e5027b26 100644
--- a/advocacy_docs/partner_docs/SIBVisionsVisionX/04-ConfiguringSIBVisionsVisionX.mdx
+++ b/advocacy_docs/partner_docs/SIBVisionsVisionX/04-ConfiguringSIBVisionsVisionX.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring SIB Visions VisionX'
+description: 'Walkthrough of configuring the integration'
---
Implementing SIB Visions VisionX with EDB Postgres Advanced Server or EDB Postgres Extended Server requires the following components:
diff --git a/advocacy_docs/partner_docs/SIBVisionsVisionX/05-UsingSIBVisionsVisionX.mdx b/advocacy_docs/partner_docs/SIBVisionsVisionX/05-UsingSIBVisionsVisionX.mdx
index 459f669fc14..be6a1dbaed6 100644
--- a/advocacy_docs/partner_docs/SIBVisionsVisionX/05-UsingSIBVisionsVisionX.mdx
+++ b/advocacy_docs/partner_docs/SIBVisionsVisionX/05-UsingSIBVisionsVisionX.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple SIB Visions VisionX usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
-These examples walk you though some of the common usage scenarios that can be used with EDB Postgres Advanced Server or EDB Postgres Extended Server. These are examples to help get you started and show how the products can work together.
+These examples walk you through some of the common usage scenarios that can be used with EDB Postgres Advanced Server or EDB Postgres Extended Server. These are examples to help get you started and show how the products can work together.
diff --git a/advocacy_docs/partner_docs/SIBVisionsVisionX/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/SIBVisionsVisionX/06-CertificationEnvironment.mdx
index 1e32fc978a5..3104d19b267 100644
--- a/advocacy_docs/partner_docs/SIBVisionsVisionX/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/SIBVisionsVisionX/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used for certifying SIB Visions VisionX'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/02-PartnerInformation.mdx
index 1432e2264c4..159b187f912 100644
--- a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/02-PartnerInformation.mdx
@@ -1,6 +1,6 @@
---
title: 'Partner information'
-description: 'Details for Thales CipherTrust Transparent Encryption (CTE)'
+description: 'Details of the partner'
---
diff --git a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/04-ConfiguringThalesCipherTrustTransparentEncryption.mdx b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/04-ConfiguringThalesCipherTrustTransparentEncryption.mdx
index bfe650e1e87..d11d30109ce 100644
--- a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/04-ConfiguringThalesCipherTrustTransparentEncryption.mdx
+++ b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/04-ConfiguringThalesCipherTrustTransparentEncryption.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Thales CipherTrust Transparent Encryption (CTE)'
+description: 'Walkthrough of configuring the integration'
---
diff --git a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/05-UsingThalesCipherTrustTransparentEncryption.mdx b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/05-UsingThalesCipherTrustTransparentEncryption.mdx
index a61583ac745..0fc6d06ceac 100644
--- a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/05-UsingThalesCipherTrustTransparentEncryption.mdx
+++ b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/05-UsingThalesCipherTrustTransparentEncryption.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Thales CipherTrust Transparent Encryption (CTE) usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
CTE protects data either at the file level or at the storage device level. A CTE agent running on the Postgres host manages the files behind a GuardPoint by enforcing the policy associated with it and communicates data access events to the CipherTrust Manager for logging. A GuardPoint is usually associated with a Linux mount point or a Windows volume but can also be associated with a directory subtree.
diff --git a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/06-CertificationEnvironment.mdx
index 13966b47a9e..685f65c5d1e 100644
--- a/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/ThalesCipherTrustTransparentEncryption/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of Thales CipherTrust Transparent Encryption (CTE)'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/02-PartnerInformation.mdx b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/02-PartnerInformation.mdx
index 81606f06565..3941a3964af 100644
--- a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/02-PartnerInformation.mdx
+++ b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/02-PartnerInformation.mdx
@@ -1,13 +1,13 @@
---
title: 'Partner information'
-description: 'Details for Veritas NetBackup for PostgreSQL'
+description: 'Details of the partner'
redirects:
- /partner_docs/VeritasGuide/02-PartnerInformation/
---
| | |
| ----------- | ----------- |
| **Partner name** | Veritas |
-| **Partner Product** | NetBackup for PostgreSQL |
+| **Partner product** | NetBackup for PostgreSQL |
| **Website** | https://www.veritas.com/ |
| **Version & platform** | NetBackup for PostgreSQL 9.1: Linux, Windows |
-| **Product description** | Veritas NetBackup gives enterprise IT a simple and powerful way to ensure the integrity and availability of their dataa—from edge to core to cloud. Veritas NetBackup for PostgreSQL Agent extends the capabilities of NetBackup to include backup and restore of PostgreSQL databases. |
+| **Product description** | Veritas NetBackup gives enterprise IT a simple and powerful way to ensure the integrity and availability of their data—from edge to core to cloud. Veritas NetBackup for PostgreSQL Agent extends the capabilities of NetBackup to include backup and restore of PostgreSQL databases. |
diff --git a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/04-ConfiguringVeritasNetBackupforPostgreSQL.mdx b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/04-ConfiguringVeritasNetBackupforPostgreSQL.mdx
index b135cc2f49f..1756937df97 100644
--- a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/04-ConfiguringVeritasNetBackupforPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/04-ConfiguringVeritasNetBackupforPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Configuring'
-description: 'Walkthrough of configuring Veritas NetBackup for PostgreSQL'
+description: 'Walkthrough of configuring the integration'
---
Implementing Veritas NetBackup solution for backup/restore of PostgreSQL databases requires the following components:
diff --git a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/05-UsingVeritasNetBackupForPostgreSQL.mdx b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/05-UsingVeritasNetBackupForPostgreSQL.mdx
index 2fbb3b67d4a..d2ea6f6ec77 100644
--- a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/05-UsingVeritasNetBackupForPostgreSQL.mdx
+++ b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/05-UsingVeritasNetBackupForPostgreSQL.mdx
@@ -1,6 +1,6 @@
---
title: 'Using'
-description: 'Walkthroughs of multiple Veritas NetBackup for PostgreSQL usage scenarios'
+description: 'Walkthrough of example usage scenarios'
---
Common backup/restore operations for PostgreSQL databases using Veritas NetBackup for PostgreSQL are:
diff --git a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/06-CertificationEnvironment.mdx b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/06-CertificationEnvironment.mdx
index 1539573025d..bcc4e553f23 100644
--- a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/06-CertificationEnvironment.mdx
+++ b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/06-CertificationEnvironment.mdx
@@ -1,6 +1,6 @@
---
title: 'Certification environment'
-description: 'Overview of the certification environment used in the certification of NetBackup for PostgreSQL'
+description: 'Overview of the certification environment'
---
| | |
diff --git a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/index.mdx b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/index.mdx
index d8e90d8b500..50f5645a5eb 100644
--- a/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/index.mdx
+++ b/advocacy_docs/partner_docs/VeritasNetBackupforPostgreSQL/index.mdx
@@ -6,7 +6,7 @@ directoryDefaults:
---
-[Partner Program Logo](Images/EDBPartnerProgram.png)
+![Partner Program Logo](Images/EDBPartnerProgram.png)
diff --git a/advocacy_docs/pg_extensions/advanced_storage_pack/rel_notes/index.mdx b/advocacy_docs/pg_extensions/advanced_storage_pack/rel_notes/index.mdx
index 8dcbf2d9680..1802a3e9ad8 100644
--- a/advocacy_docs/pg_extensions/advanced_storage_pack/rel_notes/index.mdx
+++ b/advocacy_docs/pg_extensions/advanced_storage_pack/rel_notes/index.mdx
@@ -11,7 +11,7 @@ about the release that introduced the feature.
| Version | Release Date |
| --------------------------- | ------------ |
-| [1.0.0](asp_1.0.0_rel_notes) | 2022 Nov 30 |
+| [1.0.0](asp_1.0.0_rel_notes) | 30 Nov 2022 |
diff --git a/advocacy_docs/pg_extensions/ldap_sync/rel_notes/index.mdx b/advocacy_docs/pg_extensions/ldap_sync/rel_notes/index.mdx
index 8d55874c4a2..363bad1651c 100644
--- a/advocacy_docs/pg_extensions/ldap_sync/rel_notes/index.mdx
+++ b/advocacy_docs/pg_extensions/ldap_sync/rel_notes/index.mdx
@@ -11,7 +11,7 @@ about the release that introduced the feature.
| Version | Release Date |
| --------------------------- | ------------ |
-| [1.0.0](ldap_sync_1.0.0_rel_notes) | 2022 Nov 30 |
+| [1.0.0](ldap_sync_1.0.0_rel_notes) | 30 Nov 2022 |
diff --git a/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx
index 52e15d35198..4d6373b08ca 100644
--- a/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx
+++ b/advocacy_docs/pg_extensions/pg_failover_slots/rel_notes/index.mdx
@@ -11,7 +11,7 @@ about the release that introduced the feature.
| Version | Release Date |
| --------------------------- | ------------ |
-| [1.0.0](pg_failover_slots_1.0.0_rel_notes) | 2023 Mar 31 |
+| [1.0.0](pg_failover_slots_1.0.0_rel_notes) | 31 Mar 2023 |
diff --git a/advocacy_docs/pg_extensions/pg_tuner/rel_notes/index.mdx b/advocacy_docs/pg_extensions/pg_tuner/rel_notes/index.mdx
index 0669deef032..1fcdf91696d 100644
--- a/advocacy_docs/pg_extensions/pg_tuner/rel_notes/index.mdx
+++ b/advocacy_docs/pg_extensions/pg_tuner/rel_notes/index.mdx
@@ -14,8 +14,8 @@ about the release that introduced the feature.
| Version | Release Date |
| --------------------------- | ------------ |
-| [1.1.0](pg_tuner_1.1.0_rel_notes) | 2023 Feb 10 |
-| [1.0.0](pg_tuner_1.0.0_rel_notes) | 2022 Nov 30 |
+| [1.1.0](pg_tuner_1.1.0_rel_notes) | 10 Feb 2023 |
+| [1.0.0](pg_tuner_1.0.0_rel_notes) | 30 Nov 2022 |
diff --git a/advocacy_docs/pg_extensions/query_advisor/rel_notes/index.mdx b/advocacy_docs/pg_extensions/query_advisor/rel_notes/index.mdx
index 8560ac96b8e..2cb1d75b6c1 100644
--- a/advocacy_docs/pg_extensions/query_advisor/rel_notes/index.mdx
+++ b/advocacy_docs/pg_extensions/query_advisor/rel_notes/index.mdx
@@ -11,7 +11,7 @@ about the release that introduced the feature.
| Version | Release Date |
| --------------------------- | ------------ |
-| [1.0.0](query_advisor_1.0.0_rel_notes) | 2023 May 10 |
+| [1.0.0](query_advisor_1.0.0_rel_notes) | 10 May 2023 |
diff --git a/advocacy_docs/security/advisories/cve.mdx.template b/advocacy_docs/security/advisories/cve.mdx.template
index bbd2e96f5c9..bea078ec99c 100644
--- a/advocacy_docs/security/advisories/cve.mdx.template
+++ b/advocacy_docs/security/advisories/cve.mdx.template
@@ -39,7 +39,7 @@ OPTIONAL UPDATE NOTE
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
* LINKS TO REFERENCES
@@ -55,7 +55,7 @@ Source: SOURCE
## Change history
-DD mmmm YYYY: ACTION
+* DD mmmm YYYY: ACTION
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve20074639.mdx b/advocacy_docs/security/advisories/cve20074639.mdx
index c6bcadfe967..87c6c3bb631 100644
--- a/advocacy_docs/security/advisories/cve20074639.mdx
+++ b/advocacy_docs/security/advisories/cve20074639.mdx
@@ -1,5 +1,5 @@
---
-title: EDB Advanced Server 8.2 improperly handles debugging function calls
+title: CVE-2007-4639 - EDB Advanced Server 8.2 improperly handles debugging function calls
navTitle: CVE-2007-4639
---
diff --git a/advocacy_docs/security/advisories/cve201910128.mdx b/advocacy_docs/security/advisories/cve201910128.mdx
index c1d8586d38f..1625190c580 100644
--- a/advocacy_docs/security/advisories/cve201910128.mdx
+++ b/advocacy_docs/security/advisories/cve201910128.mdx
@@ -1,5 +1,5 @@
---
-title: EDB supplied PostgreSQL inherits ACL for installation directory
+title: CVE-2019-10128 - EDB supplied PostgreSQL inherits ACL for installation directory
navTitle: CVE-2019-10128
---
diff --git a/advocacy_docs/security/advisories/cve202331043.mdx b/advocacy_docs/security/advisories/cve202331043.mdx
index 5bae8969359..bbdace27441 100644
--- a/advocacy_docs/security/advisories/cve202331043.mdx
+++ b/advocacy_docs/security/advisories/cve202331043.mdx
@@ -1,5 +1,5 @@
---
-title: EDB Postgres Advanced Server (EPAS) logs unredacted passwords prior to 14.6.0
+title: CVE-2023-31043 - EDB Postgres Advanced Server (EPAS) logs unredacted passwords prior to 14.6.0
navTitle: CVE-2023-31043
---
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx3.mdx b/advocacy_docs/security/advisories/cve202341113.mdx
similarity index 59%
rename from advocacy_docs/security/advisories/cve2023xxxxx3.mdx
rename to advocacy_docs/security/advisories/cve202341113.mdx
index 9eeffc826cb..b81a774e300 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx3.mdx
+++ b/advocacy_docs/security/advisories/cve202341113.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
-navTitle: CVE-2023-XXXXX-3
+title: CVE-2023-41113 - EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
+navTitle: CVE-2023-41113
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 allow an authenticated user to to obtain information about whether certain files exist on disk, what errors if any occur when attempting to read them, and some limited information about their contents regardless of permissions. This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It allows an authenticated user to obtain information about whether certain files exist on disk, what errors, if any, occur when attempting to read them, and some limited information about their contents (regardless of permissions). This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-3
+CVE-ID: CVE-2023-41113
CVSS Base Score: 4.3
@@ -34,18 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
-Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -54,7 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -70,7 +70,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx6.mdx b/advocacy_docs/security/advisories/cve202341114.mdx
similarity index 60%
rename from advocacy_docs/security/advisories/cve2023xxxxx6.mdx
rename to advocacy_docs/security/advisories/cve202341114.mdx
index 4d12fc9d97b..c4dcb6de550 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx6.mdx
+++ b/advocacy_docs/security/advisories/cve202341114.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
-navTitle: CVE-2023-XXXXX-6
+title: CVE-2023-41114 - EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
+navTitle: CVE-2023-41114
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the functions get_url_as_text and get_url_as_bytea. These functions are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the functions get_url_as_text and get_url_as_bytea that are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-6
+CVE-ID: CVE-2023-41114
CVSS Base Score: 6.5
@@ -34,16 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -52,8 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
-
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -68,7 +69,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx7.mdx b/advocacy_docs/security/advisories/cve202341115.mdx
similarity index 64%
rename from advocacy_docs/security/advisories/cve2023xxxxx7.mdx
rename to advocacy_docs/security/advisories/cve202341115.mdx
index f4be16f2e63..83c847237dd 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx7.mdx
+++ b/advocacy_docs/security/advisories/cve202341115.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) permission bypass for large objects
-navTitle: CVE-2023-XXXXX-7
+title: CVE-2023-41115 - EDB Postgres Advanced Server (EPAS) permission bypass for large objects
+navTitle: CVE-2023-41115
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0, using UTL_ENCODE allows an authenticated user to read any large object, regardless of that users permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. When using UTL_ENCODE, an authenticated user can read any large object, regardless of that user's permissions.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-7
+CVE-ID: CVE-2023-41115
CVSS Base Score: 6.5
@@ -34,18 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
-Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 14th 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -54,8 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
-
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -70,8 +69,11 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
+
## Disclaimer
-This document is provided on an "as is" basis and does not imply any kind of guarantee or warranty, including the warranties of merchantability or fitness for a particular use. Your use of the information on the document is at your own risk. EDB reserves the right to change or update this document at any time. Customers are therefore recommended to always view the latest version of this document.
\ No newline at end of file
+This document is provided on an "as is" basis and does not imply any kind of guarantee or warranty, including the warranties of merchantability or fitness for a particular use. Your use of the information on the document is at your own risk. EDB reserves the right to change or update this document at any time. Customers are therefore recommended to always view the latest version of this document.
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx5.mdx b/advocacy_docs/security/advisories/cve202341116.mdx
similarity index 64%
rename from advocacy_docs/security/advisories/cve2023xxxxx5.mdx
rename to advocacy_docs/security/advisories/cve202341116.mdx
index 66ae38b4c2d..13a342a4114 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx5.mdx
+++ b/advocacy_docs/security/advisories/cve202341116.mdx
@@ -1,19 +1,22 @@
---
-title: EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
-navTitle: CVE-2023-XXXXX-5
+title: CVE-2023-41116 - EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
+navTitle: CVE-2023-41116
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) up to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 using DBMS_MVIEW allows an authenticated user to refresh any materialized view, regardless of that user’s permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before
+11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and
+15.x before 15.4.0. It allows an authenticated user to refresh any materialized
+view, regardless of that user's permissions.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-5
+CVE-ID: CVE-2023-41116
CVSS Base Score: 4.3
@@ -34,18 +37,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
-Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.16 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.16 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -54,7 +57,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -70,7 +73,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx1.mdx b/advocacy_docs/security/advisories/cve202341117.mdx
similarity index 61%
rename from advocacy_docs/security/advisories/cve2023xxxxx1.mdx
rename to advocacy_docs/security/advisories/cve202341117.mdx
index 869b2456e8b..8fcbaacd774 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx1.mdx
+++ b/advocacy_docs/security/advisories/cve202341117.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path
-navTitle: CVE-2023-XXXXX-1
+title: CVE-2023-41117 - EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path
+navTitle: CVE-2023-41117
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain packages, standalone packages and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains packages, standalone packages, and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-1
+CVE-ID: CVE-2023-41117
CVSS Base Score: 8.8
@@ -34,16 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -52,7 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -67,7 +69,9 @@ Source: TBD
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx4.mdx b/advocacy_docs/security/advisories/cve202341118.mdx
similarity index 61%
rename from advocacy_docs/security/advisories/cve2023xxxxx4.mdx
rename to advocacy_docs/security/advisories/cve202341118.mdx
index 8df45b62dfa..5953cc41dde 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx4.mdx
+++ b/advocacy_docs/security/advisories/cve202341118.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass
-navTitle: CVE-2023-XXXXX-4
+title: CVE-2023-41118 - EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass
+navTitle: CVE-2023-41118
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-4
+CVE-ID: CVE-2023-41118
CVSS Base Score: 8.8
@@ -34,18 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
-Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -54,7 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -70,7 +70,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx2.mdx b/advocacy_docs/security/advisories/cve202341119.mdx
similarity index 60%
rename from advocacy_docs/security/advisories/cve2023xxxxx2.mdx
rename to advocacy_docs/security/advisories/cve202341119.mdx
index db7df82c0a1..ca40495a3ec 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx2.mdx
+++ b/advocacy_docs/security/advisories/cve202341119.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser
-navTitle: CVE-2023-XXXXX-2
+title: CVE-2023-41119 - EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser
+navTitle: CVE-2023-41119
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the function _dbms_aq_move_to_exception_queue which may be used to elevate a user’s privileges to superuser. This function accepts the OID of a table, then accesses that table as the superuser using SELECT and DML commands.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the function _dbms_aq_move_to_exception_queue that may be used to elevate a user's privileges to superuser. This function accepts the OID of a table, and then accesses that table as the superuser by using SELECT and DML commands.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-2
+CVE-ID: CVE-2023-41119
CVSS Base Score: 8.8
@@ -34,16 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -52,7 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -68,7 +70,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/cve2023xxxxx8.mdx b/advocacy_docs/security/advisories/cve202341120.mdx
similarity index 63%
rename from advocacy_docs/security/advisories/cve2023xxxxx8.mdx
rename to advocacy_docs/security/advisories/cve202341120.mdx
index cc5af3eeada..fb9c0411444 100644
--- a/advocacy_docs/security/advisories/cve2023xxxxx8.mdx
+++ b/advocacy_docs/security/advisories/cve202341120.mdx
@@ -1,19 +1,19 @@
---
-title: EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
-navTitle: CVE-2023-XXXXX-8
+title: CVE-2023-41120 - EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
+navTitle: CVE-2023-41120
---
First Published: 2023/08/21
-Last Updated: 2023/08/21
+Last Updated: 2023/08/30
## Summary
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 permit an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user’s permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It permits an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user's permissions.
## Vulnerability details
-CVE-ID: CVE-2023-XXXXX-8
+CVE-ID: CVE-2023-41120
CVSS Base Score: 6.5
@@ -34,18 +34,18 @@ EnterpriseDB Postgres Advanced Server (EPAS)
## Remediation/fixes
-Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
+Impacted users must upgrade to a fixed version of EPAS and then patch existing database instance clusters using edb_sqlpatch. Users running unsupported versions of EPAS should upgrade to receive these updates. For questions about updating, users can contact their account representative or [contact EDB](https://www.enterprisedb.com/contact).
| Product | VRMF | Remediation/First Fix |
|---------|------|-----------------------|
-| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/))
-| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) |
-| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/))
-| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/))
-| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/))
+| EPAS | All versions prior to 11.21.32 | Update to latest supported version (at least [11.21.32](/epas/11/epas_rel_notes/epas11_21_32_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 12.16.20 | Update to latest supported version (at least [12.16.20](/epas/12/epas_rel_notes/epas12_16_20_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 13.12.17 | Update to latest supported version (at least [13.12.17](/epas/13/epas_rel_notes/epas13_12_17_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 14.9.0 | Update to latest supported version (at least [14.9.0](/epas/14/epas_rel_notes/epas14_9_0_rel_notes/)) and patch existing clusters. |
+| EPAS | All versions prior to 15.4.0 | Update to latest supported version (at least [15.4.0](/epas/15/epas_rel_notes/epas15_4_0_rel_notes/)) and patch existing clusters. |
!!! Note Update
-No updates as of August 21st 2023
+30 Aug 2023 - Added recommendation to the Remediation section to patch existing clusters
!!!
!!! Warning Warning:
@@ -54,7 +54,7 @@ The patch modifies the definitions of system objects inside the database, some b
## References
-* [https://www.first.org/cvss/calculator/3.1](https://www.first.org/cvss/calculator/3.1)
+* [CVSS Calculator v3.1](https://www.first.org/cvss/calculator/3.1)
## Related information
@@ -69,7 +69,9 @@ EnterpriseDB
## Change history
-21 August 2023: Original Copy Published
+* 21 August 2023: Original Copy Published
+* 28 August 2023: Updated with assigned CVE number
+* 30 August 2023: Updated Remediation with recommendation to patch existing clusters
## Disclaimer
diff --git a/advocacy_docs/security/advisories/index.mdx b/advocacy_docs/security/advisories/index.mdx
index 36d55754a2e..9501d20e4ff 100644
--- a/advocacy_docs/security/advisories/index.mdx
+++ b/advocacy_docs/security/advisories/index.mdx
@@ -5,158 +5,153 @@ iconName: Security
hideKBLink: true
hideToC: false
navigation:
-- cve2023xxxxx8
-- cve2023xxxxx7
-- cve2023xxxxx6
-- cve2023xxxxx5
-- cve2023xxxxx4
-- cve2023xxxxx3
-- cve2023xxxxx2
-- cve2023xxxxx1
+- cve202341120
+- cve202341119
+- cve202341118
+- cve202341117
+- cve202341116
+- cve202341115
+- cve202341114
+- cve202341113
- cve202331043
- cve201910128
- cve20074639
---
-!!! Note
-Advisories with numbers in the format `CVE-YYYY-XXXXX-n` are submitted and pending full number assignment.
-!!!
-
## Updated 2023
-
EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain packages, standalone packages and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It permits an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user's permissions.
-Read More...
-
-
EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser
All EnterpriseDB Postgres Advanced Server (EPAS) versions prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the function _dbms_aq_move_to_exception_queue which may be used to elevate a user’s privileges to superuser. This function accepts the OID of a table, then accesses that table as the superuser using SELECT and DML commands.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the function _dbms_aq_move_to_exception_queue that may be used to elevate a user's privileges to superuser. This function accepts the OID of a table, and then accesses that table as the superuser by using SELECT and DML commands.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass
-
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
-
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 allow an authenticated user to to obtain information about whether certain files exist on disk, what errors if any occur when attempting to read them, and some limited information about their contents regardless of permissions. This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
-Read More...
-
-
-
-
-
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
+
+Read More...
+
EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
+Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains packages, standalone packages, and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
-Read More...
-
EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) up to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 using DBMS_MVIEW allows an authenticated user to refresh any materialized view, regardless of that user’s permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It allows an authenticated user to refresh any materialized view, regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) permission bypass for large objects
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) up to 11.21,32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the functions `get_url_as_text` and `get_url_as_bytea`. These functions are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. When using UTL_ENCODE, an authenticated user can read any large object, regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0, using UTL_ENCODE allows an authenticated user to read any large object, regardless of that users permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the functions get_url_as_text and get_url_as_bytea that are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 permit an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user’s permissions.
-
-Read More...
-
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It allows an authenticated user to obtain information about whether certain files exist on disk, what errors, if any, occur when attempting to read them, and some limited information about their contents (regardless of permissions). This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
+
+Read More...
+
+
+
CVE-2023-31043
diff --git a/advocacy_docs/security/index.mdx b/advocacy_docs/security/index.mdx
index 1780c405865..4dccdc88fbc 100644
--- a/advocacy_docs/security/index.mdx
+++ b/advocacy_docs/security/index.mdx
@@ -23,143 +23,145 @@ This policy outlines how EnterpriseDB handles disclosures related to suspected v
## Most Recent Advisories
-!!! Note
-Advisories with numbers in the format `CVE-YYYY-XXXXX-n` are submitted and pending full number assignment.
-!!!
-
-
EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain packages, standalone packages and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It permits an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user's permissions.
-Read More...
-
-
EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser
All EnterpriseDB Postgres Advanced Server (EPAS) versions prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the function _dbms_aq_move_to_exception_queue which may be used to elevate a user’s privileges to superuser. This function accepts the OID of a table, then accesses that table as the superuser using SELECT and DML commands.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the function _dbms_aq_move_to_exception_queue that may be used to elevate a user's privileges to superuser. This function accepts the OID of a table, and then accesses that table as the superuser by using SELECT and DML commands.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass
-
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
-
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 allow an authenticated user to to obtain information about whether certain files exist on disk, what errors if any occur when attempting to read them, and some limited information about their contents regardless of permissions. This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
-Read More...
-
-
-
-
-
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
+
+Read More...
+
EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path
+
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
+Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 may allow an authenticated user to bypass authorization requirements and access underlying implementation functions. When a superuser has configured file locations using CREATE DIRECTORY, these functions allow users to take a wide range of actions, including read, write, copy, rename, and delete.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains packages, standalone packages, and functions that run SECURITY DEFINER but are inadequately secured against search_path attacks.
-Read More...
-
EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) up to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 using DBMS_MVIEW allows an authenticated user to refresh any materialized view, regardless of that user’s permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It allows an authenticated user to refresh any materialized view, regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) permission bypass for large objects
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) up to 11.21,32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 contain the functions `get_url_as_text` and `get_url_as_bytea`. These functions are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. When using UTL_ENCODE, an authenticated user can read any large object, regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) permission bypass for materialized views
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0, using UTL_ENCODE allows an authenticated user to read any large object, regardless of that users permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It contains the functions get_url_as_text and get_url_as_bytea that are publicly executable, thus permitting an authenticated user to read any file from the local filesystem or remote system regardless of that user's permissions.
-Read More...
+Read More...
EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission
+ Read Advisory
+ Updated: 2023/08/30
+
EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory()
All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0, 15.4.0
Summary:
-All versions of EnterpriseDB Postgres Advanced Server (EPAS) prior to 11.21.32, 12.16.20, 13.12.17, 14.9.0 and 15.4.0 permit an authenticated user to use DBMS_PROFILER to remove all accumulated profiling data on a system-wide basis, regardless of that user’s permissions.
+An issue was discovered in EnterpriseDB Postgres Advanced Server (EPAS) before 11.21.32, 12.x before 12.16.20, 13.x before 13.12.16, 14.x before 14.9.0, and 15.x before 15.4.0. It allows an authenticated user to obtain information about whether certain files exist on disk, what errors, if any, occur when attempting to read them, and some limited information about their contents (regardless of permissions). This can occur when a superuser has configured one or more directories for filesystem access via CREATE DIRECTORY and adopted certain non-default settings for log_line_prefix and log_connections.
-Read More...
-
diff --git a/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx b/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx
index 50f01f1150d..5e388125e55 100644
--- a/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/configuration-options.mdx
@@ -1,11 +1,9 @@
---
title: Configuration options
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L687-#L767
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L691-#L771
---
diff --git a/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx b/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx
index ef7ae3b31da..f012be24440 100644
--- a/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/conflicts.mdx
@@ -1,11 +1,9 @@
---
title: Conflicts
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L673-#L685
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L677-#L689
---
diff --git a/advocacy_docs/supported-open-source/pglogical2/index.mdx b/advocacy_docs/supported-open-source/pglogical2/index.mdx
index c2b060d0c62..041c03c0cac 100644
--- a/advocacy_docs/supported-open-source/pglogical2/index.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/index.mdx
@@ -1,9 +1,7 @@
---
title: pglogical 2
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
navigation:
- index
- release-notes
diff --git a/advocacy_docs/supported-open-source/pglogical2/installation.mdx b/advocacy_docs/supported-open-source/pglogical2/installation.mdx
index 8285d463737..36ab4a7dfc3 100644
--- a/advocacy_docs/supported-open-source/pglogical2/installation.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/installation.mdx
@@ -1,11 +1,9 @@
---
title: Installation
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L55-#L160
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L55-#L164
---
@@ -36,6 +34,7 @@ If you don’t have PostgreSQL already:
- PostgreSQL 12: `yum install postgresql12-server postgresql12-contrib`
- PostgreSQL 13: `yum install postgresql13-server postgresql13-contrib`
- PostgreSQL 14: `yum install postgresql14-server postgresql14-contrib`
+ - PostgreSQL 15: `yum install postgresql15-server postgresql15-contrib`
Then install the “2ndQuadrant’s General Public” repository for your PostgreSQL
version, by running the following instructions as root on the destination Linux server:
@@ -48,6 +47,7 @@ version, by running the following instructions as root on the destination Linux
- PostgreSQL 12: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/12/rpm | bash`
- PostgreSQL 13: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/13/rpm | bash`
- PostgreSQL 14: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/14/rpm | bash`
+- PostgreSQL 15: `curl https://techsupport.enterprisedb.com/api/repository/dl/default/release/15/rpm | bash`
#### Installation
@@ -61,6 +61,7 @@ Once the repository is installed, you can proceed to pglogical for your PostgreS
- PostgreSQL 12: `yum install postgresql12-pglogical`
- PostgreSQL 13: `yum install postgresql13-pglogical`
- PostgreSQL 14: `yum install postgresql14-pglogical`
+- PostgreSQL 15: `yum install postgresql15-pglogical`
You may be prompted to accept the repository GPG key for package signing:
@@ -95,6 +96,7 @@ Once pre-requisites are complete, installing pglogical is simply a matter of exe
- PostgreSQL 12: `sudo apt-get install postgresql-12-pglogical`
- PostgreSQL 13: `sudo apt-get install postgresql-13-pglogical`
- PostgreSQL 14: `sudo apt-get install postgresql-14-pglogical`
+- PostgreSQL 15: `sudo apt-get install postgresql-15-pglogical`
## From source code
diff --git a/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx b/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx
index 425f6c98b6a..9c2cb3e8a4a 100644
--- a/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/limitations-and-restrictions.mdx
@@ -1,11 +1,9 @@
---
title: Limitations and restrictions
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L769-#L938
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L773-#L942
---
diff --git a/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx b/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx
index c9693084d36..e092f6379f7 100644
--- a/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/release-notes.mdx
@@ -1,14 +1,34 @@
---
title: Release Notes
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L953-#L993
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L957-#L1019
---
+## pglogical 2.4.3
+
+Version 2.4.3 is a maintenance release of pglogical 2.
+
+### Changes
+
+- Apply data filtering on the correct tuple during initial synchronization.
+
+- Restore the correct memory context while decoding a change.
+
+- Fix DROP DATABASE never completing on PostgreSQL 15.
+
+- Don't replicate TRUNCATE as a global message.
+
+## pglogical 2.4.2
+
+Version 2.4.2 is a maintenance release of pglogical 2.
+
+### Changes
+
+- Add support for PostgreSQL 15.
+
## pglogical 2.4.1
Version 2.4.1 is a maintenance release of pglogical 2.
diff --git a/advocacy_docs/supported-open-source/pglogical2/requirements.mdx b/advocacy_docs/supported-open-source/pglogical2/requirements.mdx
index 7803bce0b52..96748d21726 100644
--- a/advocacy_docs/supported-open-source/pglogical2/requirements.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/requirements.mdx
@@ -1,9 +1,7 @@
---
title: Requirements
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L36-#L53
@@ -24,4 +22,4 @@ be the same or weaker (more permissive) on the subscriber than the provider.
Tables must have the same `PRIMARY KEY`s. It is not recommended to add additional
`UNIQUE` constraints other than the `PRIMARY KEY` (see below).
-Some additional requirements are covered in [Limitations and Restrictions](limitations-and-restrictions.mdx).
\ No newline at end of file
+Some additional requirements are covered in [Limitations and Restrictions](limitations-and-restrictions).
diff --git a/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx b/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx
index e92e268f23b..61f096a3109 100644
--- a/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/synchronous-replication.mdx
@@ -1,11 +1,9 @@
---
title: Synchronous Replication
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L663-#L671
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L667-#L675
---
diff --git a/advocacy_docs/supported-open-source/pglogical2/usage.mdx b/advocacy_docs/supported-open-source/pglogical2/usage.mdx
index 473181e9c5b..dce68454c54 100644
--- a/advocacy_docs/supported-open-source/pglogical2/usage.mdx
+++ b/advocacy_docs/supported-open-source/pglogical2/usage.mdx
@@ -1,11 +1,9 @@
---
title: Usage
product: pglogical 2
-generatedBy: >-
- /workspaces/docs/scripts/source/pglogical2.js - re-run to regenerate from
- originalFilePath
+generatedBy: scripts/source/pglogical2.js - re-run to regenerate from originalFilePath
originalFilePath: >-
- https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L162-#L661
+ https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1#L166-#L665
---
diff --git a/docs/agreements/release-notes-guidelines.md b/docs/agreements/release-notes-guidelines.md
index 73bedfef8f1..9a8d80482f4 100644
--- a/docs/agreements/release-notes-guidelines.md
+++ b/docs/agreements/release-notes-guidelines.md
@@ -1,6 +1,28 @@
-# Guidelines for wording release notes
+# Guidelines for release notes
-## General guidelines
+## Structural guidelines
+
+Release notes start with the name of the product, the version number being released, and the words "release notes".
+Use this as the frontmatter title field. The frontmatter also needs a short navigation title
+(navTitle), which is just the version number preceded by the word "Version".
+
+This is followed, in the body of the content, by a date of release in the `Released: DD mmm YYYY` format:
+
+```
+---
+title: EDB Postgres Advanced Server 64.0 release notes
+navTitle: Version 64.0
+---
+
+Released: 1 Jan 2024
+
+```
+
+If the release notes are substantially updated after release, add an `Updated: DD mmm YYYY` line, or update any existing `Updated:` line.
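+
+For example, after a substantive update the top of the body might read (dates are illustrative):
+
+```
+Released: 1 Jan 2024
+
+Updated: 15 Feb 2024
+```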
+
+## Guidelines for wording release notes
+
+### General guidelines
- For features and enhancements, use second person (you) instead of third person (the user).
- For bug fixes, avoid referring to the user. Instead, describe the software behavior. If the description is clearer by referring to the user, use third person.
@@ -10,11 +32,11 @@
- Use contractions.
- Use serial (aka Oxford) commas.
-## Phrasing guidelines for enhancements
+### Phrasing guidelines for enhancements
You can use either of the forms described in the following sections for enhancements. Add as much detail as needed to convey the relevance of the enhancement. If you need to describe how the product worked before, use “previously,” not “currently.”
-### Past tense description of the development work
+#### Past tense description of the development work
Start with a past-tense word that describes what you did. Then provide any relevant detail.
@@ -24,7 +46,7 @@ Examples:
- Added the ability to copy probes and alerts to all servers in a group without having to select them individually.
- Enabled multi-insert support for the dynamic partition for EDB*Loader and COPY command.
-### “Now” phrase
+#### “Now” phrase
Describe what the product does now as a result of the enhancement, generally in the form: The *product/feature* now *does the new behavior*.
@@ -32,7 +54,7 @@ Examples:
- EDB Postgres Advanced Server now provides INDEX and NO_INDEX hints for the partitioned table. The optimizer hints apply to the inherited index in the partitioned table. The execution plan internally expands to include the corresponding inherited child indexes and applies them in later processing.
- The INTO clause now accepts multiple composite row type targets in SPL. This enhancement allows you to assign a SELECT list having a mix of scalar and composite type values that are fetched from a table to corresponding scalar or composite variables (including collection variables) in the SPL code.
-### Enhancement don’ts
+#### Enhancement don’ts
Don’t start the enhancement with a gerund, which suggests the work is still in progress:
- **Correct**: Added the XYZ function.
@@ -44,7 +66,7 @@ Don’t use a title or short description:
- **Correct**: You can now configure the durability options, such as Group Commit, CAMO, Eager Replication, and Lag Control, through Commit Scope.
- **Incorrect**: Unified replication durability configuration - The durability options such as Group Commit, CAMO, Eager Replication, or Lag Control are now all configured through Commit Scope configuration.
-## Bug fix writing guidelines
+### Bug fix writing guidelines
Start bug fix descriptions with:
- Fixed an issue whereby…
diff --git a/gatsby-node.js b/gatsby-node.js
index 23095d76204..edc5035c1d3 100644
--- a/gatsby-node.js
+++ b/gatsby-node.js
@@ -3,7 +3,6 @@ const realFs = require("fs");
const path = require("path");
const gracefulFs = require("graceful-fs");
gracefulFs.gracefulify(realFs);
-
const { createFilePath } = require(`gatsby-source-filesystem`);
const { exec, execSync } = require("child_process");
@@ -22,6 +21,7 @@ const {
findPrevNextNavNodes,
preprocessPathsAndRedirects,
configureRedirects,
+ reportRedirectCollisions,
configureLegacyRedirects,
readFile,
writeFile,
@@ -53,7 +53,7 @@ const gitData = (() => {
.replace(/^refs\/tags\//, "");
sha = sha.trim();
- return { branch, sha };
+ return { branch, sha, docsRepoUrl: "https://github.com/EnterpriseDB/docs" };
})();
exports.onCreateNode = async ({
@@ -292,34 +292,14 @@ exports.createPages = async ({ actions, graphql, reporter }) => {
// determine next and previous nodes
const prevNext = findPrevNextNavNodes(navTree, curr);
- const { docType } = node.fields;
-
- const isLatest =
- docType === "doc"
- ? productVersions[node.fields.product][0] === node.fields.version
- : false;
-
- // all versions for this path.
- // Null entries for versions that don't exist. Will try to match redirects to avoid this, but won't follow redirect chains
- // Canonical version is the first non-null in the list, e.g. pathVersions.filter((p) => !!p)[0]
- const allPaths = [node.fields.path, ...(node.frontmatter?.redirects || [])];
- const pathVersions = (productVersions[node.fields.product] || []).map(
- (v, i) => {
- const versionPaths = allPaths.map((p) => replacePathVersion(p, v));
- const match = versionPaths.find((vp) => validPaths.has(vp));
- if (!match) return null;
- return i === 0 ? replacePathVersion(match) : match;
- },
- );
-
- configureRedirects(
- node.fields.path,
- node.frontmatter.redirects,
+ const pathVersions = configureRedirects(
+ productVersions,
+ node,
+ validPaths,
actions,
- isLatest,
- pathVersions,
);
+ const { docType } = node.fields;
if (docType === "doc") {
createDoc(
navTree,
@@ -330,9 +310,10 @@ exports.createPages = async ({ actions, graphql, reporter }) => {
actions,
);
} else if (docType === "advocacy") {
- createAdvocacy(navTree, prevNext, node, learn, actions);
+ createAdvocacy(navTree, prevNext, node, productVersions, learn, actions);
}
}
+ reportRedirectCollisions(validPaths, reporter);
};
const createDoc = (
@@ -358,24 +339,8 @@ const createDoc = (
});
}
- const isIndexPage = isPathAnIndexPage(doc.fileAbsolutePath);
- const docsRepoUrl = "https://github.com/EnterpriseDB/docs";
- // don't encourage folks to edit on main - set the edit links to develop in production builds
- const branch = gitData.branch === "main" ? "develop" : gitData.branch;
- const fileUrlSegment =
- removeTrailingSlash(doc.fields.path) +
- (isIndexPage ? "/index.mdx" : ".mdx");
- const githubFileLink = `${docsRepoUrl}/blob/${gitData.sha}/product_docs/docs${fileUrlSegment}`;
- const githubFileHistoryLink = `${docsRepoUrl}/commits/${gitData.sha}/product_docs/docs${fileUrlSegment}`;
- const githubEditLink = `${docsRepoUrl}/edit/${branch}/product_docs/docs${fileUrlSegment}`;
- const githubIssuesLink = `${docsRepoUrl}/issues/new?title=${encodeURIComponent(
- `Feedback on ${doc.fields.product} ${doc.fields.version} - "${doc.frontmatter.title}"`,
- )}&context=${encodeURIComponent(
- `${githubFileLink}\n`,
- )}&template=problem-with-topic.yaml`;
const template = doc.frontmatter.productStub ? "doc-stub.js" : "doc.js";
const path = isLatest ? replacePathVersion(doc.fields.path) : doc.fields.path;
- const deepToC = doc.frontmatter.deepToC != true ? false : true;
actions.createPage({
path: path,
@@ -385,12 +350,9 @@ const createDoc = (
pagePath: path,
navTree,
prevNext,
+ productVersions,
versions: productVersions[doc.fields.product],
nodeId: doc.id,
- githubFileLink: githubFileHistoryLink,
- githubEditLink: githubEditLink,
- githubIssuesLink: githubIssuesLink,
- isIndexPage: isIndexPage,
pathVersions,
},
});
@@ -418,7 +380,14 @@ const createDoc = (
});
};
-const createAdvocacy = (navTree, prevNext, doc, learn, actions) => {
+const createAdvocacy = (
+ navTree,
+ prevNext,
+ doc,
+ productVersions,
+ learn,
+ actions,
+) => {
// configure legacy redirects
configureLegacyRedirects({
toPath: doc.fields.path,
@@ -433,22 +402,6 @@ const createAdvocacy = (navTree, prevNext, doc, learn, actions) => {
(node) => node.fields.topic === doc.fields.topic,
);
- const advocacyDocsRepoUrl = "https://github.com/EnterpriseDB/docs";
- // don't encourage folks to edit on main - set the edit links to develop in production builds
- const branch = gitData.branch === "main" ? "develop" : gitData.branch;
- const isIndexPage = isPathAnIndexPage(doc.fileAbsolutePath);
- const fileUrlSegment =
- removeTrailingSlash(doc.fields.path) +
- (isIndexPage ? "/index.mdx" : ".mdx");
- const githubFileLink = `${advocacyDocsRepoUrl}/blob/${gitData.sha}/advocacy_docs${fileUrlSegment}`;
- const githubFileHistoryLink = `${advocacyDocsRepoUrl}/commits/${gitData.sha}/advocacy_docs${fileUrlSegment}`;
- const githubEditLink = `${advocacyDocsRepoUrl}/edit/${branch}/advocacy_docs${fileUrlSegment}`;
- const githubIssuesLink = `${advocacyDocsRepoUrl}/issues/new?title=${encodeURIComponent(
- `Regarding "${doc.frontmatter.title}"`,
- )}&context=${encodeURIComponent(
- `${githubFileLink}\n`,
- )}&template=problem-with-topic.yaml`;
-
actions.createPage({
path: doc.fields.path,
component: require.resolve("./src/templates/learn-doc.js"),
@@ -458,11 +411,8 @@ const createAdvocacy = (navTree, prevNext, doc, learn, actions) => {
pagePath: doc.fields.path,
navLinks: navLinks,
prevNext,
+ productVersions,
navTree,
- githubFileLink: githubFileHistoryLink,
- githubEditLink: githubEditLink,
- githubIssuesLink: githubIssuesLink,
- isIndexPage: isIndexPage,
},
});
@@ -629,6 +579,34 @@ exports.onPostBuild = async ({ graphql, reporter, pathPrefix }) => {
path.join(__dirname, "/public/netlify.toml"),
);
+ //
+ // get rid of compilation hash - speeds up netlify deploys
+ //
+ const { globby } = await import("globby");
+ const generatedHTML = await globby([
+ path.join(__dirname, "/public/**/*.html"),
+ ]);
+ for (let filename of generatedHTML) {
+ let file = await readFile(filename);
+ file = file.replace(
+ /window\.___webpackCompilationHash="[^"]+"/,
+ 'window.___webpackCompilationHash=""',
+ );
+ await writeFile(filename, file);
+ }
+ const appDataFilename = path.join(
+ __dirname,
+ "/public/page-data/app-data.json",
+ );
+ const appData = await readFile(appDataFilename);
+ await writeFile(
+ appDataFilename,
+ appData.replace(
+ /"webpackCompilationHash":"[^"]+"/,
+ '"webpackCompilationHash":""',
+ ),
+ );
+
//
// additional headers
//
diff --git a/install_template/templates/products/edb-odbc-connector/index.njk b/install_template/templates/products/edb-odbc-connector/index.njk
index dabf910bef1..f741d70e5bf 100644
--- a/install_template/templates/products/edb-odbc-connector/index.njk
+++ b/install_template/templates/products/edb-odbc-connector/index.njk
@@ -12,7 +12,6 @@ redirects:
- ../03_edb-odbc_overview/01_installing_edb-odbc
- /odbc_connector/{{ product.version }}/03_installing_edb_odbc/
- /odbc_connector/{{ product.version }}/03_installing_edb_odbc/01_installing_linux/
- - /odbc_connector/{{ product.version }}/03_installing_edb_odbc/14_installing_windows/
- /odbc_connector/{{ product.version }}/03_installing_edb_odbc/01_installing_linux/07_odbc13_ubuntu20_deb10_x86/
- /odbc_connector/{{ product.version }}/03_installing_edb_odbc/01_installing_linux/ibm_power_ppc64le/12_odbc13_sles12_ppcle/
{% endblock frontmatter %}
diff --git a/install_template/templates/products/edb-pgbouncer/base.njk b/install_template/templates/products/edb-pgbouncer/base.njk
index 22565551ded..a940018ecc3 100644
--- a/install_template/templates/products/edb-pgbouncer/base.njk
+++ b/install_template/templates/products/edb-pgbouncer/base.njk
@@ -11,8 +11,6 @@
deployPath: pgbouncer/{{ product.version }}/installing/linux_{{platform.arch}}/pgbouncer_{{deploy.map_platform[platform.name]}}.mdx
redirects:
- /pgbouncer/{{ product.version }}/01_installation/install_on_linux/{{deploy.expand_arch[platform.arch]}}/pgbouncer_{{deploy.map_platform_old[platform.name]}}_{{platform.arch | replace(r/_?64/g, "")}}.mdx
- - /pgbouncer/{{ product.version }}/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/{{ product.version }}/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
{% endblock frontmatter %}
diff --git a/install_template/templates/products/edb-pgbouncer/index.njk b/install_template/templates/products/edb-pgbouncer/index.njk
index 77c2b3fd9e5..0e97a334961 100644
--- a/install_template/templates/products/edb-pgbouncer/index.njk
+++ b/install_template/templates/products/edb-pgbouncer/index.njk
@@ -12,6 +12,8 @@ redirects:
- ../03_installing_pgbouncer_on_an_sles_host
- ../01_installation
- ../01_installation/install_on_linux/
+ - /pgbouncer/{{ product.version }}/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
+ - /pgbouncer/{{ product.version }}/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
{% endblock frontmatter %}
{% block navigation %}
- linux_x86_64
diff --git a/install_template/templates/products/failover-manager/index.njk b/install_template/templates/products/failover-manager/index.njk
index 5c77253f707..4d64de47777 100644
--- a/install_template/templates/products/failover-manager/index.njk
+++ b/install_template/templates/products/failover-manager/index.njk
@@ -7,7 +7,6 @@ redirects:
- ../efm_user/03_installing_efm
- 13_initial_config
- /efm/4/03_installing_efm/
- - /efm/4.0/efm_user/02_failover_manager_overview/01_prerequisites/
- /efm/4/03_installing_efm/x86_amd64/08_efm4_deb9_x86/
- /efm/4/03_installing_efm/02_efm4_other_linux8_x86/
- /efm/4/03_installing_efm/04_efm4_centos7_x86/
diff --git a/install_template/templates/products/postgres-enterprise-manager-server/base.njk b/install_template/templates/products/postgres-enterprise-manager-server/base.njk
index 1139b0a8bbc..dc748eda2e5 100644
--- a/install_template/templates/products/postgres-enterprise-manager-server/base.njk
+++ b/install_template/templates/products/postgres-enterprise-manager-server/base.njk
@@ -14,8 +14,6 @@ redirects:
- /pem/{{ product.version }}/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_{{deploy.map_platform_old[platform.name]}}_{{platform.arch | replace(r/_?64/g, "")}}/
- /pem/{{ product.version }}/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_{{deploy.map_platform_old[platform.name]}}_{{platform.arch | replace(r/_?64/g, "")}}/
- /pem/{{ product.version }}/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_{{deploy.map_platform_old[platform.name]}}_{{platform.arch | replace(r/_?64/g, "")}}/
- - /pem/{{ product.version }}/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/{{ product.version }}/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
{% endblock frontmatter %}
diff --git a/install_template/templates/products/postgres-enterprise-manager-server/index.njk b/install_template/templates/products/postgres-enterprise-manager-server/index.njk
index 10fd2405331..9449cb3fd1d 100644
--- a/install_template/templates/products/postgres-enterprise-manager-server/index.njk
+++ b/install_template/templates/products/postgres-enterprise-manager-server/index.njk
@@ -12,10 +12,8 @@ redirects:
- /pem/{{ product.version if product.version < 9 else "latest" }}/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/
- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/
- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/pem_server_inst_linux/
-- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/pem_server_inst_linux/
-- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/pem_server_inst_linux/configuring_the_pem_server_on_linux/
-- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/pem_server_inst_windows/
-- /pem/{{ product.version if product.version < 9 else "latest" }}/installing_pem_server/prerequisites_for_installing_pem_server/
+- /pem/{{ product.version }}/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
+- /pem/{{ product.version }}/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
{% endblock frontmatter %}
{% block navigation %}
diff --git a/product_docs/docs/biganimal/release/free_trial/detail/experiment/import_data.mdx b/product_docs/docs/biganimal/release/free_trial/detail/experiment/import_data.mdx
index fbc00dbd960..fea2f96b028 100644
--- a/product_docs/docs/biganimal/release/free_trial/detail/experiment/import_data.mdx
+++ b/product_docs/docs/biganimal/release/free_trial/detail/experiment/import_data.mdx
@@ -7,7 +7,7 @@ PostgreSQL includes a variety of ways to import data. Here, we'll show how to im
For this demonstration, we're going to import batter data from the [Baseball Databank](https://github.com/chadwickbureau/baseballdatabank), which is in CSV form. While it's easy to import the data using [PostgreSQL's COPY command](https://www.postgresql.org/docs/current/sql-copy.html), we'll need to first define a table to put that data into.
-We're going to [add a database](/biganimal/latest/using_cluster/01_postgres_access/#one-database-with-one-application) called "baseball," which we'll populate with some Major League Baseball statistics.
+We're going to add a database called "baseball," which we'll populate with some Major League Baseball statistics.
```sql
create database baseball;
diff --git a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx
index fc55060c4d4..d11fe0dd5a3 100644
--- a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx
+++ b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx
@@ -8,19 +8,22 @@ When you create a distributed high-availability cluster, you need to set up the
1. On the **Nodes Settings** tab, in the **Nodes** section, select **Two Data Nodes** or **Three Data Nodes**.
- For more information on node architecture, see [Distributed high availability (Preview)](/biganimal/latest/overview/02_high_availability/#distributed-high-availability-preview).
+ For more information on node architecture, see [Distributed high availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability).
1. In the **Database Type** section:
- 1. Select the type of Postgres you want to use in the **Postgres Type** field:
+ 1. Select the type of Postgres you want to use in the **Postgres Type** field:
- **[EDB Postgres Advanced Server](/epas/latest/)** is EDB's Oracle-compatible database offering. View [a quick demonstration of Oracle compatibility on BigAnimal](../../using_cluster/06_demonstration_oracle_compatibility).
- - **[EDB Postgres Extended Server](/pge/latest/)** is EDB's advanced logical replication, PostgreSQL-compatible database offering.
-
- 1. In the **Postgres Version** list, select either 14 or 15 as the version of Postgres that you want to use.
+ - **[EDB Postgres Extended Server](/pge/latest/)** is EDB's advanced logical replication, PostgreSQL-compatible database offering.
+
+ 1. In the **Postgres Version** list, select 14 or 15 as the version of Postgres that you want to use.
-1. Select the settings for your cluster according to [Creating a cluster](/biganimal/latest/getting_started/creating_a_cluster/). Find the instructions for the **Node Settings** tab in [Cluster Settings tab](../creating_a_cluster/#cluster-settings-tab) and [Additional Settings tab](../creating_a_cluster/#additional-settings-tab).
+1. Select the settings for your cluster according to [Creating a cluster](/biganimal/latest/getting_started/creating_a_cluster/). Find the instructions for the **Node Settings** tab in the [Cluster Settings tab](../creating_a_cluster/#cluster-settings-tab) and [Additional Settings tab](../creating_a_cluster/#additional-settings-tab).
+
+ !!!tip
+ When choosing your storage options, for most workloads, consider using at least 20 GB of storage.
1. In the **Parameters** section on the **DB Configuration** tab, you can update the value of the database configuration parameters for the data group as needed.
@@ -36,7 +39,7 @@ After creating the first data group, you can create a second data group for your
By default, the settings for your first data group populate the second data group's settings. However, you can change certain settings if you want to; just be aware that your changes can affect the settings for the entire cluster. The database type and cloud provider must be consistent across both data groups. The data groups and the witness group must all be in different regions. Otherwise, you can choose the second data group's settings as needed.
-When choosing the number of data nodes for the second data group, see [Distributed high availability (Preview)](/biganimal/latest/overview/02_high_availability/#distributed-high-availability-preview) for information on node architecture.
+When choosing the number of data nodes for the second data group, see [Distributed high availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability) for information on node architecture.
!!! Note
To maintain high availability, BigAnimal doesn't allow the maintenance windows of data groups to overlap.
\ No newline at end of file
diff --git a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/index.mdx b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/index.mdx
index 28f72719af7..931630f5e0f 100644
--- a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/index.mdx
+++ b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/index.mdx
@@ -16,7 +16,6 @@ Before creating your cluster, make sure you have enough resources. Without enoug
!!!note "When using BigAnimal's cloud"
The following options aren't available when creating your cluster:
-- Distributed High Availability for cluster type
- AWS IAM authentication
- Superuser access
- PgBouncer
@@ -44,7 +43,7 @@ The following options aren't available when creating your cluster:
- [Primary/Standby High Availability](/biganimal/latest/overview/02_high_availability/#primarystandby-high-availability) creates a cluster with one primary and one or two standby replicas in different availability zones. You can create primary/standby high-availability clusters running PostgreSQL or EDB Postgres Advanced Server. Only primary/standby high-availability clusters allow you to enable read-only workloads for users. However, if you enable read-only workloads, then you might have to raise the IP address resource limits for the cluster.
- - [Distributed High Availability (Preview)](/biganimal/latest/overview/02_high_availability/#distributed-high-availability-preview) creates a cluster, powered by EDB Postgres Distributed, with up to two data groups spread across multiple cloud regions to deliver higher performance and faster recovery. See [Creating a distributed high-availability cluster](creating_an_eha_cluster) for instructions.
+ - [Distributed High Availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability) creates a cluster, powered by EDB Postgres Distributed, with up to two data groups spread across multiple cloud regions to deliver higher performance and faster recovery. See [Creating a distributed high-availability cluster](creating_an_eha_cluster) for instructions.
See [Supported cluster types](/biganimal/latest/overview/02_high_availability/) for more information about the different cluster types.
@@ -95,25 +94,26 @@ The following options aren't available when creating your cluster:
To maximize your disk size for AWS, select R5b as your instance and then io2 Block Express as your storage to get a maximum disk size of 64 TB and 256,000 IOPS.
1. In the **Storage** section, from the **Volume Type** list, select your volume type.
- - For Azure, in **Volume Type**, select **Premium SSD** or **Ultra Disk**. Compared to Premium SSD volumes, ultra disks offer lower-latency, high-performance options and direct control over your disk's input/output operations per second (IOPS). For BigAnimal, we recommend using ultra disks for workloads that require the most demanding performance. See [Using Azure ultra disks](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal) for more information.
- - For Premium SSD, in **Volume Properties**, select the type and amount of storage needed for your cluster. See [Azure Premium SSD storage types](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) for more information.
+ - For Azure, in **Volume Type**, select **Premium SSD** or **Ultra Disk**. Compared to Premium SSD volumes, ultra disks offer lower-latency, high-performance options and direct control over your disk's input/output operations per second (IOPS). For BigAnimal, we recommend using ultra disks for workloads that require the most demanding performance. See [Using Azure ultra disks](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal) for more information.
+
+ - For Premium SSD, in **Volume Properties**, select the type and amount of storage needed for your cluster. See [Azure Premium SSD storage types](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) for more information.
- - For ultra disk, in **Volume Properties**, select the disk size and IOPS for your cluster. BigAnimal calculates disk throughput based on your IOPS settings, but you have the option of updating the value.
+ - For ultra disk, in **Volume Properties**, select the disk size and IOPS for your cluster. BigAnimal calculates disk throughput based on your IOPS settings, but you have the option of updating the value.
- !!!important
- While setting the required IOPS for the disk that you selected, consider the VM limits that are tied to the VM size that you selected. See [Ultra disk IOPS](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disk-iops) for more information.
+ !!!important
+ While setting the required IOPS for the disk that you selected, consider the VM limits that are tied to the VM size that you selected. See [Ultra disk IOPS](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disk-iops) for more information.
- - For AWS, in **Volume Type**, select **General Purpose SSD (GP3)**, **io2**, or **io2 Block Express**.
+ - For AWS, in **Volume Type**, select **General Purpose SSD (GP3)**, **io2**, or **io2 Block Express**.
- !!!Note
- io2 Block Express is available for selected instance types, such as R5b. However, you can't switch between io2 and io2 Block Express after creating your cluster.
- !!!
+ !!!Note
+ io2 Block Express is available for selected instance types, such as R5b. However, you can't switch between io2 and io2 Block Express after creating your cluster.
+ !!!
- In **Volume Properties**, select the disk size for your cluster, and configure the IOPS.
+ In **Volume Properties**, select the disk size for your cluster, and configure the IOPS.
- - For Google Cloud, in **Volume Type**, select **SSD Persistent Disk**.
+ - For Google Cloud, in **Volume Type**, select **SSD Persistent Disk**.
- In **Volume Properties**, select the disk size for your cluster, and configure the IOPS.
+ In **Volume Properties**, select the disk size for your cluster, and configure the IOPS.
2. ##### Network, Logs, & Telemetry section
@@ -183,7 +183,7 @@ For information on replication lag while using read-only workloads, see [Standby
!!! Note
Enabling PgBouncer incurs additional costs. For more information, see [PgBouncer costs](../../pricing_and_billing/#pgbouncer-costs).
-Enable **PgBouncer** to have it manage your connections to Postgres databases and help your workloads run more efficiently. To learn more about PgBouncer, see [Connection poolers](/biganimal/latest/overview/poolers/#edb-pgbouncer).
+Enable **PgBouncer** to have it manage your connections to Postgres databases and help your workloads run more efficiently — all entirely managed by BigAnimal. Learn more about [EDB PgBouncer](/biganimal/latest/overview/poolers/).
Use the **PgBouncer Configuration Settings** menu to set PgBouncer-specific settings. Select the **Read-Write** and **Read-Only** tabs according to the type of connection you want to configure. The **Read-Only** tab is available if you're creating a primary/standby high-availability cluster and have enabled read-only workloads.
diff --git a/product_docs/docs/biganimal/release/knowledge_base.mdx b/product_docs/docs/biganimal/release/knowledge_base.mdx
index 25a3c1c7f69..2e764a8b062 100644
--- a/product_docs/docs/biganimal/release/knowledge_base.mdx
+++ b/product_docs/docs/biganimal/release/knowledge_base.mdx
@@ -27,9 +27,8 @@ See the following articles for step-by-step instructions for creating links to r
- [Using edb_dblink_oci](https://support.biganimal.com/hc/en-us/articles/5528996025497-How-to-create-a-database-link-from-EnterpriseDB-PostgreSQL-Advanced-Server-EPAS-cluster-to-remote-Oracle-server)
- [Using dblink_ora_connect()](https://support.biganimal.com/hc/en-us/articles/11737021242649-How-to-establish-a-database-link-from-EPAS-cluster-to-a-remote-Oracle-server-using-dblink-ora-connect-function)
-## Configuring self-hosted connection poolers
+## Deploying PgBouncer outside of BigAnimal
-See the following articles for step-by-step instructions for setting up self-hosted connection poolers:
+See the following article for step-by-step instructions for deploying PgBouncer outside of BigAnimal:
- [How to configure PgBouncer with a BigAnimal cluster](https://support.biganimal.com/hc/en-us/articles/4848726654745-How-to-configure-PgBouncer-with-BigAnimal-Cluster)
-- [How to configure Pgpool-II with a BigAnimal cluster](https://support.biganimal.com/hc/en-us/articles/5475273722009-How-to-configure-Pgpool-with-BigAnimal-Cluster)
\ No newline at end of file
diff --git a/product_docs/docs/biganimal/release/migration/index.mdx b/product_docs/docs/biganimal/release/migration/index.mdx
index b4a94fdd34d..89629048058 100644
--- a/product_docs/docs/biganimal/release/migration/index.mdx
+++ b/product_docs/docs/biganimal/release/migration/index.mdx
@@ -6,14 +6,16 @@ EDB provides migration tools to bring data from Oracle, PostgresSQL, and EDB Pos
## Migrating from Oracle
-[Migration Portal](/migration_portal/latest) provides the details for executing the migration steps using Migration Portal:
+For helpful considerations and information when migrating from Oracle, review the EDB [Migration Handbook](/migrating/oracle/).
+
+EDB also provides [Migration Portal](/migration_portal/latest), a tool that details the steps for executing the migration:
1. [Schema extraction](/migration_portal/latest/04_mp_migrating_database/01_mp_schema_extraction/)
2. [Schema assessment](/migration_portal/latest/04_mp_migrating_database/02_mp_schema_assessment/)
3. [Schema migration](/migration_portal/latest/04_mp_migrating_database/03_mp_schema_migration/)
4. [Data migration](/migration_portal/latest/04_mp_migrating_database/04_mp_data_migration/)
-You can also use the Migration Toolkit for the data migration step. This toolkit is a good option for smaller databases.
+You can also use the [Migration Toolkit](/migration_toolkit/latest/) for the data migration step. This toolkit is a good option for smaller databases.
## Accessing remote Oracle servers from BigAnimal
diff --git a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx
index 188e2b8151f..29b134f018a 100644
--- a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx
+++ b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx
@@ -1,5 +1,6 @@
---
title: "Supported cluster types"
+deepToC: true
redirects:
- 02_high_availibility
---
@@ -7,11 +8,13 @@ redirects:
BigAnimal supports three cluster types:
- Single node
- Primary/standby high availability
-- Distributed high availability (Preview)
+- Distributed high availability
You choose the type of cluster you want on the [Create Cluster](https://portal.biganimal.com/create-cluster) page in the [BigAnimal](https://portal.biganimal.com) portal.
-Postgres distribution and version support varies by cluster and deployment type.
+## Choosing your Postgres distribution
+
+Postgres distribution and version support varies by [cluster](#choosing-your-cluster-type-and-configuration) and [deployment](deployment_options) type.
| Postgres distribution | Versions | Cluster type | Deployment type |
| ---------------------------- | -------- | ------------------------------ | ----------------------- |
@@ -20,9 +23,22 @@ Postgres distribution and version support varies by cluster and deployment type.
| EDB Postgres Advanced Server | 12–15 | Single-node, primary/standby high-availability | Your cloud account |
| EDB Postgres Advanced Server | 14–15 | Single node, primary/standby high availability | BigAnimal cloud account |
| EDB Postgres Advanced Server | 14–15 | Distributed high-availability | Your cloud account |
-| EDB Postgres Extended Server | 14-15 | Distributed high-availability | Your cloud account |
+| EDB Postgres Extended Server | 14–15 | Distributed high-availability | Your cloud account |
+
+## Choosing your cluster type and configuration
+
+| Consideration | [Single node](#single-node) | [Primary/standby HA](#primarystandby-high-availability) | [HA + standby replica](#standby-replicas) | [Distributed HA single region](#single-data-location) | [Distributed HA multi-region](#two-data-locations-and-witness) |
+|--------------------------|-----------|------------------|--------------------|----------------------------|---------------------------|
+| Data replication | None | Physical | Physical | Logical | Logical |
+| Region | Single | Single | Multi | Single | Multi |
+| VM failure tolerance | ✅ | ✅ | ✅ | ✅ | ✅ |
+| AZ failure tolerance | TBD | ✅ | ✅ | ✅ | ✅ |
+| Region failure tolerance | TBD | TBD | ✅ | TBD | ✅ |
+| Recovery time objective | varies | 35s-60s | varies | 0 | 0 |
+| Recovery point objective | <5 min | 0 | <5 min | 0 | 30s (configurable) |
+| Service level agreement | 99.5% | 99.99% | 99.99% | 99.99% | 99.995% |
-## Single node
+### Single node
For nonproduction use cases where high availability isn't a primary concern, a cluster deployment with high availability not enabled provides one primary with no standby replicas for failover or read-only workloads.
@@ -30,7 +46,7 @@ In case of unrecoverable failure of the primary, a restore from a backup is requ
![BigAnimal Cluster4](images/single-node.png)
-## Primary/standby high availability
+### Primary/standby high availability
The Primary/Standby High Availability option is provided to minimize downtime in cases of failures. Primary/standby high-availability clusters—one *primary* and one or two *standby replicas*—are configured automatically, with standby replicas staying up to date through physical streaming replication.
@@ -44,7 +60,7 @@ In case of temporary or permanent unavailability of the primary, a standby repli
Incoming client connections are always routed to the current primary. In case of failure of the primary, a standby replica is promoted to primary, and new connections are routed to the new primary. When the old primary recovers, it rejoins the cluster as a standby replica.
-### Standby replicas
+#### Standby replicas
By default, replication is synchronous to one standby replica and asynchronous to the other. That is, one standby replica must confirm that a transaction record was written to disk before the client receives acknowledgment of a successful commit.
@@ -56,7 +72,7 @@ To ensure write availability, BigAnimal disables synchronous replication during
Since BigAnimal replicates to only one node synchronously, some standby replicas in three-node clusters might experience replication lag. Also, if you override the BigAnimal synchronous replication configuration, then the standby replicas are inconsistent.
-## Distributed high availability (Preview)
+## Distributed high availability
Distributed high-availability clusters are powered by [EDB Postgres Distributed](/pgd/latest/) using multi-master logical replication to deliver more advanced cluster management compared to a physical replication-based system. Distributed high-availability clusters offer the ability to deploy a cluster across multiple regions or a single region. For use cases where high availability across regions is a major concern, a cluster deployment with distributed high availability enabled can provide one region with three data nodes, another region with the same configuration, and one group with a witness node in a third region for a true active-active solution.
@@ -68,7 +84,16 @@ Distributed high-availability clusters contain one or two data groups. Your data
The witness node/witness group doesn't host data but exists for management purposes, supporting operations that require a consensus, for example, in case of an availability zone failure.
-A single-region configuration with three data nodes (one lead and two shadow nodes each in separate availability zones) is available. However, if you're looking for a true active-active solution that protects against regional failures, select a three-region configuration with:
+!!!Note
+ Operations against a distributed high-availability cluster use the [EDB Postgres Distributed switchover](/pgd/latest/cli/command_ref/pgd_switchover/) feature, which provides sub-second interruptions during planned lifecycle operations.
+
+#### Single data location
+
+A single data location configuration has three data nodes with one lead and two shadow nodes each in separate availability zones.
+
+#### Two data locations and witness
+
+For a true active-active solution that protects against regional failures, a two-data-locations configuration has:
- A data node, shadow node, and a witness node in one region
- The same configuration in another region
@@ -76,6 +101,8 @@ A single-region configuration with three data nodes (one lead and two shadow nod
![region(2 data + 1 shadow) + region(2 data + 1 shadow) + region(1 witness)](images/Multi-Region-3Nodes.png)
+
+
### For more information
For instructions on creating a distributed high-availability cluster using the BigAnimal portal, see [Creating a distributed high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster/).
diff --git a/product_docs/docs/biganimal/release/overview/extensions_tools.mdx b/product_docs/docs/biganimal/release/overview/extensions_tools.mdx
index c4d368a395d..7e8c0139be9 100644
--- a/product_docs/docs/biganimal/release/overview/extensions_tools.mdx
+++ b/product_docs/docs/biganimal/release/overview/extensions_tools.mdx
@@ -30,4 +30,5 @@ EDB develops and maintains several extensions and tools. These include:
- [PG Failover Slots](/pg_extensions/pg_failover_slots/) — Is an extension released as open source software under the PostgreSQL License. If you have logical replication publications on Postgres databases that are also part of a streaming replication architecture, PG Failover Slots avoids the need for you to reseed your logical replication tables when a new standby gets promoted to primary.
- [Foreign Data Wrappers](foreign_data_wrappers) — Allow you to connect your Postgres database server to external data sources.
-- [Connection poolers](poolers) — Allow you to manage your connections to your Postgres database.
+
+- [EDB PgBouncer](poolers) — Allows you to manage your connections to your Postgres database.
diff --git a/product_docs/docs/biganimal/release/overview/poolers.mdx b/product_docs/docs/biganimal/release/overview/poolers.mdx
index 439a9360f06..3c41b903c11 100644
--- a/product_docs/docs/biganimal/release/overview/poolers.mdx
+++ b/product_docs/docs/biganimal/release/overview/poolers.mdx
@@ -1,26 +1,16 @@
---
-title: Connection poolers
+title: EDB PgBouncer
---
-BigAnimal supports [EDB PgBouncer](/pgbouncer/latest/) and [EDB Pgpool-II](/pgpool/latest/) to manage your connections to your Postgres database. PgBouncer can be hosted on BigAnimal or self-hosted, while EDB Pgpool-II can only be self-hosted.
-
-
-### EDB PgBouncer
-
-EDB PgBouncer can manage your connections to Postgres databases and help your workloads run more efficiently. It's particularly useful if you plan to use more than a few hundred active connections. You can enable EDB PgBouncer to be hosted by BigAnimal when creating your cluster. See [Creating a cluster](/biganimal/latest/getting_started/creating_a_cluster/#pgbouncer).
-
-!!!note
- To use PgBouncer when using BigAnimal's cloud account or when creating a distributed high-availability cluster, see the [EDB Pgpool-II documentation](/pgpool/latest/installing).
+EDB PgBouncer can manage your connections to Postgres databases and help your workloads run more efficiently. It's particularly useful if you plan to use more than a few hundred active connections. You can enable EDB PgBouncer to be entirely managed by BigAnimal when creating your cluster. See [Creating a cluster](/biganimal/latest/getting_started/creating_a_cluster/#pgbouncer).
BigAnimal provisions up to three instances per EDB PgBouncer-enabled cluster to ensure that performance is unaffected, so each availability zone receives its own instance of EDB PgBouncer.
-If you want to self-host your EDB PgBouncer deployment, see the [How to configure EDB PgBouncer with BigAnimal cluster](https://support.biganimal.com/hc/en-us/articles/4848726654745-How-to-configure-PgBouncer-with-BigAnimal-Cluster) knowledge-base article.
-
+!!!Note
+ Currently, you can't enable EDB PgBouncer when using BigAnimal's cloud account or when creating a distributed high-availability cluster using your cloud account.
-### EDB Pgpool-II
+If you want to deploy and manage PgBouncer outside of BigAnimal, see the [How to configure EDB PgBouncer with BigAnimal cluster](https://support.biganimal.com/hc/en-us/articles/4848726654745-How-to-configure-PgBouncer-with-BigAnimal-Cluster) knowledge-base article.
-EDB Pgpool-II acts as middleware between client applications and a Postgres database server. It saves connections to the Postgres servers and reuses them whenever a new connection with the same properties (that is, username, database, protocol version) comes in. It reduces connection overhead and improves the system's overall throughput.
-See [EDB Pgpool-II](/pgpool/latest/) for information on installing and configuring self-hosted EDB Pgpool-II. You can find other related content in the [How to configure EDB Pgpool with a BigAnimal cluster](https://support.biganimal.com/hc/en-us/articles/5475273722009-How-to-configure-Pgpool-with-BigAnimal-Cluster) knowledge-base article.
diff --git a/product_docs/docs/biganimal/release/overview/updates.mdx b/product_docs/docs/biganimal/release/overview/updates.mdx
index 8ade89424af..816ad84973d 100644
--- a/product_docs/docs/biganimal/release/overview/updates.mdx
+++ b/product_docs/docs/biganimal/release/overview/updates.mdx
@@ -2,14 +2,25 @@
title: Periodic maintenance
---
-EDB performs periodic maintenance to ensure stability and security of your clusters. We perform minor version upgrades and patch updates as part of this periodic maintenance. You're notified in the BigAnimal portal before maintenance occurs. Details are available on the [BigAnimal status page](https://status.biganimal.com/). You can subscribe to get these updates in a feed by selecting **Subscribe to Updates** on the status page.
+EDB performs periodic maintenance to ensure stability and security of your clusters. We perform minor version upgrades and patch updates as part of this periodic maintenance.
+
+## Notification of upcoming maintenance
+
+You're notified in the BigAnimal portal before maintenance occurs. Details are available on the [BigAnimal status page](https://status.biganimal.com/). You can subscribe to get these updates in a feed by selecting **Subscribe to Updates** on the status page.
EDB reserves the right to upgrade customers to the latest minor version without prior notice in an extraordinary circumstance. You can't configure minor versions.
In some cases, these updates might terminate existing network connections to your clusters. If that happens, the outage is typically less than 30 seconds. Be sure your applications are configured to automatically reconnect when connections are interrupted. Most modern database libraries do this by default.
-If you want to control when the updates are pushed, you can specify a weekly maintenance window for each cluster or each data group in the case of an extreme-high-availability cluster. BigAnimal displays a *scheduled maintenance* message on your cluster list four hours prior to the scheduled maintenance time to remind you of the upcoming maintenance window. This reminder allows you to make any necessary preparations, such as saving your work and closing any open connections. For more information on specifying maintenance windows, see [Maintenance](/biganimal/latest/getting_started/creating_a_cluster/#maintenance).
+## Specifying maintenance windows
+
+If you want to control when the updates are pushed, you can specify a weekly maintenance window for each cluster or each data group in the case of a distributed high-availability cluster. BigAnimal displays a *scheduled maintenance* message on your cluster list four hours prior to the scheduled maintenance time to remind you of the upcoming maintenance window. This reminder allows you to make any necessary preparations, such as saving your work and closing any open connections. For more information on specifying maintenance windows, see [Maintenance](/biganimal/latest/getting_started/creating_a_cluster/#maintenance).
+
+## Maintenance for high-availability clusters
+
+For primary/standby high-availability clusters, periodic maintenance is performed first on the standby replicas and then on the primary.
+While there's no downtime during periodic maintenance, the network connection resets when the primary fails over.
## Connectivity issues after an automatic upgrade
diff --git a/product_docs/docs/biganimal/release/pricing_and_billing/index.mdx b/product_docs/docs/biganimal/release/pricing_and_billing/index.mdx
index 468f637b369..ffc41414e80 100644
--- a/product_docs/docs/biganimal/release/pricing_and_billing/index.mdx
+++ b/product_docs/docs/biganimal/release/pricing_and_billing/index.mdx
@@ -8,14 +8,30 @@ BigAnimal calculates the estimated monthly price for your cluster and displays i
## Database pricing
-Pricing is based on the number of virtual central processing units (vCPUs) provisioned for the database software offering. Consumption of vCPUs is metered hourly. A deployment is typically made up of either one instance or one primary and two standby replica instances of either PostgreSQL or EDB Postgres Advanced Server. When primary/standby high-availability configurations are enabled, multiply the number of vCPUs per instance by the number of replicas configured to calculate the full price for all resources used. This table shows the cost breakdown.
+Pricing is based on the number of virtual central processing units (vCPUs) provisioned for the database software offering. Consumption of vCPUs is metered hourly.
-| Database type | Hourly price | Monthly price\* |
+### Single-node and primary/standby high-availability pricing
+
+When primary/standby high-availability configurations are enabled, to calculate the full price for all resources used, multiply the number of vCPUs per instance by the number of replicas configured.
+
+This table shows the cost breakdown.
+
+| Database type | Hourly price | Monthly price\* |
| ---------------------------- | -------------- | --------------- |
-| PostgreSQL | $0.0856 / vCPU | $62.49 / vCPU |
-| EDB Postgres Advanced Server | $0.2568 / vCPU | $187.46 / vCPU |
+| PostgreSQL | $0.0856 / vCPU | $62.49 / vCPU |
+| EDB Postgres Advanced Server | $0.2568 / vCPU | $187.46 / vCPU |
+
+\* The monthly cost is approximate and assumes 730 hours in a month.
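+
+For example, a primary/standby high-availability cluster running EDB Postgres Advanced Server with one primary and two standby replicas, each using 4 vCPUs, costs approximately 3 × 4 × $187.46 = $2,249.52 per month for the database software alone (infrastructure costs are additional).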
+
+### Distributed high-availability pricing
+
+When distributed high-availability configurations are enabled, to calculate the full price for all resources used, multiply the number of vCPUs per instance by the number of data nodes configured. You aren't charged the database price for witness nodes or groups in distributed high-availability configurations; you pay only for their infrastructure resources, such as compute.
-Distributed high availability powered by EDB Postgres Distributed is now available in preview! Contact Sales for more information about pricing.
+This table shows the cost breakdown.
+
+| Database type | Hourly price | Monthly price\* |
+| ---------------------------- | -------------- | --------------- |
+| EDB Postgres Extended Server | $0.2511 / vCPU | $188.33 / vCPU |
+| EDB Postgres Advanced Server | $0.3424 / vCPU | $256.80 / vCPU |
\* The monthly cost is approximate and assumes 730 hours in a month.
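+
+For example, a distributed high-availability cluster with two data groups of three data nodes each, where every node uses 4 vCPUs of EDB Postgres Extended Server, costs approximately 6 × 4 × $188.33 = $4,519.92 per month for the database software alone (witness nodes add only infrastructure costs).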
diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx
index b46c2d00abd..2707e232191 100644
--- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx
+++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx
@@ -121,7 +121,7 @@ To create the cluster using the sample configuration file `config_file.yaml`:
biganimal cluster create --config-file "./config_file.yaml"
```
-To query an enumeration of valid values for the BigAnimal and cloud service provider-related properties, CLI provides a series of subcommands. For example, you can use `cluster show-architectures` to list all cloud architectures available to your current login account:
+To view the valid values you can use in the configuration file for BigAnimal and cloud service provider-related properties, use the CLI's cluster subcommands. For example, you can use `cluster show-architectures` to list all BigAnimal database architectures available within your cloud service provider account:
```shell
biganimal cluster show-architectures
@@ -137,10 +137,6 @@ __OUTPUT__
└────────┴───────────────────────────┴──────────┘
```
-!!!Note
-Distributed high-availability architecture isn't enabled by default. To get access, contact your sales representative or [Support](/biganimal/release/overview/support).
-!!!
-
!!!Tip
You can turn off the confirmation step with the `biganimal disable-confirm` command.
!!!
@@ -249,7 +245,7 @@ You can list all deleted clusters using the `show-deleted-clusters` command and
### Restore a cluster
-BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example:
+BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and have different configurations. You can specify new configurations in the `cluster restore` command. For example:
```shell
biganimal cluster restore\
@@ -278,146 +274,13 @@ To restore a deleted cluster, use the `--from-deleted` flag in the command.
You can restore a cluster in a single cluster to a primary/standby high-availability cluster and vice versa. You can restore a distributed high-availability cluster only to a cluster using the same architecture.
!!!
-### Get cluster connection information
-
-To use your BigAnimal cluster, you first need to get your cluster's connection information. To get your cluster's connection information, use the `cluster show-connection` command:
-
-```shell
-biganimal cluster show-connection \
- --name "my-biganimal-cluster" \
- --provider "azure" \
- --region "eastus"
-__OUTPUT__
-┌─────────────┬──────────────────────────────────────────────────────────────────────────────────────────┐
-│ Access Type │ Connection String │
-├─────────────┼──────────────────────────────────────────────────────────────────────────────────────────┤
-│ read-write │ postgresql://edb_admin@p-gxhkfww1fe.30glixgayvwhtmn3.enterprisedb.network:5432/edb_admin │
-│ read-only │ Disabled │
-└─────────────┴──────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-!!!tip
-You can query the complete connection information with other output formats, like JSON or YAML. For example:
-
-```shell
-biganimal cluster show-connection \
- --name "my-biganimal-cluster" \
- --provider "azure" \
- --region "eastus" \
- --output "json"
-```
-!!!
-
-### Update cluster
-
-After the cluster is created, you can update attributes of the cluster, including both the cluster’s profile and its deployment architecture. You can update the following attributes:
-
-- Cluster name
-- Password of administrator account
-- Cluster architecture
-- Number of standby replicas
-- Instance type of cluster
-- Instance volume properties
-- Networking
-- Allowed IP list
-- Postgres database configuration
-- Volume properties, size, IOPS
-- Retention period
-- Read-only workloads
-- IAM authentication
-
-For example, to set the public allowed IP range list, use the `--cidr-blocks` flag:
-
-```shell
-./biganimal cluster update --name "my-biganimal-cluster" --provider "azure" \
- --region "eastus" \
- --cidr-blocks "9.9.9.9/28=Traffic from App A"
-```
-
-To check whether the setting took effect, use the `cluster show` command, and view the detailed cluster information output in JSON format. For example:
-
-```shell
-biganimal cluster show --name "my-biganimal-cluster" --provider "azure" \
- --region "eastus" \
- --output "json" \
-| jq '.[0].allowIpRangeMap'
-__OUTPUT__
-[
- [
- "9.9.9.9/28",
- "Traffic from App A"
- ]
-]
-```
-
-### Update the Postgres configuration of a cluster
-
-To update the Postgres configuration of a BigAnimal cluster directly from the CLI:
-
-```shell
-biganimal cluster update --id "p-gxhkfww1fe" \
- --pg-config "application_name=ba_test_app,array_nulls=false"
-__OUTPUT__
-Update Cluster operation is started
-Cluster ID is "p-gxhkfww1fe"
-```
-To specify multiple configurations, you can use multiple `--pg-config` flags or include multiple configuration settings as a key-value array string separated by commas in one `--pg-config` flag. If a Postgres setting contains a comma, you need to specify it with a separate `--pg-config` flag.
-
-!!! Note
-You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a primary/standby high-availability cluster.
-!!!
-
-### Delete a cluster
-
-To delete a cluster you no longer need, use the `cluster delete` command. For example:
-
-```shell
-biganimal cluster delete \
- --name "my-biganimal-cluster" \
- --provider "azure" \
- --region "eastus"
-```
-
-You can list all deleted clusters using the `show-deleted-clusters` command and restore them from their history backups as needed.
-
-
-### Restore a cluster
-BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example:
-
-```shell
-biganimal cluster restore\
- --name "my-biganimal-cluster" \
- --provider "azure" \
- --region "eastus" \
- --password "mypassword@123" \
- --new-name "my-biganimal-cluster-restored" \
- --new-region="eastus2" \
- --cluster-architecture "single" \
- --instance-type "azure:Standard_E2s_v3" \
- --volume-type "azurepremiumstorage" \
- --volume-property "P1" \
- --networking "public" \
- --cidr-blocks="10.10.10.10/27=Traffic from App B" \
- --restore-point "2022-01-26T15:04:05+0800" \
- --backup-retention-period "2w" \
- --read-only-workloads: "true"
-```
-
-The password for the restored cluster is mandatory. The other parameters, if not specified, inherit the source database's settings.
-
-To restore a deleted cluster, use the `--from-deleted` flag in the command.
-
-!!! Note
-You can restore a cluster in a single cluster to a primary/standby high-availability cluster and vice versa. You can restore a distributed high-availability cluster only to a cluster using the same architecture.
-!!!
-
## Managing distributed high-availability clusters
Use the BigAnimal `pgd` commands to create, retrieve information on, and manage distributed high-availability clusters.
!!!note
- In addition to the BigAnimal `pgd` commands, you can switch over and use additional commands available in the [EDB Postgres Distributed CLI](/pgd/latest/cli/) to perform PGD-specific operations. The only EDB Postgres Distributed CLI commands that aren't applicable with BigAnimal are `create-proxy` and `delete-proxy`.
+ In addition to the BigAnimal `pgd` commands, you can switch over and use commands available in the [EDB Postgres Distributed CLI](/pgd/latest/cli/) to perform PGD-specific operations. The only EDB Postgres Distributed CLI commands that don't apply to BigAnimal are `create-proxy` and `delete-proxy`.
### Create a distributed high-availability cluster
@@ -536,7 +399,7 @@ biganimal pgd show [--id] [--deleted]
### Restore a distributed high-availability cluster
-Restore a distributed high-availability cluster or a deleted distributed high-availability cluster to a new cluster on the same cloud provider. You can restore an active cluster or a deleted cluster within its retention period. You can only restore one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the configuration file.
+Restore a distributed high-availability cluster or a deleted distributed high-availability cluster to a new cluster on the same cloud provider. You can restore an active cluster or a deleted cluster within its retention period. You can restore only one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the configuration file.
The syntax of the command is:
@@ -571,7 +434,7 @@ dataGroups:
### Get distributed high-availability cluster connection information
-You first need to get your cluster group's connection information in order to connect to and use your BigAnimal distributed high-availability cluster.
+To connect to and use your BigAnimal distributed high-availability cluster, you first need to get your cluster group's connection information.
The syntax of the command is:
diff --git a/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx b/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
index 83780e88c56..bd207325b1f 100644
--- a/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
@@ -25,8 +25,6 @@ psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.biganimal.io:5432/edb_admin?sslmod
### pg_ba_admin
So that we can effectively manage the cloud resources and ensure users are protected against security threats, BigAnimal provides a special administrative role, pg_ba_admin. The edb_admin user is a member of the pg_ba_admin role. The pg_ba_admin role has privileges similar to a Postgres superuser. Like the edb_admin user, the pg_ba_admin role shouldn't be used for day-to-day application operations and access to the role must be controlled carefully. See [pg_ba_admin role](pg_ba_admin) for details.
-The pg_ba_admin role is present on clusters deployed in BigAnimal's cloud account. Soon, it will be deployed to all BigAnimal clusters.
-
### superuser
When using your own cloud account, you can grant the edb_admin role superuser privileges for a cluster. See [Superuser access](/biganimal/latest/getting_started/creating_a_cluster/#superuser-access). If you grant superuser privileges, you must take care to limit the number of connections used by superusers to avoid degraded service or compromising availability.
diff --git a/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx b/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx
index 22468f65f91..e8abf58baf3 100644
--- a/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/03_modifying_your_cluster/index.mdx
@@ -25,14 +25,14 @@ You can also modify your cluster by installing Postgres extensions. See [Postgre
| Settings | Tab | Notes |
| ---------------------------------------------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
- | Cluster type | **Cluster Info** | You can't switch from a single-node cluster or a high-availability cluster to an extreme-high-availability cluster or vice versa. |
+ | Cluster type | **Cluster Info** | You can't switch from a single-node cluster or a high-availability cluster to a distributed high-availability cluster or vice versa. |
| Number of replicas (for a high-availability cluster) | **Cluster Info** | — |
| Cluster name and password | **Cluster Settings** | — |
| Instance type | **Cluster Settings** | Changing the instance type can incur higher cloud infrastructure charges. |
| Volume type | **Cluster Settings** | You can't switch between the io2 and io2 Block Express volume types in an AWS cluster. |
| Volume properties | **Cluster Settings** | It can take up to six hours to tune IOPS or resize the disks of your cluster because AWS requires a cooldown period after volume modifications, as explained in [Limitations](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/modify-volume-requirements.html). The volume properties are disabled and can't be modified while this is in progress. |
| Networking type (public or private) | **Cluster Settings** | If you're using Azure and previously set up a private link and want to change to a public network, you must remove the private link resources before making the change. |
- | Nodes (for an extreme-high-availability cluster) | **Data Groups** | — |
+ | Nodes (for a distributed high-availability cluster) | **Data Groups** | After you create your cluster, you can't change the number of data nodes. |
| Database configuration parameters | **DB Configuration** | If you're using faraway replicas, only a small subset of parameters are editable. These parameters need to be modified in the replica when increased in the replica's source cluster. See [Modify a faraway replica](/biganimal/latest/using_cluster/managing_replicas/#modify-a-faraway-replica) for details. |
| Retention period for backups | **Additional Settings** | — |
| Custom maintenance window | **Additional Settings** | Set or modify a maintenance window in which maintenance upgrades occur for the cluster. See [Maintenance](/biganimal/latest/getting_started/creating_a_cluster/#maintenance). |
@@ -48,7 +48,7 @@ You can also modify your cluster by installing Postgres extensions. See [Postgre
## Modify a data group
-You can modify the data groups in your extreme-high-availability cluster by editing the configuration settings.
+You can modify the data groups in your distributed high-availability cluster by editing the configuration settings.
1. Sign in to the [BigAnimal portal](https://portal.biganimal.com).
diff --git a/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx b/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx
index f8177a82f14..57313f0d4da 100644
--- a/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/04_backup_and_restore.mdx
@@ -54,6 +54,6 @@ The restore operation is available for any cluster that has at least one availab
1. Select **Next: Data Group**.
1. Select the **Node Settings** tab.
1. In the **Source** section, select **Fully Restore** or **Point in Time Restore**. A point-in-time restore restores the data group as it was at the specified date and time.
-1. In the **Nodes** section, select **Two Data Nodes** or **Three Data Nodes**. For more information on node architecture, see [Distributed high availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability-preview).
+1. In the **Nodes** section, select **Two Data Nodes** or **Three Data Nodes**. For more information on node architecture, see [Distributed high availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability).
1. Follow Steps 3-5 in [Creating a distributed high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster/).
1. Select **Restore**.
diff --git a/product_docs/docs/biganimal/release/using_cluster/managing_replicas.mdx b/product_docs/docs/biganimal/release/using_cluster/managing_replicas.mdx
index 55073ed6224..2f0727e496a 100644
--- a/product_docs/docs/biganimal/release/using_cluster/managing_replicas.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/managing_replicas.mdx
@@ -97,7 +97,7 @@ You can promote a faraway replica to a full-fledged cluster, which makes it capa
1. The cluster settings are populated with values inherited from the source cluster. You can edit the cluster settings while creating your cluster.
!!!note "Notes"
- - You can promote a faraway replica to a single node or high-availability cluster but not to an extreme-high-availability cluster.
+ - You can promote a faraway replica to a single node or high-availability cluster but not to a distributed high-availability cluster.
- While promoting a replica to a cluster, you can't modify the **Provider** and **Region** fields and the **Database Type** section or enable read-only workloads.
1. Select **Promote Replica**.
diff --git a/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.2_rel_notes.mdx b/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.2_rel_notes.mdx
index a8f415a1b59..701148a1e35 100644
--- a/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.2_rel_notes.mdx
+++ b/product_docs/docs/edb_plus/41/02_release_notes/edbplus_41.2_rel_notes.mdx
@@ -6,5 +6,5 @@ New features, enhancements, bug fixes, and other changes in EDB\*Plus 41.2.0 inc
| Type | Description |
| ----------- | -------------------------------------------------------------- |
-| Enhancement | The connection string syntax in EDB\*Plus now supports multi-host connectivity. When multiple hosts are specified, you can also use the `targetServerType` connection property and set it to `primary` to ensure that EDB\*Plus always establishes a connection with the active primary EDB Postgres Advanced Server (EPAS) database server. [Support ticket #92553] |
+| Enhancement | The connection string syntax in EDB\*Plus now supports multi-host connectivity. When multiple hosts are specified, you can also use the `targetServerType` connection property and set it to `primary` to ensure that EDB\*Plus always establishes a connection with the active primary EDB Postgres Advanced Server database server. [Support ticket #92553] |
| Bug fix | Fixed the issue whereby the ECHO command wasn't emitting the SQL statement in the spooled file when using EDB\*Plus in an interactive session. [Support ticket # 83580] |
diff --git a/product_docs/docs/edb_plus/41/02_release_notes/index.mdx b/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
index dac25ecd895..5269d7c8505 100644
--- a/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
+++ b/product_docs/docs/edb_plus/41/02_release_notes/index.mdx
@@ -13,6 +13,6 @@ The EDB\*Plus documentation describes the latest version of EDB\*Plus Version 41
| Version | Release Date |
| ------------------------------------- | ------------ |
-| [41.2.0](edbplus_41.2_rel_notes.mdx) | 2023 Aug 23 |
-| [41.1.0](edbplus_41.1_rel_notes.mdx) | 2023 Apr 20 |
-| [41.0.0](edbplus_41.0_rel_notes.mdx) | 2023 Feb 14 |
+| [41.2.0](edbplus_41.2_rel_notes.mdx) | 23 Aug 2023 |
+| [41.1.0](edbplus_41.1_rel_notes.mdx) | 20 Apr 2023 |
+| [41.0.0](edbplus_41.0_rel_notes.mdx) | 14 Feb 2023 |
diff --git a/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx b/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
index b6902fe3f4c..561c258e9f3 100644
--- a/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
+++ b/product_docs/docs/edb_plus/41/04_using_edb_plus.mdx
@@ -40,7 +40,7 @@ edbplus [ -S[ILENT ] ] [ | /NOLOG ] [ @[. ] ]
`port` is the port number receiving connections on the database server. The default is `5444`.
!!! Note
- If multiple hosts are specified, the driver will try to connect once to each of them in the order specified until the connection succeeds. If none succeed, a normal connection exception is thrown. Including the `targetServerType` connection property and setting it to `primary` ensures that the connection is only made to a primary database server.
+ If you specify multiple hosts, the driver tries to connect once to each of them in the order specified until the connection succeeds. If none succeed, a normal connection exception is thrown. Including the `targetServerType` connection property and setting it to `primary` ensures that the connection is made only to a primary database server.
`dbname` is the name of the database to connect to. The default is `edb`.
@@ -50,7 +50,7 @@ edbplus [ -S[ILENT ] ] [ | /NOLOG ] [ @[. ] ]
edbplus.sh enterprisedb/password@[fe80::20c:29ff:fe7c:78b2]:5444/edb
```
- The `pg_hba.conf` file for the database server must contain an appropriate entry for the IPv6 connection. The following example shows an entry that allows all addresses:
+ The `pg_hba.conf` file for the database server must contain an appropriate entry for the IPv6 connection. This example shows an entry that allows all addresses:
```text
# TYPE DATABASE USER ADDRESS METHOD
diff --git a/product_docs/docs/edb_plus/41/06_command_summary.mdx b/product_docs/docs/edb_plus/41/06_command_summary.mdx
index a43cfdfe2d2..10348cfde1c 100644
--- a/product_docs/docs/edb_plus/41/06_command_summary.mdx
+++ b/product_docs/docs/edb_plus/41/06_command_summary.mdx
@@ -298,9 +298,9 @@ CON[NECT] [/][@{ | } ]
Where:
- `username` is a database user name with which to connect to the database.
+ `username` is a database username with which to connect to the database.
- `password` is the password associated with the specified user name. If you don't provide a `password` but a password is required for authentication, a search is made for a password file, first in the home directory of the Linux operating system account invoking EDB\*Plus (or in the `%APPDATA%\postgresql\` directory for Windows) and then at the location specified by the `PGPASSFILE` environment variable. The password file is `.pgpass` on Linux hosts and `pgpass.conf` on Windows hosts. The following is an example on a Windows host:
+ `password` is the password associated with the specified username. If you don't provide a password, but a password is required for authentication, a search is made for a password file. The search looks first in the home directory of the Linux operating system account invoking EDB\*Plus (or in the `%APPDATA%\postgresql\` directory for Windows) and then at the location specified by the `PGPASSFILE` environment variable. The password file is `.pgpass` on Linux hosts and `pgpass.conf` on Windows hosts. The following is an example on a Windows host:
```text
C:\Users\Administrator\AppData\Roaming\postgresql\pgpass.conf
@@ -323,7 +323,7 @@ Disconnected from EnterpriseDB Database.
Connected to EnterpriseDB 14.0.0 (localhost:5445/edb) AS smith
```
-In this session, the connection is changed to user name `enterprisedb`. The host defaults to the `localhost`, the port defaults to `5444` (which isn't the same as the port previously used), and the database defaults to `edb`.
+In this session, the connection is changed to the username `enterprisedb`. The host defaults to localhost, the port defaults to `5444` (which isn't the same as the port previously used), and the database defaults to `edb`.
```sql
SQL> CONNECT enterprisedb/password
@@ -331,7 +331,7 @@ Disconnected from EnterpriseDB Database.
Connected to EnterpriseDB 14.0.0 (localhost:5444/edb) AS enterprisedb
```
-The following example illustrates connectivity for a multi-node cluster (one primary node and two secondary nodes) setup. The given multi-host `connectstring` syntax is used to establish a connection with the active primary database server. In this case, using `CONNECT` command, the connection is established with the primary database node on host `192.168.22.24` at port `5444`.
+This example shows connectivity for a multi-node cluster setup (one primary node and two secondary nodes). The multi-host `connectstring` syntax is used to establish a connection with the active primary database server. In this case, using the `CONNECT` command, the connection is established with the primary database node on host `192.168.22.24` at port `5444`.
```sql
SQL> CONNECT enterprisedb/edb@192.168.22.24:5444,192.168.22.25:5445,192.168.22.26:5446/edb?targetServerType=primary
diff --git a/product_docs/docs/efm/4/05_using_efm.mdx b/product_docs/docs/efm/4/05_using_efm.mdx
index fe797d2ee82..6a7c7601131 100644
--- a/product_docs/docs/efm/4/05_using_efm.mdx
+++ b/product_docs/docs/efm/4/05_using_efm.mdx
@@ -265,27 +265,39 @@ After creating the `acctg.properties` and `sales.properties` files, create a ser
### RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x
-If you're using RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, copy the `edb-efm-4.` unit file to a new file with a name that is unique for each cluster. For example, if you have two clusters named acctg and sales, the unit file names might be:
+If you're using RHEL/CentOS 7.x or RHEL/Rocky Linux/AlmaLinux 8.x, copy the service file `/usr/lib/systemd/system/edb-efm-4..service` to `/etc/systemd/system` with a new name that is unique for each cluster.
-```text
-/usr/lib/systemd/system/efm-acctg.service
+For example, if you have two clusters named `acctg` and `sales` managed by Failover Manager 4.7, the unit file names might be `efm-acctg.service` and `efm-sales.service`. You can create them with:
-/usr/lib/systemd/system/efm-sales.service
+```shell
+cp /usr/lib/systemd/system/edb-efm-4.7.service /etc/systemd/system/efm-acctg.service
+cp /usr/lib/systemd/system/edb-efm-4.7.service /etc/systemd/system/efm-sales.service
```
-Then, edit the `CLUSTER` variable in each unit file, changing the specified cluster name from `efm` to the new cluster name. For example, for a cluster named `acctg`, the value specifies:
+Then use `systemctl edit` to update the `CLUSTER` variable in each unit file, changing the specified cluster name from `efm` to the new cluster name.
+Also update the value of the `PIDFile` parameter to match the new cluster name.
-```text
+In this example, edit the `acctg` cluster by running `systemctl edit efm-acctg.service` and enter:
+
+```ini
+[Service]
Environment=CLUSTER=acctg
+PIDFile=/run/efm-4.7/acctg.pid
```
-Also update the value of the `PIDfile` parameter to specify the new cluster name. For example:
+Then edit the `sales` cluster by running `systemctl edit efm-sales.service` and enter:
```ini
-PIDFile=/var/run/efm-4.7/acctg.pid
+[Service]
+Environment=CLUSTER=sales
+PIDFile=/run/efm-4.7/sales.pid
```
-After copying the service scripts, enable the services:
+!!!Note
+You can also edit the files in `/etc/systemd/system` directly, but then you must run `systemctl daemon-reload`, which isn't necessary when using `systemctl edit` to change the override files.
+!!!
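+
+For example, if you edit the copies in `/etc/systemd/system` directly, reload the systemd configuration before enabling the services:
+
+```shell
+# systemctl daemon-reload
+```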
+
+After saving the changes, enable the services:
```text
# systemctl enable efm-acctg.service
@@ -296,7 +308,7 @@ After copying the service scripts, enable the services:
Then, use the new service scripts to start the agents. For example, to start the `acctg` agent:
```text
-# systemctl start efm-acctg`
+# systemctl start efm-acctg
```
For information about customizing a unit file, see [Understanding and administering systemd](https://docs.fedoraproject.org/en-US/quick-docs/understanding-and-administering-systemd/index.html).
diff --git a/product_docs/docs/efm/4/efm_rel_notes/index.mdx b/product_docs/docs/efm/4/efm_rel_notes/index.mdx
index f63a2e6ede6..6eb796f3255 100644
--- a/product_docs/docs/efm/4/efm_rel_notes/index.mdx
+++ b/product_docs/docs/efm/4/efm_rel_notes/index.mdx
@@ -9,14 +9,14 @@ about the release that introduced the feature.
| Version | Release Date |
| ------- | ------------ |
-| [4.7](03_efm_47_rel_notes) | 2023 Jun 20 |
-| [4.6](04_efm_46_rel_notes) | 2023 Feb 14 |
-| [4.5](05_efm_45_rel_notes) | 2022 Aug 30 |
-| [4.4](06_efm_44_rel_notes) | 2022 Jan 5 |
-| [4.3](07_efm_43_rel_notes) | 2021 Dec 18|
-| [4.2](08_efm_42_rel_notes) | 2021 Apr 19|
-| [4.1](09_efm_41_rel_notes) | 2021 Dec 11 |
-| [4.0](10_efm_40_rel_notes) | 2021 Sep 2 |
+| [4.7](03_efm_47_rel_notes) | 20 Jun 2023 |
+| [4.6](04_efm_46_rel_notes) | 14 Feb 2023 |
+| [4.5](05_efm_45_rel_notes) | 30 Aug 2022 |
+| [4.4](06_efm_44_rel_notes) | 05 Jan 2022 |
+| [4.3](07_efm_43_rel_notes) | 18 Dec 2021 |
+| [4.2](08_efm_42_rel_notes) | 19 Apr 2021 |
+| [4.1](09_efm_41_rel_notes) | 11 Dec 2021 |
+| [4.0](10_efm_40_rel_notes) | 02 Sep 2021 |
diff --git a/product_docs/docs/efm/4/installing/index.mdx b/product_docs/docs/efm/4/installing/index.mdx
index 69c668c58b8..908d757205f 100644
--- a/product_docs/docs/efm/4/installing/index.mdx
+++ b/product_docs/docs/efm/4/installing/index.mdx
@@ -11,7 +11,6 @@ redirects:
- ../efm_user/03_installing_efm
- 13_initial_config
- /efm/4/03_installing_efm/
- - /efm/4.0/efm_user/02_failover_manager_overview/01_prerequisites/
- /efm/4/03_installing_efm/x86_amd64/08_efm4_deb9_x86/
- /efm/4/03_installing_efm/02_efm4_other_linux8_x86/
- /efm/4/03_installing_efm/04_efm4_centos7_x86/
diff --git a/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx b/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx
index 59ab86efb7a..da4d23aa2b9 100644
--- a/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx
+++ b/product_docs/docs/epas/11/epas_rel_notes/epas11_21_32_rel_notes.mdx
@@ -3,24 +3,34 @@ title: "Version 11.21.32"
hideToC: true
---
+Released: 21 Aug 2023
+
+Updated: 30 Aug 2023
+
!!! Important Upgrading
-Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+!!!
+
+!!! Note After applying patches
+Users of the UTL_MAIL package now require EXECUTE permission on the UTL_SMTP and UTL_TCP packages in addition to EXECUTE permission on UTL_MAIL.
+
+Users of the UTL_SMTP package now require EXECUTE permission on the UTL_TCP package in addition to EXECUTE permission on UTL_SMTP.
!!!
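+
+For example, a minimal sketch of the additional grants, assuming a hypothetical role `report_user` that already has EXECUTE permission on UTL_MAIL and UTL_SMTP:
+
+```sql
+-- Sketch only: report_user is a hypothetical role that already uses UTL_MAIL and UTL_SMTP
+GRANT EXECUTE ON PACKAGE SYS.UTL_SMTP TO report_user;
+GRANT EXECUTE ON PACKAGE SYS.UTL_TCP TO report_user;
+```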
-#### EDB Postgres Advanced Server 11.21.32 includes the following enhancements and bug fixes:
+EDB Postgres Advanced Server 11.21.32 includes the following enhancements and bug fixes:
| Type | Description | Addresses |
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------| --------------------- |
-| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-XXXXX-1](/security/advisories/cve2023xxxxx1/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-XXXXX-2](/security/advisories/cve2023xxxxx2/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-XXXXX-3](/security/advisories/cve2023xxxxx3/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-XXXXX-4](/security/advisories/cve2023xxxxx4/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-XXXXX-5](/security/advisories/cve2023xxxxx5/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-XXXXX-6](/security/advisories/cve2023xxxxx6/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-XXXXX-7](/security/advisories/cve2023xxxxx7/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-XXXXX-8](/security/advisories/cve2023xxxxx8/) | 11+
-| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 | 11+
-| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 | 11+
+| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-41117](/security/advisories/cve202341117/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-41119](/security/advisories/cve202341119/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-41113](/security/advisories/cve202341113/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-41118](/security/advisories/cve202341118/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-41116](/security/advisories/cve202341116/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-41114](/security/advisories/cve202341114/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-41115](/security/advisories/cve202341115/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-41120](/security/advisories/cve202341120/) |
+| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 |
+| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 |
!!! Note Addresses
diff --git a/product_docs/docs/epas/11/epas_rel_notes/index.mdx b/product_docs/docs/epas/11/epas_rel_notes/index.mdx
index 1cab0645dad..b321278359f 100644
--- a/product_docs/docs/epas/11/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/11/epas_rel_notes/index.mdx
@@ -24,26 +24,26 @@ The EDB Postgres Advanced Server (Advanced Server) documentation describes the l
| Version | Release Date | Upstream Merge |
| ------- | ------------ | -------------- |
-| [11.21.32](epas11_21_32_rel_notes.mdx) | 2023 Aug 21 | [11.21](https://www.postgresql.org/docs/11/release-11-21.html) |
-| [11.20.31](epas11_20_31_rel_notes.mdx) | 2023 May 11 | [11.20](https://www.postgresql.org/docs/11/release-11-20.html) |
-| [11.19.30](epas11_19_30_rel_notes.mdx) | 2023 Feb 10 | [11.19](https://www.postgresql.org/docs/11/release-11-19.html) |
-| [11.18.29](epas11_18_29_rel_notes.mdx) | 2022 Nov 10 | [11.18](https://www.postgresql.org/docs/11/release-11-18.html) |
-| [11.17.28](epas11_17_28_rel_notes.mdx) | 2022 Aug 11 | [11.17](https://www.postgresql.org/docs/11/release-11-17.html) |
-| [11.16.26](epas11_16_26_rel_notes.mdx) | 2022 May 12 | [11.16](https://www.postgresql.org/docs/11/release-11-16.html) |
-| [11.15.25](09_epas11.15.25_rel_notes.mdx) | 2022 Feb 10 | [11.15](https://www.postgresql.org/docs/11/release-11-15.html) |
-| [11.14.24](10_epas11.14.24_rel_notes.mdx) | 2021 Nov 11 | [11.14](https://www.postgresql.org/docs/11/release-11-14.html) |
-| [11.13.23](11_epas11.13.23_rel_notes.mdx) | 2021 Sep 08 | [11.13](https://www.postgresql.org/docs/11/release-11-13.html) |
-| [11.12.22](13_epas11.12.22_rel_notes.mdx) | 2021 May 05 | [11.12](https://www.postgresql.org/docs/11/release-11-12.html) |
-| [11.12.21](15_epas11.12.21_rel_notes.mdx) | 2021 Apr 15 | [11.12](https://www.postgresql.org/docs/11/release-11-12.html) |
-| [11.11.20](17_epas11.11.20_rel_notes.mdx) | 2021 Feb 12 | [11.11](https://www.postgresql.org/docs/11/release-11-11.html) |
-| [11.10.19](19_epas11.10.19_rel_notes.mdx) | 2020 Nov 20 | [11.10](https://www.postgresql.org/docs/11/release-11-10.html) |
-| [11.9.17](21_epas11.9.17_rel_notes.mdx) | 2020 Aug 18 | [11.9](https://www.postgresql.org/docs/11/release-11-9.html) |
-| [11.9.16](23_epas11.9.16_rel_notes.mdx) | 2020 Aug 17 | [11.9](https://www.postgresql.org/docs/11/release-11-9.html) |
-| [11.8.15](25_epas11.8.15_rel_notes.mdx) | 2020 May 18 | [11.8](https://www.postgresql.org/docs/11/release-11-8.html) |
-| [11.7.14](27_epas11.7.14_rel_notes.mdx) | 2020 Feb 14 | [11.7](https://www.postgresql.org/docs/11/release-11-7.html) |
-| [11.6.13](29_epas11.6.13_rel_notes.mdx) | 2019 Nov 19 | [11.6](https://www.postgresql.org/docs/11/release-11-6.html) |
-| [11.5.12](31_epas11.5.12_rel_notes.mdx) | 2019 Aug 26 | [11.5](https://www.postgresql.org/docs/11/release-11-5.html) |
-| [11.4.11](33_epas11.4.11_rel_notes.mdx) | 2019 Jun 25 | [11.4](https://www.postgresql.org/docs/11/release-11-4.html) |
-| [11.3.10](35_epas11.3.10_rel_notes.mdx) | 2019 May 13 | [11.3](https://www.postgresql.org/docs/11/release-11-3.html) |
-| [11.2.9](37_epas11.2.9_rel_notes.mdx) | 2019 Feb 22 | [11.2](https://www.postgresql.org/docs/11/release-11-2.html) |
-| [11.1.7](39_epas11.1.7_rel_notes.mdx) | 2018 Nov 28 | [11.1](https://www.postgresql.org/docs/11/release-11-1.html) |
\ No newline at end of file
+| [11.21.32](epas11_21_32_rel_notes.mdx) | 21 Aug 2023 | [11.21](https://www.postgresql.org/docs/11/release-11-21.html) |
+| [11.20.31](epas11_20_31_rel_notes.mdx) | 11 May 2023 | [11.20](https://www.postgresql.org/docs/11/release-11-20.html) |
+| [11.19.30](epas11_19_30_rel_notes.mdx) | 10 Feb 2023 | [11.19](https://www.postgresql.org/docs/11/release-11-19.html) |
+| [11.18.29](epas11_18_29_rel_notes.mdx) | 10 Nov 2022 | [11.18](https://www.postgresql.org/docs/11/release-11-18.html) |
+| [11.17.28](epas11_17_28_rel_notes.mdx) | 11 Aug 2022 | [11.17](https://www.postgresql.org/docs/11/release-11-17.html) |
+| [11.16.26](epas11_16_26_rel_notes.mdx) | 12 May 2022 | [11.16](https://www.postgresql.org/docs/11/release-11-16.html) |
+| [11.15.25](09_epas11.15.25_rel_notes.mdx) | 10 Feb 2022 | [11.15](https://www.postgresql.org/docs/11/release-11-15.html) |
+| [11.14.24](10_epas11.14.24_rel_notes.mdx) | 11 Nov 2021 | [11.14](https://www.postgresql.org/docs/11/release-11-14.html) |
+| [11.13.23](11_epas11.13.23_rel_notes.mdx) | 08 Sep 2021 | [11.13](https://www.postgresql.org/docs/11/release-11-13.html) |
+| [11.12.22](13_epas11.12.22_rel_notes.mdx) | 05 May 2021 | [11.12](https://www.postgresql.org/docs/11/release-11-12.html) |
+| [11.12.21](15_epas11.12.21_rel_notes.mdx) | 15 Apr 2021 | [11.12](https://www.postgresql.org/docs/11/release-11-12.html) |
+| [11.11.20](17_epas11.11.20_rel_notes.mdx) | 12 Feb 2021 | [11.11](https://www.postgresql.org/docs/11/release-11-11.html) |
+| [11.10.19](19_epas11.10.19_rel_notes.mdx) | 20 Nov 2020 | [11.10](https://www.postgresql.org/docs/11/release-11-10.html) |
+| [11.9.17](21_epas11.9.17_rel_notes.mdx) | 18 Aug 2020 | [11.9](https://www.postgresql.org/docs/11/release-11-9.html) |
+| [11.9.16](23_epas11.9.16_rel_notes.mdx) | 17 Aug 2020 | [11.9](https://www.postgresql.org/docs/11/release-11-9.html) |
+| [11.8.15](25_epas11.8.15_rel_notes.mdx) | 18 May 2020 | [11.8](https://www.postgresql.org/docs/11/release-11-8.html) |
+| [11.7.14](27_epas11.7.14_rel_notes.mdx) | 14 Feb 2020 | [11.7](https://www.postgresql.org/docs/11/release-11-7.html) |
+| [11.6.13](29_epas11.6.13_rel_notes.mdx) | 19 Nov 2019 | [11.6](https://www.postgresql.org/docs/11/release-11-6.html) |
+| [11.5.12](31_epas11.5.12_rel_notes.mdx) | 26 Aug 2019 | [11.5](https://www.postgresql.org/docs/11/release-11-5.html) |
+| [11.4.11](33_epas11.4.11_rel_notes.mdx) | 25 Jun 2019 | [11.4](https://www.postgresql.org/docs/11/release-11-4.html) |
+| [11.3.10](35_epas11.3.10_rel_notes.mdx) | 13 May 2019 | [11.3](https://www.postgresql.org/docs/11/release-11-3.html) |
+| [11.2.9](37_epas11.2.9_rel_notes.mdx) | 22 Feb 2019 | [11.2](https://www.postgresql.org/docs/11/release-11-2.html) |
+| [11.1.7](39_epas11.1.7_rel_notes.mdx) | 28 Nov 2018 | [11.1](https://www.postgresql.org/docs/11/release-11-1.html) |
\ No newline at end of file
diff --git a/product_docs/docs/epas/11/installing/linux_install_details/component_locations.mdx b/product_docs/docs/epas/11/installing/linux_install_details/component_locations.mdx
index ab97cfda841..7a657f60ef5 100644
--- a/product_docs/docs/epas/11/installing/linux_install_details/component_locations.mdx
+++ b/product_docs/docs/epas/11/installing/linux_install_details/component_locations.mdx
@@ -18,15 +18,12 @@ The RPM installers place EDB Postgres Advanced Server components in the director
| --------------------------------- | ------------------------------------------ |
| Executables | `/usr/edb/as11/bin` |
| Libraries | `/usr/edb/as11/lib` |
-| Cluster configuration files | `/etc/edb/as11` |
+| Cluster configuration files | `/var/lib/edb/as11` |
| Documentation | `/usr/edb/as11/share/doc` |
| Contrib | `/usr/edb/as11/share/contrib` |
| Data | `/var/lib/edb/as11/data` |
-| Logs | `/var/log/as11` |
-| Lock files | `/var/lock/as11` |
-| Log rotation file | `/etc/logrotate.d/as11` |
-| Sudo configuration file | `/etc/sudoers.d/as11` |
-| Binary to access VIP without sudo | `/usr/edb/as11/bin/secure` |
+| Logs | `/var/log/edb/as11` |
+| Lock files | `/var/lock/edb/as11` |
| Backup area | `/var/lib/edb/as11/backups` |
| Templates | `/usr/edb/as11/share` |
| Procedural Languages | `/usr/edb/as11/lib or /usr/edb/as11/lib64` |
diff --git a/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx b/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx
index bed112d7f05..cce0cfa56ba 100644
--- a/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx
+++ b/product_docs/docs/epas/12/epas_rel_notes/epas12_16_20_rel_notes.mdx
@@ -3,27 +3,37 @@ title: "Version 12.16.20"
hideToC: true
---
+Released: 21 Aug 2023
+
+Updated: 30 Aug 2023
+
!!! Important Upgrading
-Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+!!!
+
+!!! Note After applying patches
+Users of the UTL_MAIL package now require EXECUTE permission on the UTL_SMTP and UTL_TCP packages in addition to EXECUTE permission on UTL_MAIL.
+
+Users of the UTL_SMTP package now require EXECUTE permission on the UTL_TCP package in addition to EXECUTE permission on UTL_SMTP.
!!!
-#### EDB Postgres Advanced Server 12.16.20 includes the following enhancements and bug fixes:
+EDB Postgres Advanced Server 12.16.20 includes the following enhancements and bug fixes:
| Type | Description | Addresses |
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------| --------------------- |
-| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-XXXXX-1](/security/advisories/cve2023xxxxx1/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-XXXXX-2](/security/advisories/cve2023xxxxx2/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-XXXXX-3](/security/advisories/cve2023xxxxx3/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-XXXXX-4](/security/advisories/cve2023xxxxx4/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-XXXXX-5](/security/advisories/cve2023xxxxx5/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-XXXXX-6](/security/advisories/cve2023xxxxx6/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-XXXXX-7](/security/advisories/cve2023xxxxx7/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-XXXXX-8](/security/advisories/cve2023xxxxx8/) | 11+
-| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 | 11+
-| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 | 11+
-| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 | 11+
-| Bug fix | Set correct collation for packaged cursor parameters. | #92739 | 11+
-| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 | 11+
+| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-41117](/security/advisories/cve202341117/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-41119](/security/advisories/cve202341119/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-41113](/security/advisories/cve202341113/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-41118](/security/advisories/cve202341118/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-41116](/security/advisories/cve202341116/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-41114](/security/advisories/cve202341114/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-41115](/security/advisories/cve202341115/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-41120](/security/advisories/cve202341120/) |
+| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 |
+| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 |
+| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 |
+| Bug fix | Set correct collation for packaged cursor parameters. | #92739 |
+| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 |
!!! Note Addresses
diff --git a/product_docs/docs/epas/12/epas_rel_notes/index.mdx b/product_docs/docs/epas/12/epas_rel_notes/index.mdx
index 9261a7af773..2c30ddf4612 100644
--- a/product_docs/docs/epas/12/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/12/epas_rel_notes/index.mdx
@@ -27,20 +27,20 @@ The EDB Postgres Advanced Server (Advanced Server) documentation describes the l
| Version | Release Date | Upstream Merges |
| ------- | ------------ | --------------- |
-| [12.16.20](epas12_16_20_rel_notes.mdx) | 2023 Aug 21 | [12.16](https://www.postgresql.org/docs/12/release-12-16.html)
-| [12.15.19](epas12_15_19_rel_notes.mdx) | 2023 May 11 | [12.15](https://www.postgresql.org/docs/12/release-12-15.html) |
-| [12.14.18](epas12_14_18_rel_notes.mdx) | 2023 Feb 10 | [12.14](https://www.postgresql.org/docs/12/release-12-14.html) |
-| [12.13.17](epas12_13_17_rel_notes.mdx) | 2022 Nov 10 | [12.13](https://www.postgresql.org/docs/12/release-12-13.html) |
-| [12.12.16](epas12_12_16_rel_notes.mdx) | 2022 Aug 11 | [12.12](https://www.postgresql.org/docs/12/release-12-12.html) |
-| [12.11.15](epas12_11_15_rel_notes.mdx) | 2022 May 12 | [12.11](https://www.postgresql.org/docs/12/release-12-11.html) |
-| [12.10.14](05_epas12.10.14_rel_notes.mdx) | 2022 Feb 10 | [12.10](https://www.postgresql.org/docs/12/release-12-10.html) | | [12.10.14](05_epas12.10.14_rel_notes.mdx) | 2022 Feb 10 | [12.10](https://www.postgresql.org/docs/12/release-12-10.html) |
-| [12.9.13](06_epas12.9.13_rel_notes.mdx) | 2021 Nov 11 | [12.9](https://www.postgresql.org/docs/12/release-12-9.html) |
-| [12.8.12](07_epas12.8.12_rel_notes.mdx) | 2021 Sep 28 | [12.8](https://www.postgresql.org/docs/12/release-12-8.html) |
-| [12.7.10](08_epas12.7.10_rel_notes.mdx) | 2021 May 25 | [12.7](https://www.postgresql.org/docs/12/release-12-7.html) |
-| [12.7](09_epas12.7_rel_notes.mdx) | 2021 May 14 | [12.7](https://www.postgresql.org/docs/12/release-12-7.html) |
-| [12.6.7](10_epas12.6.7_rel_notes.mdx) | 2021 Feb 12 | [12.6](https://www.postgresql.org/docs/12/release-12-6.html) |
-| [12.5.6](11_epas12.5.6_rel_notes.mdx) | 2020 Nov 20 | [12.5](https://www.postgresql.org/docs/12/release-12-5.html) |
-| [12.4.5](13_epas12.4.5_rel_notes.mdx) | 2020 Aug 17 | [12.4](https://www.postgresql.org/docs/12/release-12-4.html) |
-| [12.3.4](15_epas12.3.4_rel_notes.mdx) | 2020 May 18 | [12.3](https://www.postgresql.org/docs/12/release-12-3.html) |
-| [12.2.3](17_epas12.2.3_rel_notes.mdx) | 2020 Feb 14 | [12.2](https://www.postgresql.org/docs/12/release-12-2.html) |
-| [12.1.2](19_epas12.1.2_rel_notes.mdx) | 2019 Dec 10 | [12.0](https://www.postgresql.org/docs/12/release-12.html) |
\ No newline at end of file
+| [12.16.20](epas12_16_20_rel_notes.mdx) | 21 Aug 2023 | [12.16](https://www.postgresql.org/docs/12/release-12-16.html) |
+| [12.15.19](epas12_15_19_rel_notes.mdx) | 11 May 2023 | [12.15](https://www.postgresql.org/docs/12/release-12-15.html) |
+| [12.14.18](epas12_14_18_rel_notes.mdx) | 10 Feb 2023 | [12.14](https://www.postgresql.org/docs/12/release-12-14.html) |
+| [12.13.17](epas12_13_17_rel_notes.mdx) | 10 Nov 2022 | [12.13](https://www.postgresql.org/docs/12/release-12-13.html) |
+| [12.12.16](epas12_12_16_rel_notes.mdx) | 11 Aug 2022 | [12.12](https://www.postgresql.org/docs/12/release-12-12.html) |
+| [12.11.15](epas12_11_15_rel_notes.mdx) | 12 May 2022 | [12.11](https://www.postgresql.org/docs/12/release-12-11.html) |
+| [12.10.14](05_epas12.10.14_rel_notes.mdx) | 10 Feb 2022 | [12.10](https://www.postgresql.org/docs/12/release-12-10.html) |
+| [12.9.13](06_epas12.9.13_rel_notes.mdx) | 11 Nov 2021 | [12.9](https://www.postgresql.org/docs/12/release-12-9.html) |
+| [12.8.12](07_epas12.8.12_rel_notes.mdx) | 28 Sep 2021 | [12.8](https://www.postgresql.org/docs/12/release-12-8.html) |
+| [12.7.10](08_epas12.7.10_rel_notes.mdx) | 25 May 2021 | [12.7](https://www.postgresql.org/docs/12/release-12-7.html) |
+| [12.7](09_epas12.7_rel_notes.mdx) | 14 May 2021 | [12.7](https://www.postgresql.org/docs/12/release-12-7.html) |
+| [12.6.7](10_epas12.6.7_rel_notes.mdx) | 12 Feb 2021 | [12.6](https://www.postgresql.org/docs/12/release-12-6.html) |
+| [12.5.6](11_epas12.5.6_rel_notes.mdx) | 20 Nov 2020 | [12.5](https://www.postgresql.org/docs/12/release-12-5.html) |
+| [12.4.5](13_epas12.4.5_rel_notes.mdx) | 17 Aug 2020 | [12.4](https://www.postgresql.org/docs/12/release-12-4.html) |
+| [12.3.4](15_epas12.3.4_rel_notes.mdx) | 18 May 2020 | [12.3](https://www.postgresql.org/docs/12/release-12-3.html) |
+| [12.2.3](17_epas12.2.3_rel_notes.mdx) | 14 Feb 2020 | [12.2](https://www.postgresql.org/docs/12/release-12-2.html) |
+| [12.1.2](19_epas12.1.2_rel_notes.mdx) | 10 Dec 2019 | [12.0](https://www.postgresql.org/docs/12/release-12.html) |
\ No newline at end of file
diff --git a/product_docs/docs/epas/12/installing/linux_install_details/component_locations.mdx b/product_docs/docs/epas/12/installing/linux_install_details/component_locations.mdx
index 7f5a8778d12..83777c8ccc9 100644
--- a/product_docs/docs/epas/12/installing/linux_install_details/component_locations.mdx
+++ b/product_docs/docs/epas/12/installing/linux_install_details/component_locations.mdx
@@ -18,15 +18,12 @@ The RPM installers place EDB Postgres Advanced Server components in the director
| --------------------------------- | ------------------------------------------ |
| Executables | `/usr/edb/as12/bin` |
| Libraries | `/usr/edb/as12/lib` |
-| Cluster configuration files | `/etc/edb/as12` |
+| Cluster configuration files | `/var/lib/edb/as12` |
| Documentation | `/usr/edb/as12/share/doc` |
| Contrib | `/usr/edb/as12/share/contrib` |
| Data | `/var/lib/edb/as12/data` |
-| Logs | `/var/log/as12` |
-| Lock files | `/var/lock/as12` |
-| Log rotation file | `/etc/logrotate.d/as12` |
-| Sudo configuration file | `/etc/sudoers.d/as12` |
-| Binary to access VIP without sudo | `/usr/edb/as12/bin/secure` |
+| Logs | `/var/log/edb/as12` |
+| Lock files | `/var/lock/edb/as12` |
| Backup area | `/var/lib/edb/as12/backups` |
| Templates | `/usr/edb/as12/share` |
| Procedural Languages | `/usr/edb/as12/lib or /usr/edb/as12/lib64` |
diff --git a/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx b/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx
index c5557d68bd9..092e3f925ae 100644
--- a/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx
+++ b/product_docs/docs/epas/13/epas_rel_notes/epas13_12_17_rel_notes.mdx
@@ -3,28 +3,38 @@ title: "Version 13.12.17"
hideToC: true
---
+Released: 21 Aug 2023
+
+Updated: 30 Aug 2023
+
!!! Important Upgrading
-Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+!!!
+
+!!! Note After applying patches
+Users of the UTL_MAIL package now require EXECUTE permission on the UTL_SMTP and UTL_TCP packages in addition to EXECUTE permission on UTL_MAIL.
+
+Users of the UTL_SMTP package now require EXECUTE permission on the UTL_TCP package in addition to EXECUTE permission on UTL_SMTP.
!!!
-#### EDB Postgres Advanced Server 13.12.17 includes the following enhancements and bug fixes:
+EDB Postgres Advanced Server 13.12.17 includes the following enhancements and bug fixes:
| Type | Description | Addresses |
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------| --------------------- |
-| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-XXXXX-1](/security/advisories/cve2023xxxxx1/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-XXXXX-2](/security/advisories/cve2023xxxxx2/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-XXXXX-3](/security/advisories/cve2023xxxxx3/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-XXXXX-4](/security/advisories/cve2023xxxxx4/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-XXXXX-5](/security/advisories/cve2023xxxxx5/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-XXXXX-6](/security/advisories/cve2023xxxxx6/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-XXXXX-7](/security/advisories/cve2023xxxxx7/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-XXXXX-8](/security/advisories/cve2023xxxxx8/) | 11+
-| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 | 11+
-| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 | 11+
-| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 | 11+
-| Bug fix | Set correct collation for packaged cursor parameters. | #92739 | 11+
-| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 | 11+
-| Bug fix | Added checks to ensure required WAL logging in EXCHANGE PARTITION command.| | 13+
+| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-41117](/security/advisories/cve202341117/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-41119](/security/advisories/cve202341119/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-41113](/security/advisories/cve202341113/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-41118](/security/advisories/cve202341118/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-41116](/security/advisories/cve202341116/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-41114](/security/advisories/cve202341114/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-41115](/security/advisories/cve202341115/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-41120](/security/advisories/cve202341120/) |
+| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 |
+| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 |
+| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 |
+| Bug fix | Set correct collation for packaged cursor parameters. | #92739 |
+| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 |
+| Bug fix | Added checks to ensure required WAL logging in EXCHANGE PARTITION command. | |
diff --git a/product_docs/docs/epas/13/epas_rel_notes/index.mdx b/product_docs/docs/epas/13/epas_rel_notes/index.mdx
index eb487d41638..e0a3eb281bd 100644
--- a/product_docs/docs/epas/13/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/13/epas_rel_notes/index.mdx
@@ -20,16 +20,16 @@ The EDB Postgres Advanced Server (Advanced Server) documentation describes the l
| Version | Release Date | Upstream Merges |
| ------------------------------------- | ------------ | ---------------------------------------------------------------------------------------------------------------- |
-| [13.12.17](epas13_12_17_rel_notes) | 2023 Aug 21 | [13.12](https://www.postgresql.org/docs/release/13.12/) |
-| [13.11.15](epas13_11_15_rel_notes) | 2023 May 11 | [13.11](https://www.postgresql.org/docs/release/13.11/) |
-| [13.10.14](epas13_10_14_rel_notes) | 2023 Feb 10 | [13.10](https://www.postgresql.org/docs/release/13.10/) |
-| [13.9.13](epas13_9_13_rel_notes) | 2022 Nov 10 | [13.9](https://www.postgresql.org/docs/release/13.9/) |
-| [13.8.12](epas13_8_12_rel_notes) | 2022 Aug 11 | [13.8](https://www.postgresql.org/docs/release/13.8/) |
-| [13.7.11](epas13_7_11_rel_notes) | 2022 May 23 | [13.7](https://www.postgresql.org/docs/release/13.7/)
-| [13.6.10](13_epas13.6.10_rel_notes) | 2022 Feb 10 | [13.6](https://www.postgresql.org/docs/13/release-13-6.html) |
-| [13.5.9](14_epas13.5.9_rel_notes) | 2021 Nov 11 | [13.5](https://www.postgresql.org/docs/13/release-13-5.html) |
-| [13.4.8](15_epas13.4.8_rel_notes) | 2021 Sep 28 | [13.4](https://www.postgresql.org/docs/13/release-13-4.html) |
-| [13.3.7](16_epas13.3.7_rel_notes) | 2021 May 25 | NA |
-| [13.3.6](17_epas13.3.6_rel_notes) | 2021 May 14 | [13.3](https://www.postgresql.org/docs/13/release-13-3.html) |
-| [13.2.5](19_epas13.2.5_rel_notes) | 2021 Feb 02 | [13.2](https://www.postgresql.org/docs/13/release-13-2.html) |
-| [13.1.4](20_epas13_rel_notes) | 2020 Dec 12 | [13](https://www.postgresql.org/docs/13/release-13.html), [13.1](https://www.postgresql.org/docs/13/release-13-1.html) |
\ No newline at end of file
+| [13.12.17](epas13_12_17_rel_notes) | 21 Aug 2023 | [13.12](https://www.postgresql.org/docs/release/13.12/) |
+| [13.11.15](epas13_11_15_rel_notes) | 11 May 2023 | [13.11](https://www.postgresql.org/docs/release/13.11/) |
+| [13.10.14](epas13_10_14_rel_notes) | 10 Feb 2023 | [13.10](https://www.postgresql.org/docs/release/13.10/) |
+| [13.9.13](epas13_9_13_rel_notes) | 10 Nov 2022 | [13.9](https://www.postgresql.org/docs/release/13.9/) |
+| [13.8.12](epas13_8_12_rel_notes) | 11 Aug 2022 | [13.8](https://www.postgresql.org/docs/release/13.8/) |
+| [13.7.11](epas13_7_11_rel_notes) | 23 May 2022 | [13.7](https://www.postgresql.org/docs/release/13.7/) |
+| [13.6.10](13_epas13.6.10_rel_notes) | 10 Feb 2022 | [13.6](https://www.postgresql.org/docs/13/release-13-6.html) |
+| [13.5.9](14_epas13.5.9_rel_notes) | 11 Nov 2021 | [13.5](https://www.postgresql.org/docs/13/release-13-5.html) |
+| [13.4.8](15_epas13.4.8_rel_notes) | 28 Sep 2021 | [13.4](https://www.postgresql.org/docs/13/release-13-4.html) |
+| [13.3.7](16_epas13.3.7_rel_notes) | 25 May 2021 | NA |
+| [13.3.6](17_epas13.3.6_rel_notes) | 14 May 2021 | [13.3](https://www.postgresql.org/docs/13/release-13-3.html) |
+| [13.2.5](19_epas13.2.5_rel_notes) | 02 Feb 2021 | [13.2](https://www.postgresql.org/docs/13/release-13-2.html) |
+| [13.1.4](20_epas13_rel_notes) | 12 Dec 2020 | [13](https://www.postgresql.org/docs/13/release-13.html), [13.1](https://www.postgresql.org/docs/13/release-13-1.html) |
\ No newline at end of file
diff --git a/product_docs/docs/epas/13/installing/linux_install_details/component_locations.mdx b/product_docs/docs/epas/13/installing/linux_install_details/component_locations.mdx
index 9f1b374a8eb..1954eaade0c 100644
--- a/product_docs/docs/epas/13/installing/linux_install_details/component_locations.mdx
+++ b/product_docs/docs/epas/13/installing/linux_install_details/component_locations.mdx
@@ -17,15 +17,12 @@ The RPM installers place EDB Postgres Advanced Server components in the director
| --------------------------------- | ------------------------------------------ |
| Executables | `/usr/edb/as13/bin` |
| Libraries | `/usr/edb/as13/lib` |
-| Cluster configuration files | `/etc/edb/as13` |
+| Cluster configuration files | `/var/lib/edb/as13` |
| Documentation | `/usr/edb/as13/share/doc` |
| Contrib | `/usr/edb/as13/share/contrib` |
| Data | `/var/lib/edb/as13/data` |
-| Logs | `/var/log/as13` |
-| Lock files | `/var/lock/as13` |
-| Log rotation file | `/etc/logrotate.d/as13` |
-| Sudo configuration file | `/etc/sudoers.d/as13` |
-| Binary to access VIP without sudo | `/usr/edb/as13/bin/secure` |
+| Logs | `/var/log/edb/as13` |
+| Lock files | `/var/lock/edb/as13` |
| Backup area | `/var/lib/edb/as13/backups` |
| Templates | `/usr/edb/as13/share` |
| Procedural Languages | `/usr/edb/as13/lib or /usr/edb/as13/lib64` |
diff --git a/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx b/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx
index d56b0600eb8..5dd0d882c2c 100644
--- a/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx
+++ b/product_docs/docs/epas/14/epas_rel_notes/epas14_9_0_rel_notes.mdx
@@ -3,31 +3,41 @@ title: "Version 14.9.0"
hideToC: true
---
+Released: 21 Aug 2023
+
+Updated: 30 Aug 2023
+
!!! Important Upgrading
-Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+!!!
+
+!!! Note After applying patches
+Users of the UTL_MAIL package now require EXECUTE permission on the UTL_SMTP and UTL_TCP packages in addition to EXECUTE permission on UTL_MAIL.
+
+Users of the UTL_SMTP package now require EXECUTE permission on the UTL_TCP package in addition to EXECUTE permission on UTL_SMTP.
!!!
-#### EDB Postgres Advanced Server 14.9.0 includes the following enhancements and bug fixes:
+EDB Postgres Advanced Server 14.9.0 includes the following enhancements and bug fixes:
| Type | Description | Addresses |
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------| --------------------- |
-| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-XXXXX-1](/security/advisories/cve2023xxxxx1/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-XXXXX-2](/security/advisories/cve2023xxxxx2/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-XXXXX-3](/security/advisories/cve2023xxxxx3/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-XXXXX-4](/security/advisories/cve2023xxxxx4/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-XXXXX-5](/security/advisories/cve2023xxxxx5/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-XXXXX-6](/security/advisories/cve2023xxxxx6/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-XXXXX-7](/security/advisories/cve2023xxxxx7/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-XXXXX-8](/security/advisories/cve2023xxxxx8/) | 11+
-| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 | 11+
-| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 | 11+
-| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 | 11+
-| Bug fix | Set correct collation for packaged cursor parameters. | #92739 | 11+
-| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 | 11+
-| Bug fix | Added checks to ensure required WAL logging in EXCHANGE PARTITION command.| | 13+
-| Bug fix | Dumped/restored the sequences created for GENERATED AS IDENTITY constraint. | #90658 | 14+
-| Bug fix | Skipped updating the last DDL time for the parent table in CREATE INDEX. | #91270 | 14+
-| Bug fix | Removed existing package private procedure or function entries from the edb_last_ddl_time while replacing the package body. | | 14+
+| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-41117](/security/advisories/cve202341117/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-41119](/security/advisories/cve202341119/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-41113](/security/advisories/cve202341113/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-41118](/security/advisories/cve202341118/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-41116](/security/advisories/cve202341116/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-41114](/security/advisories/cve202341114/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-41115](/security/advisories/cve202341115/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-41120](/security/advisories/cve202341120/) |
+| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 |
+| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 |
+| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 |
+| Bug fix | Set correct collation for packaged cursor parameters. | #92739 |
+| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 |
+| Bug fix | Added checks to ensure required WAL logging in EXCHANGE PARTITION command. | |
+| Bug fix | Dumped/restored the sequences created for GENERATED AS IDENTITY constraint. | #90658 |
+| Bug fix | Skipped updating the last DDL time for the parent table in CREATE INDEX. | #91270 |
+| Bug fix | Removed existing package private procedure or function entries from the edb_last_ddl_time while replacing the package body. | |
!!! Note Addresses
diff --git a/product_docs/docs/epas/14/epas_rel_notes/index.mdx b/product_docs/docs/epas/14/epas_rel_notes/index.mdx
index 3fdff1905c9..a11ad85cf6e 100644
--- a/product_docs/docs/epas/14/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/14/epas_rel_notes/index.mdx
@@ -19,15 +19,15 @@ The EDB Postgres Advanced Server (EDB Postgres Advanced Server) documentation de
| Version | Release date | Upstream merges |
| ------------------------------------- | ------------ | ---------------------------------------------------------------------------------------------------------------- |
-| [14.9.0](epas14_9_0_rel_notes) | 2023 Aug 21 | [14.9](https://www.postgresql.org/docs/14/release-14-9.html) |
-| [14.8.0](epas14_8_0_rel_notes) | 2023 May 11 | [14.8](https://www.postgresql.org/docs/14/release-14-8.html) |
-| [14.7.0](epas14_7_0_rel_notes) | 2023 Feb 10 | [14.7](https://www.postgresql.org/docs/14/release-14-7.html) |
-| [14.6.0](epas14_6_0_rel_notes) | 2022 Nov 10 | [14.6](https://www.postgresql.org/docs/14/release-14-6.html) |
-| [14.5.0](epas14_5_0_rel_notes) | 2022 Aug 11 | [14.5](https://www.postgresql.org/docs/14/release-14-5.html) |
-| [14.4.0](epas14_4_0_rel_notes) | 2022 Jun 16 | [14.4](https://www.postgresql.org/docs/14/release-14-4.html) |
-| [14.3.0](epas14_3_0_rel_notes) | 2022 May 12 | [14.3](https://www.postgresql.org/docs/14/release-14-3.html) |
-| [14.2.1](19_epas14.2.1_rel_notes) | 2022 Feb 10 | [14.2](https://www.postgresql.org/docs/14/release-14-2.html) |
-| [14.1.0](20_epas14_rel_notes) | 2021 Dec 01 | [14.0](https://www.postgresql.org/docs/14/release-14.html), [14.1](https://www.postgresql.org/docs/14/release-14-1.html) |
+| [14.9.0](epas14_9_0_rel_notes) | 21 Aug 2023 | [14.9](https://www.postgresql.org/docs/14/release-14-9.html) |
+| [14.8.0](epas14_8_0_rel_notes) | 11 May 2023 | [14.8](https://www.postgresql.org/docs/14/release-14-8.html) |
+| [14.7.0](epas14_7_0_rel_notes) | 10 Feb 2023 | [14.7](https://www.postgresql.org/docs/14/release-14-7.html) |
+| [14.6.0](epas14_6_0_rel_notes) | 10 Nov 2022 | [14.6](https://www.postgresql.org/docs/14/release-14-6.html) |
+| [14.5.0](epas14_5_0_rel_notes) | 11 Aug 2022 | [14.5](https://www.postgresql.org/docs/14/release-14-5.html) |
+| [14.4.0](epas14_4_0_rel_notes) | 16 Jun 2022 | [14.4](https://www.postgresql.org/docs/14/release-14-4.html) |
+| [14.3.0](epas14_3_0_rel_notes) | 12 May 2022 | [14.3](https://www.postgresql.org/docs/14/release-14-3.html) |
+| [14.2.1](19_epas14.2.1_rel_notes) | 10 Feb 2022 | [14.2](https://www.postgresql.org/docs/14/release-14-2.html) |
+| [14.1.0](20_epas14_rel_notes) | 01 Dec 2021 | [14.0](https://www.postgresql.org/docs/14/release-14.html), [14.1](https://www.postgresql.org/docs/14/release-14-1.html) |
## Support announcements
diff --git a/product_docs/docs/epas/14/installing/linux_install_details/component_locations.mdx b/product_docs/docs/epas/14/installing/linux_install_details/component_locations.mdx
index 7010acb2381..7d84735bec9 100644
--- a/product_docs/docs/epas/14/installing/linux_install_details/component_locations.mdx
+++ b/product_docs/docs/epas/14/installing/linux_install_details/component_locations.mdx
@@ -17,15 +17,12 @@ The RPM installers place EDB Postgres Advanced Server components in the director
| --------------------------------- | ------------------------------------------ |
| Executables | `/usr/edb/as14/bin` |
| Libraries | `/usr/edb/as14/lib` |
-| Cluster configuration files | `/etc/edb/as14` |
+| Cluster configuration files | `/var/lib/edb/as14` |
| Documentation | `/usr/edb/as14/share/doc` |
| Contrib | `/usr/edb/as14/share/contrib` |
| Data | `/var/lib/edb/as14/data` |
-| Logs | `/var/log/as14` |
-| Lock files | `/var/lock/as14` |
-| Log rotation file | `/etc/logrotate.d/as14` |
-| Sudo configuration file | `/etc/sudoers.d/as14` |
-| Binary to access VIP without sudo | `/usr/edb/as14/bin/secure` |
+| Logs | `/var/log/edb/as14` |
+| Lock files | `/var/lock/edb/as14` |
| Backup area | `/var/lib/edb/as14/backups` |
| Templates | `/usr/edb/as14/share` |
| Procedural Languages | `/usr/edb/as14/lib or /usr/edb/as14/lib64` |
diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx
index 240bca41fd7..29605d0a77d 100644
--- a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx
+++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx
@@ -39,7 +39,7 @@ export PATH=$EDB_PATH:$PATH
A makefile contains a set of instructions that tell the make utility how to transform a program written in C that contains embedded SQL into a C program. To try the examples, you need:
-- A C compiler (and linker)
+- A C compiler and linker
- The make utility
- ECPGPlus preprocessor and library
- A makefile that contains instructions for ECPGPlus
@@ -62,14 +62,14 @@ LDLIBS += $(LIBPATH) -lecpg -lpq
ecpg -C PROC -c $(INCLUDES) $?
```
-The first two lines use the `pg_config` program to locate the necessary header files and library directories:
+The first two lines use the pg_config program to locate the necessary header files and library directories:
```sql
INCLUDES = -I$(shell pg_config --includedir)
LIBPATH = -L $(shell pg_config --libdir)
```
-The `pg_config` program is shipped with EDB Postgres Advanced Server.
+The pg_config program is shipped with EDB Postgres Advanced Server.
make knows to use the `CFLAGS` variable when running the C compiler and `LDFLAGS` and `LDLIBS` when invoking the linker. ECPG programs must be linked against the ECPG runtime library (`-lecpg`) and the libpq library (`-lpq`).
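+
+To confirm which directories these variables resolve to on your system, you can run pg_config directly. The output shown here is illustrative only and depends on where EDB Postgres Advanced Server is installed:
+
+```shell
+$ pg_config --includedir
+/usr/edb/as15/include
+$ pg_config --libdir
+/usr/edb/as15/lib
+```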
@@ -111,7 +111,7 @@ The make utility:
In the sample makefile, make includes the `-C` option when invoking ECPGPlus to invoke ECPGPlus in Pro\*C-compatible mode.
-If you include the `-C` `PROC` keywords on the command line, in addition to the ECPG syntax, you can use Pro\*C command line syntax. For example:
+If you include the `-C` `PROC` keywords at the command line, in addition to the ECPG syntax, you can use Pro\*C command line syntax. For example:
```shell
$ ecpg -C PROC INCLUDE=/usr/edb/as14/include acct_update.c
@@ -127,16 +127,16 @@ The command line options are:
| Option | Description |
| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| -c | Automatically generate C code from embedded SQL code. |
-| -C *mode* | Use the `-C` option to specify a compatibility mode:
`INFORMIX`
`INFORMIX_SE`
`PROC` |
-| -D *symbol* | Define a preprocessor *symbol*.
The *-D* keyword is not supported when compiling in *PROC mode.* Instead, use the Oracle-style *‘DEFINE=’* clause. |
-| -h | Parse a header file, this option includes option `'-c'`. |
-| -i | Parse system, include files as well. |
-| -I directory | Search *directory* for `include` files. |
-| -o *outfile* | Write the result to *outfile*. |
-| -r *option* | Specify runtime behavior; *option* can be:
`no_indicator` - Don't use indicators, but instead use special values to represent NULL values.
`prepare` - Prepare all statements before using them.
`questionmarks` - Allow use of a question mark as a placeholder.
`usebulk` - Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. |
+| -c | Generate C code from embedded SQL code. |
+| -C <mode> | Specify a compatibility mode:
`INFORMIX`
`INFORMIX_SE`
`PROC` |
+| -D <symbol> | Define a preprocessor symbol.
The -D keyword isn't supported when compiling in `PROC` mode. Instead, use the Oracle-style `DEFINE=` clause. |
+| -h | Parse a header file. This option includes option `-c`. |
+| -i | Parse system include files as well. |
+| -I <directory> | Search <directory> for `include` files. |
+| -o <outfile> | Write the result to <outfile>. |
+| -r <option> | Specify runtime behavior. The value of <option> can be:
`no_indicator` — Don't use indicators, but instead use special values to represent NULL values.
`prepare` — Prepare all statements before using them.
`questionmarks` — Allow use of a question mark as a placeholder.
`usebulk` — Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. |
| --regression | Run in regression testing mode. |
-| -t | Turn on `autocommit` of transactions. |
+| -t | Turn on autocommit of transactions. |
| -l | Disable `#line` directives. |
| --help | Display the help options. |
| --version | Output version information. |
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx
index 2c53ad027ad..6b3eb1053d3 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx
@@ -11,7 +11,7 @@ The `CREATE PROCEDURE` command defines and names a standalone procedure that's s
If you include a schema name, then the procedure is created in the specified schema. Otherwise it's created in the current schema. The name of the new procedure must not match any existing procedure with the same input argument types in the same schema. However, procedures of different input argument types can share a name. This is called *overloading*.
!!! Note
- Overloading of procedures is an EDB Postgres Advanced Server feature. **Overloading of stored, standalone procedures isn't compatible with Oracle databases.**
+ Overloading of procedures is an EDB Postgres Advanced Server feature. Overloading of stored, standalone procedures isn't compatible with Oracle databases.
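+
+For instance, this minimal sketch declares two standalone procedures that share a name but differ in their input argument types (the `log_event` procedures are illustrative, not part of the sample application):
+
+```sql
+-- Both versions can coexist in the same schema because the
+-- input argument types differ.
+CREATE OR REPLACE PROCEDURE log_event (p_msg VARCHAR2)
+IS
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('Message: ' || p_msg);
+END;
+
+CREATE OR REPLACE PROCEDURE log_event (p_code NUMBER)
+IS
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('Code: ' || p_code);
+END;
+```
+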
## Updating the definition of an existing procedure
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx
index 807061bacd6..a84ced4e434 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx
@@ -4,7 +4,7 @@ title: "Declaring parameters"
Declare parameters in the procedure or function definition, and enclose them in parentheses following the procedure or function name. Parameters declared in the procedure or function definition are known as *formal parameters*. When you invoke the procedure or function, the calling program supplies the actual data to use in the called program’s processing as well as the variables that receive the results of the called program’s processing. The data and variables supplied by the calling program when the procedure or function is called are referred to as the *actual parameters*.
-The following is the general format of a formal parameter declaration.
+The following is the general format of a formal parameter declaration:
```text
(<name> [ IN | OUT | IN OUT ] <data_type> [ DEFAULT <value> ])
@@ -40,9 +40,9 @@ END;
In this example, `p_deptno` is an `IN` formal parameter, `p_empno` and `p_ename` are `IN OUT` formal parameters, and `p_job, p_hiredate` and `p_sal` are `OUT` formal parameters.
!!! Note
- In the example, no maximum length was specified on the `VARCHAR2` parameters and no precision and scale were specified on the `NUMBER` parameters. It's illegal to specify a length, precision, scale, or other constraints on parameter declarations. These constraints are inherited from the actual parameters that are used when the procedure or function is called.
+ In the example, no maximum length was specified on the `VARCHAR2` parameters, and no precision and scale were specified on the `NUMBER` parameters. It's illegal to specify a length, precision, scale, or other constraints on parameter declarations. These constraints are inherited from the actual parameters that are used when the procedure or function is called.
-The `emp_query` procedure can be called by another program, passing it the actual parameters. This example is another SPL program that calls `emp_query`.
+The `emp_query` procedure can be called by another program, passing it the actual parameters. This example is another SPL program that calls `emp_query`:
```sql
DECLARE
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx
index 42cc6364f32..2469034108f 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx
@@ -6,10 +6,10 @@ A transaction begins when the first SQL command is encountered in the SPL progra
The transaction ends when one of the following occurs:
-- **An unhandled exception occurs**. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted.
-- **A `COMMIT` command is encountered**. In this case, the effect of all database updates made during the transaction become permanent.
-- **A `ROLLBACK` command is encountered**. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. If a new SQL command is encountered, a new transaction begins.
-- **Control returns to the calling application** (such as Java or PSQL). In this case, the action of the application determines whether the transaction is committed or rolled back. The exception is when the transaction is in a block in which `PRAGMA AUTONOMOUS_TRANSACTION` was declared. In this case, the commitment or rollback of the transaction occurs independently of the calling program.
+- An unhandled exception occurs. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted.
+- A `COMMIT` command is encountered. In this case, the effects of all database updates made during the transaction become permanent.
+- A `ROLLBACK` command is encountered. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. If a new SQL command is encountered, a new transaction begins.
+- Control returns to the calling application, such as Java or PSQL. In this case, the action of the application determines whether the transaction is committed or rolled back. The exception is when the transaction is in a block in which `PRAGMA AUTONOMOUS_TRANSACTION` was declared. In this case, the commitment or rollback of the transaction occurs independently of the calling program.
!!! Note
Unlike Oracle, DDL commands such as `CREATE TABLE` don't implicitly occur in their own transaction. Therefore, DDL commands don't cause an immediate database commit as in Oracle, and you can roll back DDL commands just like DML commands.
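+
+A minimal sketch of that behavior (the table name is illustrative):
+
+```sql
+BEGIN;
+CREATE TABLE rollback_demo (id integer);
+ROLLBACK;   -- the CREATE TABLE is undone; rollback_demo no longer exists
+```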
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx
index 312f99a6310..1071875345f 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx
@@ -12,7 +12,7 @@ The most commonly known type of collection is an array. In EDB Postgres Advanced
To set up a collection:
1. Define a collection of the desired type. You can do this in the declaration section of an SPL program, which results in a *local type* that you can access only in that program. For nested table and varray types, you can also do this using the `CREATE TYPE` command, which creates a persistent, *standalone type* that any SPL program in the database can reference.
-2. Declare variables of the collection type. The collection associated with the declared variable is *uninitialized* at this point if no value assignment is made as part of the variable declaration.
+2. Declare variables of the collection type. The collection associated with the declared variable is uninitialized at this point if no value assignment is made as part of the variable declaration.
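+
+For instance, a minimal sketch of these two steps using a local nested table type (the names and values are illustrative):
+
+```sql
+DECLARE
+    -- Step 1: define a local collection type.
+    TYPE dept_names_t IS TABLE OF VARCHAR2(14);
+    -- Step 2: declare a variable of that type, here initialized
+    -- with the type's constructor as part of the declaration.
+    dept_names dept_names_t := dept_names_t('ACCOUNTING', 'RESEARCH');
+BEGIN
+    DBMS_OUTPUT.PUT_LINE(dept_names(1));
+END;
+```
+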
## Initializing a null collection
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx
index 2a5dc71c8cc..860cfa6e692 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx
@@ -6,7 +6,7 @@ redirects:
-You can use collections to process DML commands more efficiently by passing all the values to be used for repetitive execution of a `DELETE`, `INSERT`, or `UPDATE` command in one pass to the database server. The alternative is to reiteratively invoking the DML command with new values. Specify the DML command to process this way with the `FORALL` statement. In addition, provide one or more collections in the DML command where you want to subsitute different values each time the command is executed.
+You can use collections to process DML commands more efficiently by passing all the values to be used for repetitive execution of a `DELETE`, `INSERT`, or `UPDATE` command in one pass to the database server. The alternative is to repeatedly invoke the DML command with new values. Specify the DML command to process this way with the `FORALL` statement. In addition, provide one or more collections in the DML command where you want to substitute different values each time the command is executed.
## Syntax
@@ -19,7 +19,10 @@ FORALL IN ..
## How it works
-If an exception occurs during any iteration of the `FORALL` statement, all updates that occurred since the start of the execution of the `FORALL` statement are rolled back. This behavior **isn't compatible** with Oracle databases. Oracle allows explicit use of the `COMMIT` or `ROLLBACK` commands to control whether to commit or roll back updates that occurred prior to the exception.
+If an exception occurs during any iteration of the `FORALL` statement, all updates that occurred since the start of the execution of the `FORALL` statement are rolled back.
+
+!!! Note
+ This behavior isn't compatible with Oracle databases. Oracle allows explicit use of the `COMMIT` or `ROLLBACK` commands to control whether to commit or roll back updates that occurred prior to the exception.
The `FORALL` statement creates a loop. Each iteration of the loop increments the `index` variable. You typically use the `index` in the loop to select a member of a collection. Control the number of iterations with the `lower_bound .. upper_bound` clause. The loop executes once for each integer between the `lower_bound` and `upper_bound` (inclusive), and the index increments by one for each iteration.
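+
+A minimal sketch (the `emp` table is the sample table used elsewhere in this chapter; the employee numbers are illustrative):
+
+```sql
+DECLARE
+    TYPE empno_tbl IS TABLE OF NUMBER(4) INDEX BY BINARY_INTEGER;
+    t_empno  empno_tbl;
+BEGIN
+    t_empno(1) := 7369;
+    t_empno(2) := 7499;
+    t_empno(3) := 7521;
+    -- One pass to the server updates all three rows; i runs from 1 to 3.
+    FORALL i IN 1 .. 3
+        UPDATE emp SET sal = sal * 1.10 WHERE empno = t_empno(i);
+END;
+```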
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx
index 390529a1f64..347856c8faa 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx
@@ -12,9 +12,9 @@ redirects:
EDB Postgres Advanced Server supports *row-level* and *statement-level* triggers.
-- A *row-level trigger* fires once for each row that's affected by a triggering event. For example, suppose deletion is defined as a triggering event on a table, and a single `DELETE` command is issued that deletes five rows from the table. In this case, the trigger fires five times, once for each row.
+- A row-level trigger fires once for each row that's affected by a triggering event. For example, suppose deletion is defined as a triggering event on a table, and a single `DELETE` command is issued that deletes five rows from the table. In this case, the trigger fires five times, once for each row.
-- A *statement-level* trigger fires once per triggering statement, regardless of the number of rows affected by the triggering event. In the previous example of a single `DELETE` command deleting five rows, a statement-level trigger fires only once.
+- A statement-level trigger fires once per triggering statement, regardless of the number of rows affected by the triggering event. In the previous example of a single `DELETE` command deleting five rows, a statement-level trigger fires only once.
You can define the sequence of actions regarding whether the trigger code block executes before or after the triggering statement for statement-level triggers. For row-level triggers, you can define whether the trigger code block executes before or after each row is affected by the triggering statement.
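+
+For instance, a minimal sketch of both levels on the sample `emp` table (the trigger names are illustrative):
+
+```sql
+-- Row-level: fires once for each deleted row.
+CREATE OR REPLACE TRIGGER emp_del_row_trig
+    AFTER DELETE ON emp
+    FOR EACH ROW
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('Deleted employee ' || :OLD.empno);
+END;
+
+-- Statement-level: fires once per DELETE statement.
+CREATE OR REPLACE TRIGGER emp_del_stmt_trig
+    AFTER DELETE ON emp
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('DELETE statement completed');
+END;
+```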
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx
index 790774bb68d..9f267d486f1 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx
@@ -8,7 +8,7 @@ redirects:
## Defining a compound trigger on a table
-This example shows a compound trigger that records a change to the employee salary by defining a compound trigger (named `hr_trigger`) on the `emp` table.
+This example shows a compound trigger that records a change to the employee salary by defining a compound trigger named `hr_trigger` on the `emp` table.
1. Create a table named `emp`:
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx
index 37e4955855e..4e7ba1654be 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx
@@ -8,6 +8,6 @@ redirects:
Methods are SPL procedures or functions defined in an object type. Methods are categorized into three general types:
-- *Member methods* — Procedures or functions that operate in the context of an object instance. Member methods have access to and can change the attributes of the object instance on which they're operating.
-- *Static methods* — Procedures or functions that operate independently of any particular object instance. Static methods don't have access to and can't change the attributes of an object instance.
-- *Constructor methods* — Functions used to create an instance of an object type. A default constructor method is always provided when an object type is defined.
+- **Member methods** — Procedures or functions that operate in the context of an object instance. Member methods have access to and can change the attributes of the object instance on which they're operating.
+- **Static methods** — Procedures or functions that operate independently of any particular object instance. Static methods don't have access to and can't change the attributes of an object instance.
+- **Constructor methods** — Functions used to create an instance of an object type. A default constructor method is always provided when an object type is defined.
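+
+A minimal sketch showing all three method types in one object type specification (the type and method names are illustrative):
+
+```sql
+CREATE OR REPLACE TYPE account_typ AS OBJECT
+(
+    acct_no  NUMBER,
+    balance  NUMBER,
+    -- Member method: operates on an object instance.
+    MEMBER FUNCTION available_balance RETURN NUMBER,
+    -- Static method: independent of any instance.
+    STATIC FUNCTION default_currency RETURN VARCHAR2,
+    -- Constructor method: builds an instance of the type.
+    CONSTRUCTOR FUNCTION account_typ (p_acct_no NUMBER)
+        RETURN SELF AS RESULT
+);
+```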
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx
index e6369debb98..89c4ee511b3 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx
@@ -13,8 +13,8 @@ redirects:
An object type is a description or definition of some entity. This definition of an object type is characterized by two components:
-- *Attributes* — Fields that describe particular characteristics of an object instance. For a person object, examples are name, address, gender, date of birth, height, weight, eye color, and occupation.
-- *Methods* — Programs that perform some type of function or operation on or are related to an object. For a person object, examples are calculating the person’s age, displaying the person’s attributes, and changing the values assigned to the person’s attributes.
+- Attributes — Fields that describe particular characteristics of an object instance. For a person object, examples are name, address, gender, date of birth, height, weight, eye color, and occupation.
+- Methods — Programs that perform some type of function or operation on or are related to an object. For a person object, examples are calculating the person’s age, displaying the person’s attributes, and changing the values assigned to the person’s attributes.
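+
+For instance, a minimal sketch of a person object type with a few attributes and one method (illustrative only; later sections cover the full syntax):
+
+```sql
+CREATE OR REPLACE TYPE person_typ AS OBJECT
+(
+    name          VARCHAR2(40),   -- attributes
+    date_of_birth DATE,
+    occupation    VARCHAR2(30),
+    MEMBER FUNCTION age RETURN NUMBER   -- a method related to the object
+);
+```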
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx
index 2053696e50a..3ce3180cc15 100644
--- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx
+++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx
@@ -36,7 +36,7 @@ CONSTRUCTOR FUNCTION address
The body of the default constructor sets each member to `NULL`.
-To create a custom constructor, declare the constructor function (using the keyword constructor) in the `CREATE TYPE` command and define the construction function in the `CREATE TYPE BODY` command. For example, you might want to create a custom constructor for the `address` type that computes the city and state given a `street_address` and `postal_code`:
+To create a custom constructor, declare the constructor function using the keyword `CONSTRUCTOR` in the `CREATE TYPE` command, and define the constructor function in the `CREATE TYPE BODY` command. For example, you might want to create a custom constructor for the `address` type that computes the city and state given a `street_address` and `postal_code`:
```sql
CREATE TYPE address AS OBJECT
diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx
index 9e16c46c6d0..0fd018abed8 100644
--- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx
+++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx
@@ -2,7 +2,7 @@
title: "About optimizer hints"
---
-An *optimizer hint* is one or more directives embedded in a comment-like syntax that immediately follows a `DELETE`, `INSERT`, `SELECT` or `UPDATE` command. Keywords in the comment instruct the server to use or avoid a specific plan when producing the result set.
+An *optimizer hint* is one or more directives embedded in a comment-like syntax that immediately follows a `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command. Keywords in the comment instruct the server to use or avoid a specific plan when producing the result set.
## Synopsis
@@ -23,8 +23,8 @@ If you're using the first form, the hint and optional comment might span multipl
Note:
- The database server always tries to use the specified hints if at all possible.
-- If a planner method parameter is set so as to disable a certain plan type, then this plan isn't used even if it is specified in a hint, unless there are no other possible options for the planner. Examples of planner method parameters are `enable_indexscan`, `enable_seqscan`, `enable_hashjoin`, `enable_mergejoin`, and `enable_nestloop`. These are all Boolean parameters.
-- The hint is embedded in a comment. As a consequence, if the hint is misspelled or if any parameter to a hint such as view, table, or column name is misspelled or nonexistent in the SQL command, there's no indication that an error occurred. No syntax error is given. The entire hint is silently ignored.
+- If a planner method parameter is set so as to disable a certain plan type, then this plan isn't used even if it's specified in a hint, unless there are no other possible options for the planner. Examples of planner method parameters are `enable_indexscan`, `enable_seqscan`, `enable_hashjoin`, `enable_mergejoin`, and `enable_nestloop`. These are all Boolean parameters.
+- The hint is embedded in a comment. As a consequence, if the hint is misspelled or if any parameter to a hint, such as view, table, or column name, is misspelled or nonexistent in the SQL command, there's no indication that an error occurred. No syntax error is given. The entire hint is silently ignored.
- If an alias is used for a table name in the SQL command, then you must use the alias name in the hint, not the original table name. For example, in the command `SELECT /*+ FULL(acct) */ * FROM accounts acct ..., acct`, you must specify the alias for `accounts` in the `FULL` hint, not in the table name `accounts`.
Use the `EXPLAIN` command to ensure that the hint is correctly formed and the planner is using the hint.
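+
+For instance, a minimal sketch using the `accounts` table and alias from the earlier example (whether the planner honors the hint still depends on the available plan types):
+
+```sql
+-- The FULL hint names the alias, not the table name.
+EXPLAIN SELECT /*+ FULL(acct) */ *
+FROM accounts acct;
+```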
diff --git a/product_docs/docs/epas/15/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx b/product_docs/docs/epas/15/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx
index 8af56c67a92..c7c80beddda 100644
--- a/product_docs/docs/epas/15/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx
+++ b/product_docs/docs/epas/15/database_administration/01_configuration_parameters/01_setting_new_parameters.mdx
@@ -14,7 +14,7 @@ redirects:
-Set each configuration parameter using a name/value pair. Parameter names are not case sensitive. The parameter name is typically separated from its value by an optional equals sign (`=`).
+Set each configuration parameter using a name/value pair. Parameter names aren't case sensitive. The parameter name is typically separated from its value by an optional equals sign (`=`).
This example shows some configuration parameter settings in the `postgresql.conf` file:
@@ -56,19 +56,19 @@ The multiplier for memory units is 1024.
A number of parameter settings are set when the EDB Postgres Advanced Server database product is built. These are read-only parameters, and you can't change their values. A couple of parameters are also permanently set for each database when the database is created. These parameters are read-only and you can't later change them for the database. However, there are a number of ways to specify the configuration parameter settings:
-- The initial settings for almost all configurable parameters across the entire database cluster are listed **in the `postgresql.conf`** configuration file. These settings are put into effect upon database server start or restart. You can override some of these initial parameter settings. All configuration parameters have built-in default settings that are in effect unless they are explicitly overridden.
+- The initial settings for almost all configurable parameters across the entire database cluster are listed in the `postgresql.conf` configuration file. These settings are put into effect upon database server start or restart. You can override some of these initial parameter settings. All configuration parameters have built-in default settings that are in effect unless you explicitly override them.
-- Configuration parameters in the `postgresql.conf` file are overridden when the same parameters are included **in the `postgresql.auto.conf` file**. The `ALTER SYSTEM` command is used to manage the configuration parameters in the `postgresql.auto.conf` file.
+- Configuration parameters in the `postgresql.conf` file are overridden when the same parameters are included in the `postgresql.auto.conf` file. Use the `ALTER SYSTEM` command to manage the configuration parameters in the `postgresql.auto.conf` file.
-- You can modify parameter settings **in the configuration file while the database server is running**. If the configuration file is then reloaded (meaning a SIGHUP signal is issued), for certain parameter types, the changed parameters settings immediately take effect. For some of these parameter types, the new settings are available in a currently running session immediately after the reload. For other of these parameter types, you must start a new session to use the new settings. And yet for other parameter types, modified settings don't take effect until the database server is stopped and restarted. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/config-setting.html) for information on how to reload the configuration file:
+- You can modify parameter settings in the configuration file while the database server is running. If the configuration file is then reloaded (meaning a SIGHUP signal is issued), for certain parameter types, the changed parameter settings immediately take effect. For some of these parameter types, the new settings are available in a currently running session immediately after the reload. For others, you must start a new session to use the new settings. And for some others, modified settings don't take effect until the database server is stopped and restarted. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/config-setting.html) for information on how to reload the configuration file.
-- You can **use the SQL commands** `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` to modify certain parameter settings. The modified parameter settings take effect for new sessions after you execute the command. `ALTER DATABASE` affects new sessions connecting to the specified database. `ALTER ROLE` affects new sessions started by the specified role. `ALTER ROLE IN DATABASE` affects new sessions started by the specified role connecting to the specified database. Parameter settings established by these SQL commands remain in effect indefinitely, across database server restarts, overriding settings established by the other methods. Parameter settings established using the `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` commands can be changed only by either:
+- You can use the SQL commands `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` to modify certain parameter settings. The modified parameter settings take effect for new sessions after you execute the command. `ALTER DATABASE` affects new sessions connecting to the specified database. `ALTER ROLE` affects new sessions started by the specified role. `ALTER ROLE IN DATABASE` affects new sessions started by the specified role connecting to the specified database. Parameter settings established by these SQL commands remain in effect indefinitely, across database server restarts, overriding settings established by the other methods. You can change parameter settings established using the `ALTER DATABASE`, `ALTER ROLE`, or `ALTER ROLE IN DATABASE` commands by either:
- Reissuing these commands with a different parameter value.
- - Issuing these commands using either of the `SET parameter TO DEFAULT` clause or the `RESET parameter` clause. These clauses change the parameter back to using the setting set by the other methods. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-commands.html) for the exact syntax of these SQL commands.
+ - Issuing these commands using the `SET parameter TO DEFAULT` clause or the `RESET parameter` clause. These clauses change the parameter back to the setting established by the other methods. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-commands.html) for the syntax of these SQL commands.
-- You can make changes for certain parameter settings for the duration of individual sessions **using the `PGOPTIONS` environment variable** or by **using the `SET` command in the EDB-PSQL or PSQL command-line programs**. Parameter settings made this way override settings established using any of the methods descussed earlier, but only during that session.
+- You can make changes for certain parameter settings for the duration of individual sessions using the `PGOPTIONS` environment variable or by using the `SET` command in the EDB-PSQL or PSQL command-line programs. Parameter settings made this way override settings established using any of the methods discussed earlier, but only during that session. A brief sketch of several of these methods follows this list.
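+
+A minimal sketch of several of these methods (the database, role, and values are illustrative):
+
+```sql
+ALTER DATABASE edb SET work_mem = '16MB';               -- new sessions on database edb
+ALTER ROLE report_user SET work_mem = '32MB';           -- new sessions started by report_user
+ALTER ROLE report_user IN DATABASE edb RESET work_mem;  -- revert to the other methods' setting
+SET work_mem = '64MB';                                  -- current session only
+```
+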
## Modifying the postgresql.conf file
@@ -76,7 +76,7 @@ The configuration parameters in the `postgresql.conf` file specify server behavi
Parameters that are preceded by a pound sign (#) are set to their default value. To change a parameter value, remove the pound sign and enter a new value. After setting or changing a parameter, you must either `reload` or `restart` the server for the new parameter value to take effect.
-In the `postgresql.conf` file, some parameters contain comments that indicate `change requires restart`. To view a list of the parameters that require a server restart, execute the following query at the psql command line:
+In the `postgresql.conf` file, some parameters contain comments that indicate `change requires restart`. To view a list of the parameters that require a server restart, use the following query at the psql command line:
```sql
SELECT name FROM pg_settings WHERE context = 'postmaster';
@@ -88,7 +88,7 @@ SELECT name FROM pg_settings WHERE context = 'postmaster';
Appropriate authentication methods provide protection and security. Entries in the `pg_hba.conf` file specify the authentication methods that the server uses with connecting clients. Before connecting to the server, you might need to modify the authentication properties specified in the `pg_hba.conf` file.
-When you invoke the `initdb` utility to create a cluster, the utility creates a `pg_hba.conf` file for that cluster that specifies the type of authentication required from connecting clients. You can modify this file. After modifying the authentication settings in the `pg_hba.conf` file, restart the server and apply the changes. For more information about authentication and modifying the `pg_hba.conf` file, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html).
+When you invoke the initdb utility to create a cluster, the utility creates a `pg_hba.conf` file for that cluster that specifies the type of authentication required from connecting clients. You can modify this file. After modifying the authentication settings in the `pg_hba.conf` file, restart the server and apply the changes. For more information about authentication and modifying the `pg_hba.conf` file, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html).
When the server receives a connection request, it verifies the credentials provided against the authentication settings in the `pg_hba.conf` file before allowing a connection to a database. To log the `pg_hba.conf` file entry to authenticate a connection to the server, set the `log_connections` parameter to `ON` in the `postgresql.conf` file.
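+
+For example, one minimal way to turn on that logging without editing `postgresql.conf` by hand (a configuration reload is still required):
+
+```sql
+ALTER SYSTEM SET log_connections = on;
+SELECT pg_reload_conf();   -- applies to connections made after the reload
+```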
@@ -106,9 +106,9 @@ application_name=psql
## Obfuscating the LDAP password
-When using [LDAP](https://www.postgresql.org/docs/15/auth-ldap.html) for authentication, the LDAP password used to connect to the LDAP server, (the ldapbindpasswd password) is stored in the pg_hba.conf file. You can store the password there in an obfuscated form, which can then be de-obfuscated by a loadable module which you supply. The loadable module supplies a hook function that performs the de-obfuscation.
+When using [LDAP](https://www.postgresql.org/docs/15/auth-ldap.html) for authentication, the LDAP password used to connect to the LDAP server (the ldapbindpasswd password) is stored in the `pg_hba.conf` file. You can store the password there in an obfuscated form, which can then be de-obfuscated by a loadable module that you supply. The loadable module supplies a hook function that performs the de-obfuscation.
-For example, here is a C loadable module which uses `rot13_passphrase` as the hook function to de-obfuscate the password from the pg_hba.conf file:
+For example, this loadable C module uses `rot13_passphrase` as the hook function to de-obfuscate the password from the `pg_hba.conf` file:
```c
#include "postgres.h"
@@ -165,13 +165,10 @@ For example, here is a C loadable module which uses `rot13_passphrase` as the ho
}
```
-Add your module to the `shared_preload_libraries` parameter in the postgresql.conf file. For example:
+Add your module to the `shared_preload_libraries` parameter in the `postgresql.conf` file. For example:
```ini
shared_preload_libraries = '$libdir/ldap_password_func'
```
Restart your server to load the changes in this parameter.
-
-
-
diff --git a/product_docs/docs/epas/15/database_administration/01_configuration_parameters/index.mdx b/product_docs/docs/epas/15/database_administration/01_configuration_parameters/index.mdx
index d2734bcf096..0dd62389ff1 100644
--- a/product_docs/docs/epas/15/database_administration/01_configuration_parameters/index.mdx
+++ b/product_docs/docs/epas/15/database_administration/01_configuration_parameters/index.mdx
@@ -14,7 +14,7 @@ redirects:
The EDB Postgres Advanced Server configuration parameters control various aspects of the database server’s behavior and environment such as data file and log file locations, connection, authentication and security settings, resource allocation and consumption, archiving and replication settings, error logging and statistics gathering, optimization and performance tuning, and locale and formatting settings
-Configuration parameters that apply only to EDB Postgres Advanced Server are noted in the [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/) topic, which lists all EDB Postgres Advanced Server configuration parameters along with a number of key attributes of the parameters.
+Configuration parameters that apply only to EDB Postgres Advanced Server are noted in [Summary of configuration parameters](/epas/latest/reference/database_administrator_reference/02_summary_of_configuration_parameters/), which lists all EDB Postgres Advanced Server configuration parameters along with a number of key attributes of the parameters.
You can find more information about configuration parameters in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/runtime-config.html).
diff --git a/product_docs/docs/epas/15/database_administration/02_edb_loader/data_loading_methods.mdx b/product_docs/docs/epas/15/database_administration/02_edb_loader/data_loading_methods.mdx
index d75c1ccd3ea..5bde3714902 100644
--- a/product_docs/docs/epas/15/database_administration/02_edb_loader/data_loading_methods.mdx
+++ b/product_docs/docs/epas/15/database_administration/02_edb_loader/data_loading_methods.mdx
@@ -6,11 +6,11 @@ description: "Description of the data loading methods supported by EDB*Loader"
As with Oracle SQL\*Loader, EDB\*Loader supports three data loading methods:
-- *Conventional path load* — Conventional path load is the default method used by EDB\*Loader. Use basic insert processing to add rows to the table. The advantage of a conventional path load is that table constraints and database objects defined on the table are enforced during a conventional path load. Table constraints and database objects include primary keys, not null constraints, check constraints, unique indexes, foreign key constraints, triggers, and so on. One exception is that the EDB Postgres Advanced Server rules defined on the table aren't enforced. EDB\*Loader can load tables on which rules are defined. However, the rules aren't executed. As a consequence, you can't load partitioned tables implemented using rules with EDB\*Loader. See [Conventional path load](invoking_edb_loader/conventional_path_load.mdx)
+- **Conventional path load** — Conventional path load is the default method used by EDB\*Loader. It uses basic insert processing to add rows to the table. The advantage of a conventional path load is that table constraints and database objects defined on the table are enforced during a conventional path load. Table constraints and database objects include primary keys, not null constraints, check constraints, unique indexes, foreign key constraints, triggers, and so on. One exception is that the EDB Postgres Advanced Server rules defined on the table aren't enforced. EDB\*Loader can load tables on which rules are defined. However, the rules aren't executed. As a consequence, you can't load partitioned tables implemented using rules with EDB\*Loader. See [Conventional path load](invoking_edb_loader/conventional_path_load.mdx).
-- *Direct path load* — A direct path load is faster than a conventional path load but requires removing most types of constraints and triggers from the table. See [Direct path load](invoking_edb_loader/direct_path_load.mdx).
+- **Direct path load** — A direct path load is faster than a conventional path load but requires removing most types of constraints and triggers from the table. See [Direct path load](invoking_edb_loader/direct_path_load.mdx).
-- *Parallel direct path load* — A parallel direct path load provides even greater performance improvement by permitting multiple EDB\*Loader sessions to run simultaneously to load a single table. See [Parallel direct path load](invoking_edb_loader/parallel_direct_path_load.mdx).
+- **Parallel direct path load** — A parallel direct path load provides even greater performance improvement by permitting multiple EDB\*Loader sessions to run simultaneously to load a single table. See [Parallel direct path load](invoking_edb_loader/parallel_direct_path_load.mdx).
!!! Note
Create EDB Postgres Advanced Server rules using the `CREATE RULE` command. EDB Postgres Advanced Server rules aren't the same database objects as rules and rule sets used in Oracle.
diff --git a/product_docs/docs/epas/15/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx b/product_docs/docs/epas/15/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx
index 9e87a46566c..4f4efffab17 100644
--- a/product_docs/docs/epas/15/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx
+++ b/product_docs/docs/epas/15/database_administration/02_edb_loader/edb_loader_overview_and_restrictions.mdx
@@ -21,9 +21,9 @@ EDB\*Loader features include:
The important version compatibility restrictions between the EDB\*Loader client and the database server are:
-- When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. We strongly recommend that you use the version 14 EDB\*Loader client (the edbldr program supplied with EDB Postgres Advanced Server 14) to load data only into version 14 of the database server. In general, use the same version for the EDB\*Loader client and database server.
+- When you invoke the EDB\*Loader program (called `edbldr`), you pass in parameters and directive information to the database server. We strongly recommend that you use the EDB\*Loader client (the `edbldr` program) supplied with your version of EDB Postgres Advanced Server to load data only into a database server of the same version. In general, use the same version for the EDB\*Loader client and database server.
-- Using EDB\*Loader with connection poolers such as PgPool-II and PgBouncer isn't supported. EDB\*Loader must connect directly to EDB Postgres Advanced Server version 14. Alternatively, there are commands you can use for loading data through connection poolers:
+- Using EDB\*Loader with connection poolers such as PgPool-II and PgBouncer isn't supported. EDB\*Loader must connect directly to EDB Postgres Advanced Server. Alternatively, you can use these commands for loading data through connection poolers:
```shell
psql \copy
diff --git a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx
index 1ff5a0a7ecf..15603bf79e2 100644
--- a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx
+++ b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/direct_path_load.mdx
@@ -8,7 +8,7 @@ redirects:
-During a direct path load, EDB\*Loader writes the data directly to the database pages, which is then synchronized to disk. The insert processing associated with a conventional path load is bypassed, resulting in a performance improvement. Bypassing insert processing reduces the types of constraints on the target table. The types of constraints permitted on the target table of a direct path load are:
+During a direct path load, EDB\*Loader writes the data directly to the database pages, which are then synchronized to disk. The insert processing associated with a conventional path load is bypassed, resulting in a performance improvement. Bypassing insert processing reduces the types of constraints on the target table. The types of constraints permitted on the target table of a direct path load are:
- Primary key
- Not null constraints
diff --git a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx
index 15e0f91cd07..bc539ea9247 100644
--- a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx
+++ b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/performing_remote_loading.mdx
@@ -16,7 +16,7 @@ In addition, you can use the standard input feature to pipe the data from the da
## Requirements
-Performing remote loading along with using standard input requires:
+Performing remote loading using standard input requires:
- The `edbldr` program must be installed on the client host on which to invoke it with the data source for the EDB\*Loader session.
- The control file must contain the clause `INFILE 'stdin'` so you can pipe the data directly into EDB\*Loader’s standard input. For information on the `INFILE` clause and the EDB\*Loader control file, see [Building the EDB\*Loader control file](../building_the_control_file/).
diff --git a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx
index ab018dcd42a..cf95ea94d92 100644
--- a/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx
+++ b/product_docs/docs/epas/15/database_administration/02_edb_loader/invoking_edb_loader/running_edb_loader.mdx
@@ -29,21 +29,28 @@ edbldr [ -d ] [ -p ] [ -h ]
## Description
-You can specify parameters listed in the syntax diagram in a *parameter file*. Exeptions include the `-d` option, `-p` option, `-h` option, and the `PARFILE` parameter. Specify the parameter file on the command line when you invoke `edbldr` using `PARFILE=param_file`. You can specify some parameters in the `OPTIONS` clause in the control file. For more information on the control file, see [Building the EDB\*Loader control file](../building_the_control_file/).
+You can specify parameters listed in the syntax diagram in a *parameter file*. Exceptions include the `-d` option, `-p` option, `-h` option, and the `PARFILE` parameter. Specify the parameter file on the command line when you invoke edbldr using `PARFILE=param_file`. You can specify some parameters in the `OPTIONS` clause in the control file. For more information on the control file, see [Building the EDB\*Loader control file](../building_the_control_file/).
-You can include the full directory path or a relative directory path to the file name when specifying `control_file`, `data_file`, `bad_file`, `discard_file`, `log_file`, and `param_file`. If you specify the file name alone or with a relative directory path, the file is assumed to exist in the case of `control_file`, `data_file`, or `param_file` relative to the current working directory from which `edbldr` is invoked. In the case of `bad_file`, `discard_file`, or `log_file`, the file is created.
+You can include the full directory path or a relative directory path to the file name when specifying `control_file`, `data_file`, `bad_file`, `discard_file`, `log_file`, and `param_file`. If you specify the file name alone or with a relative directory path, then `control_file`, `data_file`, and `param_file` are assumed to exist relative to the current working directory from which edbldr is invoked. In the case of `bad_file`, `discard_file`, or `log_file`, the file is created.
-If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults for the database, port, and host are determined by the same rules as other EDB Postgres Advanced Server (EPAS) utility programs, such as `edb-psql`.
+If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults for the database, port, and host are determined by the same rules as other EDB Postgres Advanced Server utility programs, such as edb-psql.
## Requirements
-- The control file must exist in the character set encoding of the client where `edbldr` is invoked. If the client is in an encoding different from the database encoding, then you must set the `PGCLIENTENCODING` environment variable on the client to the client’s encoding prior to invoking `edbldr`. This technique ensures character set conversion between the client and the database server is done correctly.
+- The control file must exist in the character set encoding of the client where edbldr is invoked. If the client is in an encoding different from the database encoding, then you must set the `PGCLIENTENCODING` environment variable on the client to the client’s encoding prior to invoking edbldr. This technique ensures character set conversion between the client and the database server is done correctly.
-- The file names for `control_file`, `data_file`, `bad_file`, `discard_file`, and `log_file` must include the extensions `.ctl`, `.dat`, `.bad`, `.dsc`, and `.log`, respectively. If the provided file name doesn't contain an extension, EDB\*Loader assumes the actual file name includes the appropriate extension.
+- The file names must include these extensions:
+ - `control_file` must use the `.ctl` extension.
+ - `data_file` must use the `.dat` extension.
+ - `bad_file` must use the `.bad` extension.
+ - `discard_file` must use the `.dsc` extension.
+ - `log_file` must use the `.log` extension.
+
+ If the provided file name doesn't have an extension, EDB\*Loader assumes the actual file name includes the appropriate extension.
-- The operating system account used to invoke `edbldr` must have read permission on the directories and files specified by `control_file`, `data_file`, and `param_file`.
+- The operating system account used to invoke edbldr must have read permission on the directories and files specified by `control_file`, `data_file`, and `param_file`.
-- The operating system account `enterprisedb` must have write permission on the directories where `bad_file`, `discard_file`, and `log_file` are written.
+- The operating system account enterprisedb must have write permission on the directories where `bad_file`, `discard_file`, and `log_file` are written.
## Parameters
@@ -59,24 +66,24 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
IP address of the host on which the database server is running.
-`USERID={ username/password | username/ | username | / }`
+`USERID={ <username>/<password> | <username>/ | <username> | / }`
- EDB\*Loader connects to the database with `username`. `username` must be a superuser or a username with the required privileges. `password` is the password for `username`.
+ EDB\*Loader connects to the database with `<username>`. `<username>` must be a superuser or a username with the required privileges. `<password>` is the password for `<username>`.
If you omit the `USERID` parameter, EDB\*Loader prompts for `username` and `password`. If you specify `USERID=username/`, then EDB\*Loader either:
- - Uses the password file specified by environment variable `PGPASSFILE` if `PGPASSFILE` is set
+ - Uses the password file specified by the environment variable `PGPASSFILE` if `PGPASSFILE` is set
- Uses the `.pgpass` password file (`pgpass.conf` on Windows systems) if `PGPASSFILE` isn't set
If you specify `USERID=username`, then EDB\*Loader prompts for `password`. If you specify `USERID=/`, the connection is attempted using the operating system account as the user name.
!!! Note
- EDB\*Loader ignores the EPAS connection environment variables `PGUSER` and `PGPASSWORD`. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/libpq-pgpass.html) for information on the `PGPASSFILE` environment variable and the password file.
+ EDB\*Loader ignores the EDB Postgres Advanced Server connection environment variables `PGUSER` and `PGPASSWORD`. See the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/libpq-pgpass.html) for information on the `PGPASSFILE` environment variable and the password file.
`-c CONNECTION_STRING`
`connstr=CONNECTION_STRING`
- The `-c` or `connstr=` option allows you to specify all the connection parameters supported by libpq. With this option, you can also specify SSL connection parameters or other connection parameters supported by libpq. If you provide connection options such as `-d`, `-h`, `-p` or `userid=dbuser/dbpass` separately, they might override the values provided by the `-c` or `connstr=` option.
+ The `-c` or `connstr=` option allows you to specify all the connection parameters supported by libpq. With this option, you can also specify SSL connection parameters or other connection parameters supported by libpq. If you provide connection options such as `-d`, `-h`, `-p`, or `userid=dbuser/dbpass` separately, they might override the values provided by the `-c` or `connstr=` option.
`CONTROL=control_file`
@@ -92,13 +99,13 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
`BAD=bad_file`
- `bad_file` specifies the name of a file that receives input data records that can't be loaded due to errors. Specifying a `bad_file` on the command line overrides any `BADFILE` clause specified in the control file.
+ `bad_file` specifies the name of a file that receives input data records that can't be loaded due to errors. Specifying `bad_file` on the command line overrides any `BADFILE` clause specified in the control file.
For more information about `bad_file`, see [Building the EDB\*Loader control file](../building_the_control_file/).
`DISCARD=discard_file`
- `discard_file` is the name of the file that receives input data records that don't meet any table’s selection criteria. Specifying a `discard_file` on the command line overrides the `DISCARDFILE` clause in the control file.
+ `discard_file` is the name of the file that receives input data records that don't meet any table’s selection criteria. Specifying `discard_file` on the command line overrides the `DISCARDFILE` clause in the control file.
For more information about `discard_file`, see [Building the EDB\*Loader control file](../building_the_control_file/).
@@ -118,7 +125,7 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
`log_file` specifies the name of the file in which EDB\*Loader records the results of the EDB\*Loader session.
- If you omit the `LOG` parameter, EDB\*Loader creates a log file with the name `control_file_base.log` in the directory from which `edbldr` is invoked. `control_file_base` is the base name of the control file used in the EDB\*Loader session. The operating system account `enterprisedb` must have write permission on the directory where the log file is written.
+ If you omit the `LOG` parameter, EDB\*Loader creates a log file with the name `control_file_base.log` in the directory from which edbldr is invoked. `control_file_base` is the base name of the control file used in the EDB\*Loader session. The operating system account `enterprisedb` must have write permission on the directory where the log file is written.
`PARFILE=param_file`
@@ -127,11 +134,11 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
Any parameter given in `param_file` overrides the same parameter supplied on the command line before the `PARFILE` option. Any parameter given on the command line that appears after the `PARFILE` option overrides the same parameter given in `param_file`.
!!! Note
- Unlike other EDB\*Loader files, there's no default file name or extension assumed for `param_file`. However, by Oracle SQL\*Loader convention, `.par` is typically used as an extension. It isn't required.
+ Unlike other EDB\*Loader files, there's no default file name or extension assumed for `param_file`. However, by Oracle SQL\*Loader convention, `.par` is typically used as an extension. It isn't required.
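  To illustrate that precedence, here's a hedged sketch using a hypothetical parameter file named `emp.par` (the file name and its contents are assumptions, not taken from this reference):

```shell
# emp.par (hypothetical) contains:
#   CONTROL=emp.ctl
#   BAD=/tmp/emp.bad
#   DIRECT=FALSE

# BAD appears before PARFILE, so the BAD value in emp.par wins;
# DIRECT appears after PARFILE, so DIRECT=TRUE overrides emp.par.
edbldr -d edb BAD=/tmp/old.bad PARFILE=emp.par DIRECT=TRUE
```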
`DIRECT= { FALSE | TRUE }`
- If `DIRECT` is set to `TRUE` EDB\*Loader performs a direct path load instead of a conventional path load. The default value of `DIRECT` is `FALSE`.
+ If `DIRECT` is set to `TRUE`, EDB\*Loader performs a direct path load instead of a conventional path load. The default value of `DIRECT` is `FALSE`.
Don't set `DIRECT=true` when loading the data into a replicated table. If you're using EDB\*Loader to load data into a replicated table and set `DIRECT=true`, indexes might omit rows that are in a table or might contain references to rows that were deleted. EnterpriseDB doesn't support direct inserts to load data into replicated tables.
@@ -139,7 +146,7 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
`FREEZE= { FALSE | TRUE }`
- Set `FREEZE` to `TRUE` to copy the data with the rows `frozen`. A tuple guaranteed to be visible to all current and future transactions is marked as frozen to prevent transaction ID wraparound. For more information about frozen tuples, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/routine-vacuuming.html).
+ Set `FREEZE` to `TRUE` to copy the data with the rows *frozen*. A tuple guaranteed to be visible to all current and future transactions is marked as frozen to prevent transaction ID wraparound. For more information about frozen tuples, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/routine-vacuuming.html).
You must specify a data-loading type of `TRUNCATE` in the control file when using the `FREEZE` option. `FREEZE` isn't supported for direct loading.
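  A hedged sketch of a `FREEZE` load, assuming `emp.ctl` specifies the `TRUNCATE` data-loading type as required above:

```shell
# The control file (emp.ctl) must use TRUNCATE as its data-loading type
edbldr -d edb CONTROL=emp.ctl FREEZE=TRUE
```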
@@ -169,7 +176,7 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
If set to `TRUE`, index maintenance isn't performed as part of a direct path load, and indexes on the loaded table are marked as invalid. The default value of `SKIP_INDEX_MAINTENANCE` is `FALSE`.
- During a parallel direct path load, target table indexes aren't updated. They are marked as invalid after the load is complete.
+ During a parallel direct path load, target table indexes aren't updated. They're marked as invalid after the load is complete.
You can use the `REINDEX` command to rebuild an index. For more information about the `REINDEX` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-reindex.htm).
@@ -177,7 +184,7 @@ If you omit the `-d` option, the `-p` option, or the `-h` option, the defaults f
`group_name` specifies the name of an EDB Resource Manager resource group to which to assign the EDB\*Loader session.
- Any default resource group that was assigned to the session is overridden by the resource group given by the `edb_resource_group` parameter specified on the `edbldr` command line. An example of such a group is a database user running the EDB\*Loader session who was assigned a default resource group with the `ALTER ROLE ... SET` `edb_resource_group` command.
+ Any default resource group that was assigned to the session is overridden by the resource group given by the `edb_resource_group` parameter specified on the edbldr command line. An example of such a group is a database user running the EDB\*Loader session who was assigned a default resource group with the `ALTER ROLE ... SET` `edb_resource_group` command.
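  As a sketch, the following assigns the load session to a hypothetical resource group named `ldr_grp` (the group name is an assumption for illustration):

```shell
# Overrides any default resource group set with ALTER ROLE ... SET edb_resource_group
edbldr -d edb CONTROL=emp.ctl edb_resource_group=ldr_grp
```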
## Examples
@@ -191,7 +198,7 @@ EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
Successfully loaded (4) records
```
-In this example, EDB\*Loader prompts for the user name and password since they are omitted from the command line. In addition, the files for the bad file and log file are specified with the `BAD` and `LOG` command line parameters.
+In this example, EDB\*Loader prompts for the user name and password since they're omitted from the command line. In addition, the files for the bad file and log file are specified with the `BAD` and `LOG` command line parameters.
```shell
$ /usr/edb/as14/bin/edbldr -d edb CONTROL=emp.ctl BAD=/tmp/emp.bad
@@ -234,7 +241,7 @@ EDB*Loader: Copyright (c) 2007-2021, EnterpriseDB Corporation.
Successfully loaded (4) records
```
-This example invokes EDB\*Loader using a normal user. For this example, one empty table `bar` is created and a normal user `bob` is created. The `bob` user is granted all privileges on the table `bar`. The CREATE TABLE command creates the empty table. The CREATE USER command creates the user and the GRANT command gives required privileges to the user `bob` on the `bar` table:
+This example invokes EDB\*Loader using a normal user. For this example, one empty table `bar` is created and a normal user `bob` is created. The `bob` user is granted all privileges on the table `bar`. The CREATE TABLE command creates the empty table. The CREATE USER command creates the user, and the GRANT command gives required privileges to the user `bob` on the `bar` table:
```sql
CREATE TABLE bar(i int);
@@ -261,7 +268,7 @@ EDBAS/ - (master) $ cat /tmp/edbldr.dat
5
```
-Invoke EDB*Loader:
+Invoke EDB\*Loader:
```shell
EDBAS/ - (master) $ /usr/edb/as15/bin/edbldr -d edb userid=bob/123 control=/tmp/edbldr.ctl
@@ -278,8 +285,8 @@ When EDB\*Loader exits, it returns one of the following codes:
| Exit code | Description |
| --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `0` | Indicates that all rows loaded successfully. |
-| `1` | Indicates that EDB\*Loader encountered command line or syntax errors or aborted the load operation due to an unrecoverable error. |
-| `2` | Indicates that the load completed, but some (or all) rows were rejected or discarded. |
-| `3` | Indicates that EDB\*Loader encountered fatal errors (such as OS errors). This class of errors is equivalent to the `FATAL` or `PANIC` severity levels of PostgreSQL errors. |
+| `0` | All rows loaded successfully. |
+| `1` | EDB\*Loader encountered command line or syntax errors or aborted the load operation due to an unrecoverable error. |
+| `2` | The load completed, but some or all rows were rejected or discarded. |
+| `3` | EDB\*Loader encountered fatal errors, such as OS errors. This class of errors is equivalent to the `FATAL` or `PANIC` severity levels of PostgreSQL errors. |
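A minimal wrapper sketch that branches on these exit codes; the paths and control file name are placeholders:

```shell
#!/bin/sh
/usr/edb/as15/bin/edbldr -d edb CONTROL=emp.ctl BAD=/tmp/emp.bad
case $? in
  0) echo "All rows loaded successfully." ;;
  1) echo "Command line or syntax error, or the load was aborted." >&2 ;;
  2) echo "Load completed, but some or all rows were rejected or discarded." >&2 ;;
  3) echo "Fatal error (for example, an OS error)." >&2 ;;
esac
```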
diff --git a/product_docs/docs/epas/15/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx b/product_docs/docs/epas/15/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx
index b76c12a008a..5f6ad2cf0ee 100644
--- a/product_docs/docs/epas/15/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx
+++ b/product_docs/docs/epas/15/database_administration/10_edb_resource_manager/dirty_buffer_throttling.mdx
@@ -107,7 +107,7 @@ edb=# INSERT INTO t1 VALUES (generate_series (1,10000), 'aaa');
INSERT 0 10000
```
-The following example shows the results from the `INSERT` command:
+This example shows the results from the `INSERT` command:
```sql
edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM
@@ -122,9 +122,9 @@ __OUTPUT__
The actual dirty rate is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 13496.184 ms, which yields *0.74117247 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *741.17247 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *6072 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 13496.184 ms, which yields 0.74117247 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 741.17247 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 6072 kilobytes per second.
The actual dirty rate of 6072 kilobytes per second is close to the dirty rate limit for the resource group, which is 6144 kilobytes per second.
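To make the conversion reproducible, a small awk sketch applying the same formula to the values above:

```shell
# kilobytes dirtied per second = blocks / elapsed_ms * 1000 * 8.192
awk 'BEGIN { blocks = 10003; ms = 13496.184; printf "%.0f kB/s\n", blocks / ms * 1000 * 8.192 }'
# prints approximately 6072 kB/s
```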
@@ -152,7 +152,7 @@ edb=# INSERT INTO t1 VALUES (generate_series (1,10000), 'aaa');
INSERT 0 10000
```
-The following example shows the results from the `INSERT` command without the use of a resource group:
+This example shows the results from the `INSERT` command without the use of a resource group:
```sql
edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM
@@ -169,9 +169,9 @@ The total time was only 2432.165 milliseconds, compared to 13496.184 millisecond
The actual dirty rate without the use of a resource group is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 2432.165 ms, which yields *4.112797 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *4112.797 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *33692 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 2432.165 ms, which yields 4.112797 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 4112.797 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 33692 kilobytes per second.
The actual dirty rate of 33692 kilobytes per second is much higher than when the resource group with a dirty rate limit of 6144 kilobytes per second was used.
@@ -229,7 +229,7 @@ INSERT 0 10000
!!! Note
The `INSERT` commands in session 1 and session 2 started after the `SELECT pg_stat_statements_reset()` command in session 2 ran.
-The following example shows the results from the `INSERT` commands in the two sessions. `RECORD 3` shows the results from session 1. `RECORD 2` shows the results from session 2.
+This example shows the results from the `INSERT` commands in the two sessions. `RECORD 3` shows the results from session 1. `RECORD 2` shows the results from session 2.
```sql
edb=# SELECT query, rows, total_time, shared_blks_dirtied FROM
@@ -256,15 +256,15 @@ The total time was 33215.334 milliseconds for session 1 and 30591.551 millisecon
The actual dirty rate for session 1 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 33215.334 ms, which yields *0.30115609 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *301.15609 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *2467 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 33215.334 ms, which yields 0.30115609 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 301.15609 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2467 kilobytes per second.
The actual dirty rate for session 2 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 30591.551 ms, which yields *0.32698571 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *326.98571 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *2679 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 30591.551 ms, which yields 0.32698571 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 326.98571 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2679 kilobytes per second.
The combined dirty rate from session 1 (2467 kilobytes per second) and from session 2 (2679 kilobytes per second) yields 5146 kilobytes per second, which is below the set dirty rate limit of the resource group (6144 kilobytes per seconds).
@@ -325,7 +325,7 @@ INSERT 0 10000
!!! Note
The `INSERT` commands in all four sessions started after the `SELECT pg_stat_statements_reset()` command in session 4 ran.
-The following example shows the results from the `INSERT` commands in the four sessions:
+This example shows the results from the `INSERT` commands in the four sessions:
- `RECORD 3` shows the results from session 1. `RECORD 2` shows the results from session 2.
@@ -366,29 +366,29 @@ The times of session 1 (28407.435) and session 2 (31343.458) are close to each o
The actual dirty rate for session 1 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 28407.435 ms, which yields *0.35212612 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *352.12612 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *2885 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 28407.435 ms, which yields 0.35212612 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 352.12612 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2885 kilobytes per second.
The actual dirty rate for session 2 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 31343.458 ms, which yields *0.31914156 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *319.14156 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *2614 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 31343.458 ms, which yields 0.31914156 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 319.14156 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 2614 kilobytes per second.
The combined dirty rate from session 1 (2885 kilobytes per second) and from session 2 (2614 kilobytes per second) yields 5499 kilobytes per second, which is near the set dirty rate limit of the resource group (6144 kilobytes per seconds).
The actual dirty rate for session 3 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 52727.846 ms, which yields *0.18971001 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *189.71001 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *1554 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 52727.846 ms, which yields 0.18971001 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 189.71001 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 1554 kilobytes per second.
The actual dirty rate for session 4 is calculated as follows:
-- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 56063.697 ms, which yields *0.17842205 blocks per millisecond*.
-- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields *178.42205 blocks per second*.
-- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately *1462 kilobytes per second*.
+- The number of blocks dirtied per millisecond (ms) is 10003 blocks / 56063.697 ms, which yields 0.17842205 blocks per millisecond.
+- Multiply the result by 1000 to give the number of shared blocks dirtied per second (1 second = 1000 ms), which yields 178.42205 blocks per second.
+- Multiply the result by 8.192 to give the number of kilobytes dirtied per second (1 block = 8.192 kilobytes), which yields approximately 1462 kilobytes per second.
The combined dirty rate from session 3 (1554 kilobytes per second) and from session 4 (1462 kilobytes per second) yields 3016 kilobytes per second, which is near the set dirty rate limit of the resource group (3072 kilobytes per seconds).
diff --git a/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx b/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx
index 80ee0ea6a19..36110b453dd 100644
--- a/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx
+++ b/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/copying_a_remote_schema.mdx
@@ -110,7 +110,7 @@ __OUTPUT__
(1 row)
```
-The following example displays the status from the log file during various points in the cloning process:
+This example displays the status from the log file during various points in the cloning process:
```sql
tgtdb=# SELECT edb_util.process_status_from_log('clone_rmt_src_tgt');
@@ -283,7 +283,7 @@ Number of background workers to perform the clone in parallel. The default value
The same cloning operation is performed as the example in [`remotecopyschema`](#remotecopyschema) but using the non-blocking function `remotecopyschema_nb`.
-The following command starts pgAgent on the target database `tgtdb`. The `pgagent` program file is located in the `bin` subdirectory of the EDB Postgres Advanced Server installation directory.
+This command starts pgAgent on the target database `tgtdb`. The `pgagent` program file is located in the `bin` subdirectory of the EDB Postgres Advanced Server installation directory.
```shell
[root@localhost bin]# ./pgagent -l 1 -s /tmp/pgagent_tgtdb_log hostaddr=127.0.0.1 port=5444
diff --git a/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx b/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx
index 10cb301e516..796546f1933 100644
--- a/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx
+++ b/product_docs/docs/epas/15/database_administration/14_edb_clone_schema/setting_up_edb_clone_schema.mdx
@@ -150,7 +150,7 @@ CREATE USER MAPPING FOR enterprisedb SERVER local_server
For more information about using the `CREATE USER MAPPING` command, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/sql-createusermapping.html).
-These `psql` commands show the foreign server and user mapping:
+These psql commands show the foreign server and user mapping:
```sql
edb=# \des+
@@ -235,7 +235,7 @@ CREATE USER MAPPING FOR enterprisedb SERVER src_server
```
## Displaying foreign servers and user mappings
-These `psql` commands show the foreign servers and user mappings:
+These psql commands show the foreign servers and user mappings:
```sql
tgtdb=# \des+
diff --git a/product_docs/docs/epas/15/epas_rel_notes/epas15_2_0_rel_notes.mdx b/product_docs/docs/epas/15/epas_rel_notes/epas15_2_0_rel_notes.mdx
index 5fa5300340f..402c04ce197 100644
--- a/product_docs/docs/epas/15/epas_rel_notes/epas15_2_0_rel_notes.mdx
+++ b/product_docs/docs/epas/15/epas_rel_notes/epas15_2_0_rel_notes.mdx
@@ -1,7 +1,10 @@
---
-title: "Version 15.2.0"
+title: EDB Postgres Advanced Server 15.2.0 release notes
+navTitle: "Version 15.2.0"
---
+Released: 14 Feb 2023
+
EDB Postgres Advanced Server 15.2.0 includes the following enhancements and bug fixes:
| Type | Description | Category |
diff --git a/product_docs/docs/epas/15/epas_rel_notes/epas15_3_0_rel_notes.mdx b/product_docs/docs/epas/15/epas_rel_notes/epas15_3_0_rel_notes.mdx
index 896039e386f..768c1e15c78 100644
--- a/product_docs/docs/epas/15/epas_rel_notes/epas15_3_0_rel_notes.mdx
+++ b/product_docs/docs/epas/15/epas_rel_notes/epas15_3_0_rel_notes.mdx
@@ -1,7 +1,10 @@
---
-title: "Version 15.3.0"
+title: "EDB Postgres Advanced Server 15.3.0 release notes"
+navTitle: Version 15.3.0
---
+Released: 11 May 2023
+
EDB Postgres Advanced Server 15.3.0 includes the following enhancements and bug fixes:
| Type | Description | Category |
diff --git a/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx b/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx
index 524d5d631c0..ab3c24e9da7 100644
--- a/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx
+++ b/product_docs/docs/epas/15/epas_rel_notes/epas15_4_0_rel_notes.mdx
@@ -1,34 +1,47 @@
---
-title: "Version 15.4.0"
+title: "EDB Postgres Advanced Server 15.4.0 release notes"
+navTitle: Version 15.4.0
hideToC: true
---
+Released: 21 Aug 2023
+
+Updated: 30 Aug 2023
+
!!! Important Upgrading
-Once you have upgraded to this version of EDB Postgres Advanced Server, you will need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application will check that your databases system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+After you upgrade to this version of EDB Postgres Advanced Server, you need to run `edb_sqlpatch` on all your databases to complete the upgrade. This application checks that your databases' system objects are up to date with this version. See the [EDB SQL Patch](/tools/edb_sqlpatch) documentation for more information on how to deploy this tool.
+!!!
+
+!!! Note After applying patches
+Users of the UTL_MAIL package now require EXECUTE permission on the UTL_SMTP and UTL_TCP packages in addition to EXECUTE permission on UTL_MAIL.
+
+Users of the UTL_SMTP package now require EXECUTE permission on the UTL_TCP package in addition to EXECUTE permission on UTL_SMTP.
!!!
-#### EDB Postgres Advanced Server 15.4.0 includes the following enhancements and bug fixes:
-
-| Type | Description | Addresses |
-| -------------- | -------------------------------------------------------------------------------------------------------------------------------------| --------------------- |
-| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-XXXXX-1](/security/advisories/cve2023xxxxx1/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-XXXXX-2](/security/advisories/cve2023xxxxx2/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-XXXXX-3](/security/advisories/cve2023xxxxx3/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-XXXXX-4](/security/advisories/cve2023xxxxx4/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-XXXXX-5](/security/advisories/cve2023xxxxx5/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-XXXXX-6](/security/advisories/cve2023xxxxx6/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-XXXXX-7](/security/advisories/cve2023xxxxx7/) | 11+
-| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-XXXXX-8](/security/advisories/cve2023xxxxx8/) | 11+
-| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 | 11+
-| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 | 11+
-| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 | 11+
-| Bug fix | Set correct collation for packaged cursor parameters. | #92739 | 11+
-| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 | 11+
-| Bug fix | Added checks to ensure required WAL logging in EXCHANGE PARTITION command.| | 13+
-| Bug fix | Dumped/restored the sequences created for GENERATED AS IDENTITY constraint. | #90658 | 14+
-| Bug fix | Skipped updating the last DDL time for the parent table in CREATE INDEX. | #91270 | 14+
-| Bug fix | Removed existing package private procedure or function entries from the edb_last_ddl_time while replacing the package body. | | 14+
-| Bug fix | Fixed libpq to allow multiple PQprepare() calls under the same transaction. | #94735 | 14+
+EDB Postgres Advanced Server 15.4.0 includes the following enhancements and bug fixes:
+
+| Type | Description | Addresses |
+| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
+| Upstream merge | Merged with community PostgreSQL 15.4. See the [PostgreSQL 15 Release Notes](https://www.postgresql.org/docs/15/release-15-4.html) for more information. | |
+| Security fix | EDB Postgres Advanced Server (EPAS) SECURITY DEFINER functions and procedures may be hijacked via search_path. | [CVE-2023-41117](/security/advisories/cve202341117/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) dbms_aq helper function may run arbitrary SQL as a superuser. | [CVE-2023-41119](/security/advisories/cve202341119/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permissions bypass via accesshistory() | [CVE-2023-41113](/security/advisories/cve202341113/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) UTL_FILE permission bypass | [CVE-2023-41118](/security/advisories/cve202341118/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for materialized views | [CVE-2023-41116](/security/advisories/cve202341116/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) authenticated users may fetch any URL | [CVE-2023-41114](/security/advisories/cve202341114/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) permission bypass for large objects | [CVE-2023-41115](/security/advisories/cve202341115/) |
+| Security fix | EDB Postgres Advanced Server (EPAS) DBMS_PROFILER data may be removed without permission | [CVE-2023-41120](/security/advisories/cve202341120/) |
+| Bug fix | Allowed subtypes in INDEX BY clause of the packaged collection. | #1371 |
+| Bug fix | Fixed %type resolution when pointing to a packaged type field. | #1243 |
+| Bug fix | Profile: Fixed upgrade when `REUSE` constraints were `ENABLED`/`DISABLED`. | #92739 |
+| Bug fix | Set correct collation for packaged cursor parameters. | #92739 |
+| Bug fix | Rolled back autonomous transaction creating pg_temp in case of error. | #91614 |
+| Bug fix | Added checks to ensure required WAL logging in the EXCHANGE PARTITION command. | |
+| Bug fix | Dumped/restored the sequences created for GENERATED AS IDENTITY constraint. | #90658 |
+| Bug fix | Skipped updating the last DDL time for the parent table in CREATE INDEX. | #91270 |
+| Bug fix | Removed existing package private procedure or function entries from the edb_last_ddl_time while replacing the package body. | |
+| Bug fix | Fixed libpq to allow multiple PQprepare() calls under the same transaction. | #94735 |
+| Bug fix | Fixed a memory leak experienced when using EDB Postgres Distributed (PGD) with Transparent Data Encryption (TDE). | #93936 |
!!! Note Addresses
Entries in the Addresses column are either CVE numbers or, if preceded by #, a customer case number.
diff --git a/product_docs/docs/epas/15/epas_rel_notes/index.mdx b/product_docs/docs/epas/15/epas_rel_notes/index.mdx
index 039dd690f8c..6739f08a0e5 100644
--- a/product_docs/docs/epas/15/epas_rel_notes/index.mdx
+++ b/product_docs/docs/epas/15/epas_rel_notes/index.mdx
@@ -13,9 +13,9 @@ The EDB Postgres Advanced Server documentation describes the latest version of E
| Version | Release date | Upstream merges |
| ------------------------------------- | ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [15.4.0](epas15_4_0_rel_notes) | 2023 Aug 21 | [15.4](https://www.postgresql.org/docs/release/15.4/)
-| [15.3.0](epas15_3_0_rel_notes) | 2023 May 11 | [15.3](https://www.postgresql.org/docs/release/15.3/) |
-| [15.2.0](epas15_2_0_rel_notes) | 2023 Feb 14 | [15.0](https://www.postgresql.org/docs/release/15.0/), [15.1](https://www.postgresql.org/docs/release/15.1/), [15.2](https://www.postgresql.org/docs/release/15.2/) |
+| [15.4.0](epas15_4_0_rel_notes) | 21 Aug 2023 | [15.4](https://www.postgresql.org/docs/release/15.4/) |
+| [15.3.0](epas15_3_0_rel_notes) | 11 May 2023 | [15.3](https://www.postgresql.org/docs/release/15.3/) |
+| [15.2.0](epas15_2_0_rel_notes) | 14 Feb 2023 | [15.0](https://www.postgresql.org/docs/release/15.0/), [15.1](https://www.postgresql.org/docs/release/15.1/), [15.2](https://www.postgresql.org/docs/release/15.2/) |
## Component certification
diff --git a/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/01_sample_database/01_sample_database_installation.mdx b/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/01_sample_database/01_sample_database_installation.mdx
index a32847461e0..0afd2cab5b1 100644
--- a/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/01_sample_database/01_sample_database_installation.mdx
+++ b/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/01_sample_database/01_sample_database_installation.mdx
@@ -7,7 +7,7 @@ redirects:
-When EDB Postgres Advanced Server is installed, a sample database named `edb` is automatically created. This sample database contains the tables and programs used in this tutorial after you execute the script `edb-sample.sql`, located in the `/usr/edb/as14/share` directory.
+When EDB Postgres Advanced Server is installed, a sample database named `edb` is automatically created. This sample database contains the tables and programs used in this tutorial after you execute the script `edb-sample.sql`, located in the `/usr/edb/as15/share` directory.
This script does the following:
diff --git a/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/08_deletions.mdx b/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/08_deletions.mdx
index 0692a11f52b..51bb75ca035 100644
--- a/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/08_deletions.mdx
+++ b/product_docs/docs/epas/15/fundamentals/sql_fundamentals/02_sql_tutorial/08_deletions.mdx
@@ -54,10 +54,11 @@ __OUTPUT__
```
!!! Warning
- Be careful when giving a `DELETE` command without a `WHERE` clause such as the following:
+Be careful when giving a `DELETE` command without a `WHERE` clause such as the following:
```sql
DELETE FROM tablename;
```
- This statement removes all rows from the given table, leaving it completely empty. The system doesn't request confirmation before doing this.
+This statement removes all rows from the given table, leaving it completely empty. The system doesn't request confirmation before doing this.
+!!!
diff --git a/product_docs/docs/epas/15/installing/linux_install_details/component_locations.mdx b/product_docs/docs/epas/15/installing/linux_install_details/component_locations.mdx
index 2619fc2125c..bcdd65f4541 100644
--- a/product_docs/docs/epas/15/installing/linux_install_details/component_locations.mdx
+++ b/product_docs/docs/epas/15/installing/linux_install_details/component_locations.mdx
@@ -18,15 +18,12 @@ The RPM installers place EDB Postgres Advanced Server components in the director
| --------------------------------- | ------------------------------------------ |
| Executables | `/usr/edb/as15/bin` |
| Libraries | `/usr/edb/as15/lib` |
-| Cluster configuration files | `/etc/edb/as15` |
+| Cluster configuration files | `/var/lib/edb/as15` |
| Documentation | `/usr/edb/as15/share/doc` |
| Contrib | `/usr/edb/as15/share/contrib` |
| Data | `/var/lib/edb/as15/data` |
-| Logs | `/var/log/as15` |
-| Lock files | `/var/lock/as15` |
-| Log rotation file | `/etc/logrotate.d/as15` |
-| Sudo configuration file | `/etc/sudoers.d/as15` |
-| Binary to access VIP without sudo | `/usr/edb/as15/bin/secure` |
+| Logs | `/var/log/edb/as15` |
+| Lock files | `/var/lock/edb/as15` |
| Backup area | `/var/lib/edb/as15/backups` |
| Templates | `/usr/edb/as15/share` |
| Procedural Languages | `/usr/edb/as15/lib or /usr/edb/as15/lib64` |
diff --git a/product_docs/docs/epas/15/reference/application_programmer_reference/07_reference/ecpgplus_statements.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/07_reference/ecpgplus_statements.mdx
index 4e1811f9467..ec4a5dcb04a 100644
--- a/product_docs/docs/epas/15/reference/application_programmer_reference/07_reference/ecpgplus_statements.mdx
+++ b/product_docs/docs/epas/15/reference/application_programmer_reference/07_reference/ecpgplus_statements.mdx
@@ -66,9 +66,6 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.100.html"
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.102.html"
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.101.html"
-redirects:
- - /epas/latest/ecpgplus_guide/07_reference/ #generated for docs/epas/reorg-role-use-case-mode
- - ../../../application_programming/ecpgplus_guide/07_reference/
---
diff --git a/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx b/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx
index 984bb6b15cb..1a0746f45ac 100644
--- a/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx
+++ b/product_docs/docs/epas/15/reference/sql_reference/02_data_types/02_character_types.mdx
@@ -39,8 +39,9 @@ VARCHAR, VARCHAR2, NVARCHAR and NVARCHAR2
If the string to assign is shorter than `n`, values of type `VARCHAR`, `VARCHAR2`, `NVARCHAR`, and `NVARCHAR2` store the shorter string without padding.
- !!! Note
- The trailing spaces are semantically significant in `VARCHAR` values.
+!!! Note
+The trailing spaces are semantically significant in `VARCHAR` values.
+!!!
If you explicitly cast a value to a `VARCHAR` type, an over-length value is truncated to `n` characters without raising an error (as specified by the SQL standard).
@@ -55,4 +56,4 @@ VARCHAR, VARCHAR2, NVARCHAR and NVARCHAR2
Thus, use of the `CLOB` type is limited by what can be done for `TEXT`, such as a maximum size of approximately 1 GB.
- For larger amounts of data, instead of using the `CLOB` data type, use the PostgreSQL *large objects* feature that relies on the `pg_largeobject` system catalog. For information on large objects, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/largeobjects.html).
\ No newline at end of file
+ For larger amounts of data, instead of using the `CLOB` data type, use the PostgreSQL *large objects* feature that relies on the `pg_largeobject` system catalog. For information on large objects, see the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/largeobjects.html).
diff --git a/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx b/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx
index a7f6073b5a3..6c6cefbddc8 100644
--- a/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx
+++ b/product_docs/docs/eprs/7/01_introduction/03_certified_supported_versions.mdx
@@ -5,13 +5,17 @@ title: "Certified and supported product versions"
You can use the following database product versions with Replication Server:
- PostgreSQL versions 11, 12, 13, 14, and 15
-- Advanced Server versions 11, 12, 13, 14, and 15
+- EDB Postgres Advanced Server versions 11, 12, 13, 14, and 15
- Oracle 11g Release 2 version 11.2.0.2.0 is explicitly certified. Newer minor versions in the 11.2 line are supported as well.
- Oracle 12c version 12.1.0.2.0 is explicitly certified. Newer minor versions in the 12.1 line are supported as well.
- Oracle 18c version 18.1.0.2.0 is explicitly certified. Newer minor versions in the 18.1 line are supported as well.
- Oracle 19c version 19.1.0.2.0 is explicitly certified. Newer minor versions in the 19.1 line are supported as well.
- SQL Server 2014 version 12.0.5000.0 is explicitly certified. Newer minor versions in the 12.0 line are supported as well.
+!!!Note
+ All PostgreSQL and EDB Postgres Advanced Server versions available as BigAnimal single-node and primary/standby high availability cluster types are also supported for SMR configurations. Consult the BigAnimal (EDB’s managed database cloud service) [documentation](/biganimal/latest) for more information about BigAnimal’s [supported cluster types](/biganimal/latest/overview/02_high_availability/) and [database version policy](/biganimal/latest/overview/05_database_version_policy/) for the versions of PostgreSQL and EDB Postgres Advanced Server available in BigAnimal.
+
+
As of Replication Server 7.1.0:
- SQL Server 2016 version 13.00.5026 is explicitly certified. Newer minor versions in the 13.0 line are supported as well.
- SQL Server 2017 version 14.0.1000.169 is explicitly certified. Newer minor versions in the 14.0 line are supported as well.
diff --git a/product_docs/docs/eprs/7/01_introduction/04_permitted_conf_and_permutations.mdx b/product_docs/docs/eprs/7/01_introduction/04_permitted_conf_and_permutations.mdx
index 0bba95596e5..58ddd45c6dd 100644
--- a/product_docs/docs/eprs/7/01_introduction/04_permitted_conf_and_permutations.mdx
+++ b/product_docs/docs/eprs/7/01_introduction/04_permitted_conf_and_permutations.mdx
@@ -40,7 +40,7 @@ The following table shows the combinations of source and target database server
-| Source/target | Oracle | Microsoft SQL service| PostgreSQL | EDB Postgres Advanced Server (Oracle compatible) | EDB Postgres Advanced Server (PostgreSQL compatible) |
+| Source/target | Oracle | Microsoft SQL Server | PostgreSQL | EDB Postgres Advanced Server (Oracle compatible) | EDB Postgres Advanced Server (PostgreSQL compatible) |
| ------------------------------------------- | ------ | -------------------- | ---------- | ----------------------------------- | --------------------------------------- |
| Oracle | No | No | Yes | Yes | Yes |
| Microsoft SQL Server | No | No | Yes | Yes | Yes |
diff --git a/product_docs/docs/eprs/7/05_smr_operation/02_creating_publication/04_control_schema_objects_created_for_publication.mdx b/product_docs/docs/eprs/7/05_smr_operation/02_creating_publication/04_control_schema_objects_created_for_publication.mdx
index 3e969e562cb..a9e46eded50 100644
--- a/product_docs/docs/eprs/7/05_smr_operation/02_creating_publication/04_control_schema_objects_created_for_publication.mdx
+++ b/product_docs/docs/eprs/7/05_smr_operation/02_creating_publication/04_control_schema_objects_created_for_publication.mdx
@@ -411,7 +411,7 @@ _edb_replicator_pub.sp_dropsequence SQL_STORED_PROCEDURE
_edb_replicator_sub.rrep_common_seq USER_TABLE
```
-SQL Server versions 2012 and 2014 support creation of sequence objects that can now perform the functionality previously provided by the preceding list of objects. The following are the sequence objects that are now used when the publication database is SQL Server 2012 or 2014:
+SQL Server versions 2014 and 2012 support creation of sequence objects that can now perform the functionality previously provided by the preceding list of objects. The following are the sequence objects that are now used when the publication database is SQL Server 2012 or 2014:
```shell
1> USE edb;
diff --git a/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx b/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx
index d72d21eb650..a95c78c6c34 100644
--- a/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx
+++ b/product_docs/docs/eprs/7/08_xdb_cli/03_xdb_cli_commands/52_reload_conf_file.mdx
@@ -85,11 +85,18 @@ The table shows whether a configuration property can be reloaded.
This example reloads the configuration file.
-```shell
-$ java -jar edb-repcli.jar -reloadconf -repsvrfile ~/subsvr.prop
+!!! Note
+When you execute the `reloadconf` command, the output includes any configuration options that were changed from their default values, along with their new values.
+```shell
+java -jar edb-repcli.jar -reloadconf -repsvrfile subsvr.prop
+__OUTPUT__
Reloading Subscription Server configuration file...
Reloaded configuration options from ../etc/xdb_subserver.conf...
+The conf option 'snapshotParallelTableLoaderLimit' set to '1'
+The conf option 'skipCheckConst' set to 'false'
+The conf option 'snapshotParallelLoadCount' set to '1'
Configuration was reloaded successfully.
```
+!!!
diff --git a/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx b/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx
index a7662b54ed4..90931a7714a 100644
--- a/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx
+++ b/product_docs/docs/eprs/7/10_appendix/03_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level.mdx
@@ -1,5 +1,6 @@
---
-title: "Controlling logging level, log file sizes, and rotation count"
+title: "Controlling logging level, log file sizes, rotation count, and locale"
+navTitle: "Controlling message logging"
redirects:
- /eprs/latest/10_appendix/04_miscellaneous_xdb_processing_topics/01_publications_and_subscriptions_server_conf_options/01_controlling_logging_level
---
@@ -13,7 +14,7 @@ The following options control various aspects of message logging in the publicat
See [Publication and subscription server startup failures](../../02_resolving_problems/02_where_to_look_for_errors/#pub_and_sub_startup_failures) and [Snapshot replication failures](../../02_resolving_problems/02_where_to_look_for_errors/#snapshot_replication_failures) for more information.
-`logging.level`
+## `logging.level`
Set the `logging.level` option to control the severity of messages written to the publication server log file and the subscription server log file.
@@ -21,7 +22,7 @@ Set the `logging.level` option to control the severity of messages written to th
The default value is `WARNING`.
-`logging.file.size`
+## `logging.file.size`
Set the `logging.file.size` option to control the maximum file size (in megabytes) of the publication server log file and the subscription server log file.
@@ -32,7 +33,7 @@ Set the `logging.file.size` option to control the maximum file size (in megabyte
The default value is `50`, in megabytes.
-`logging.file.count`
+## `logging.file.count`
Set the `logging.file.count` option to control the number of files in the log file rotation history of the publication server log file and the subscription server log file.
@@ -57,7 +58,21 @@ When log file rotation is enabled and the current, active log file (`pubserver.l
- Each remaining log file is renamed with the next greater integer suffix (`pubserver.log.m` is renamed to `pubserver.log.m+1`, with m varying from `0` to `n-2`).
- A new, active log file is created (`pubserver.log.0`).
-`mtk.logging.file.size`
+## `logging.default.locale`
+
+Set the `logging.default.locale` option to use either the current system locale or English (en) for publication and subscription logs.
+
+`logging.default.locale={system | en}`
+
+The default value is `system`.
+
+!!!Note
+This option applies only to publication and subscription logs and isn't supported for `mtk.log`.
+
+The RepCLI and RepConsole logs continue showing text in the default locale.
+!!!
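As a hedged illustration, you might set the option in the subscription server configuration file and then reload or restart the subscription server. The file path below is an assumption (the reloadconf example elsewhere in these docs reports it as `../etc/xdb_subserver.conf`), so verify the location for your installation:

```shell
# Write publication and subscription server logs in English
echo "logging.default.locale=en" >> /path/to/etc/xdb_subserver.conf

# Reload the configuration (if supported for this option) or restart the subscription server
java -jar edb-repcli.jar -reloadconf -repsvrfile subsvr.prop
```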
+
+## `mtk.logging.file.size`
!!! Note
This option applies only to the publication server.
@@ -68,7 +83,7 @@ Set the `mtk.logging.file.size` option to control the maximum file size (in mega
The default value is `50`, in megabytes.
-`mtk.logging.file.count`
+## `mtk.logging.file.count`
!!! Note
This option applies only to the publication server.
@@ -94,3 +109,5 @@ When the current, active log file (`mtk.log`) reaches the size specified by `mtk
- Each remaining log file with a suffix is renamed with the next greater integer suffix (`mtk.log.m` is renamed to `mtk.log.m+1`, with `m` varying from `1` to `n-1`).
- Log file `mtk.log` is renamed to `mtk.log.1`.
- A new, active log file is created (`mtk.log`).
+
+
diff --git a/product_docs/docs/eprs/7/eprs_rel_notes/eprs_rel_notes_7.6.0.mdx b/product_docs/docs/eprs/7/eprs_rel_notes/eprs_rel_notes_7.6.0.mdx
new file mode 100644
index 00000000000..b9db5af9199
--- /dev/null
+++ b/product_docs/docs/eprs/7/eprs_rel_notes/eprs_rel_notes_7.6.0.mdx
@@ -0,0 +1,22 @@
+---
+title: Replication Server 7.6.0 release notes
+navTitle: "Version 7.6.0"
+---
+
+Released: 07 Sep 2023
+
+New features, enhancements, bug fixes, and other changes in Replication Server 7.6.0 include the following:
+
+| Type | Description |
+| ------- |------------ |
+| Enhancement | EDB Replication Server now supports writing Publication and Subscription server logs in English, overriding the default locale, by using the `logging.default.locale` configuration parameter. [Support ticket #89877] |
+| Enhancement | The snapshot operation now uses the table-level parallel loading capability, which reduces overhead on the source database by using a range-based criterion for loading each individual table data chunk instead of a fetch-offset approach. This optimization applies when the table's primary key/unique constraint is based on a non-composite numeric type attribute. [Support ticket #93360] |
+| Enhancement | To help investigate data synchronization gaps, Replication Server’s logging now logs when rows are skipped due to filter criteria. [Support ticket #91296] |
+| Bug fix | Fixed an issue where metadata from the primary controller database wasn't replicated when a SQL Server or an Oracle publication database was added as a standby controller database. [Support tickets #82050 and #91884] |
+| Bug fix | Fixed the issues related to foreign key violations in the standby controller database that prevented upgrading from version 6.2.x to 7.x. [Support tickets #93129, #92056, and #91588] |
+| Bug fix | Corrected a few code paths to release unused resources for timely garbage collection and optimized memory utilization. [Support ticket #91588] |
+| Bug fix | Fixed a Data Validator Oracle edge case resulting in a `String index out of range` error for an Oracle to EDB Postgres Advanced Server validation. |
+| Bug fix | Fixed an issue resulting in a synchronization failure for `nchar`, `nvarchar`, `xml`, and `sqlvariant` when using the mssql-jdbc-10.2.1.jre8.jar file for a SQL Server to EDB Postgres Advanced Server cluster setup. |
+| Bug fix | Updated database type name references of “Postgres Plus Advanced Server” in the Replication Console and Replication CLI to “EDB Postgres Advanced Server”. |
+| Bug fix | Fixed an issue that prevented logging of changed configuration parameters at Publication and Subscription server start or when the `reloadconf` command is executed. |
+| Bug fix | Fixed a regression that led to an `Invalid custom column type mapping` error being observed for Publication tables with no column mapping. |
diff --git a/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx b/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx
index 36623d6ebb4..f24b2ccc714 100644
--- a/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx
+++ b/product_docs/docs/eprs/7/eprs_rel_notes/index.mdx
@@ -1,8 +1,9 @@
---
-title: "Release Notes"
+title: "Release notes"
redirects:
- ../01_whats_new/
navigation:
+ - eprs_rel_notes_7.6.0
- eprs_rel_notes_7.5.1
- eprs_rel_notes_7.5.0
- eprs_rel_notes_7.4.0
@@ -13,15 +14,16 @@ The Replication Server documentation describes the latest version including mino
| Version | Release Date |
| -------------------------------- | ------------ |
-| [7.5.1](eprs_rel_notes_7.5.1) | 2023 May 26 |
-| [7.5.0](eprs_rel_notes_7.5.0) | 2023 Feb 14 |
-| [7.4.0](eprs_rel_notes_7.4.0) | 2022 Nov 29 |
-| [7.3.0](15_eprs_rel_notes_7.3.0) | 2022 Nov 15 |
-| [7.2.1](16_eprs_rel_notes_7.2.1) | 2022 Jul 25 |
-| [7.2.0](17_eprs_rel_notes_7.2.0) | 2022 Jun 24 |
-| [7.1.0](18_eprs_rel_notes_7.1.0) | 2022 Mar 21 |
-| [7.0.1](19_eprs_rel_notes_7.0.1) | 2022 Mar 03 |
-| [7.0.0](20_eprs_rel_notes_7.0.0) | 2021 Dec 01 |
+| [7.6.0](eprs_rel_notes_7.6.0) | 07 Sep 2023 |
+| [7.5.1](eprs_rel_notes_7.5.1) | 26 May 2023 |
+| [7.5.0](eprs_rel_notes_7.5.0) | 14 Feb 2023 |
+| [7.4.0](eprs_rel_notes_7.4.0) | 29 Nov 2022 |
+| [7.3.0](15_eprs_rel_notes_7.3.0) | 15 Nov 2022 |
+| [7.2.1](16_eprs_rel_notes_7.2.1) | 25 Jul 2022 |
+| [7.2.0](17_eprs_rel_notes_7.2.0) | 24 Jun 2022 |
+| [7.1.0](18_eprs_rel_notes_7.1.0) | 21 Mar 2022 |
+| [7.0.1](19_eprs_rel_notes_7.0.1) | 03 Mar 2022 |
+| [7.0.0](20_eprs_rel_notes_7.0.0) | 01 Dec 2021 |
## Supported upgrade paths
diff --git a/product_docs/docs/eprs/7/installing/upgrading_replication_server/index.mdx b/product_docs/docs/eprs/7/installing/upgrading_replication_server/index.mdx
index 35567da06db..f0e949a0905 100644
--- a/product_docs/docs/eprs/7/installing/upgrading_replication_server/index.mdx
+++ b/product_docs/docs/eprs/7/installing/upgrading_replication_server/index.mdx
@@ -15,11 +15,12 @@ It is assumed that you are installing Replication Server 7.x on the same host ma
If you are using a version of Replication Server earlier than 6.2.x on Linux, first upgrade to 6.2.x, and then upgrade to version 7.x. See:
- [Upgrading from a Replication Server 6.2 installation on Linux](upgrading_with_xdb_rpm_package)
-
For more details on upgrading Replication Server, see:
- [Updating the publication and subscription server](updating_sub_and_pub_ports)
- [Upgrading from a Replication Server 7.x installation on Linux](upgrading_linux)
- [Upgrading with the graphical user interface installer](upgrading_with_gui_installer)
+After upgrading and before using Replication Server, you need to download a JDBC driver and create a symlink to it (for Linux) or rename the driver (for Windows). See [Installing a JDBC driver](../installing_jdbc_driver/) for more information.
+
diff --git a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_linux.mdx b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_linux.mdx
index 9649957e183..5c7febc4aa4 100644
--- a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_linux.mdx
+++ b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_linux.mdx
@@ -15,3 +15,6 @@ If you have an existing Replication Server 7.x installation on Linux, you can us
`yum upgrade edb-xdb*`
If you are upgrading from a Replication Server 6.2 installation on Linux, see [Upgrading from a Replication Server 6.2 installation on Linux](upgrading_with_xdb_rpm_package) for details.
+
+After upgrading and before using Replication Server, you need to download a JDBC driver and create a symlink to it. See [Installing a JDBC driver](../installing_jdbc_driver/) for more information.
+
diff --git a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_gui_installer.mdx b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_gui_installer.mdx
index 6ea33561b9a..7de246140a8 100644
--- a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_gui_installer.mdx
+++ b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_gui_installer.mdx
@@ -47,7 +47,9 @@ You can upgrade to Replication Server 7 using the graphical user interface insta
If you don't need to adjust the port numbers, register the publication server and subscription server with the Replication Server console as described in [Registering a publication server](../../05_smr_operation/02_creating_publication/01_registering_publication_server/#registering_publication_server) and [Registering a subscription server](../../05_smr_operation/03_creating_subscription/01_registering_subscription_server/#registering_subscription_server). The existing replication systems appear in the replication tree of the Replication Server Console.
-You are now ready to use Replication Server 7 to create new replication systems and manage existing ones.
+After upgrading and before using Replication Server, you need to download a JDBC driver and create a symlink to it (for Linux) or rename the driver (for Windows). See [Installing a JDBC driver](../installing_jdbc_driver/) for more information.
+
+You're now ready to use Replication Server 7 to create new replication systems and manage existing ones.
!!! Note
**For Windows:** If you give a new admin password during an upgrade, it is ignored. After the upgrade, Replication Server picks the old admin user name and password (which is saved in `edb-replconf`).
diff --git a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_xdb_rpm_package.mdx b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_xdb_rpm_package.mdx
index ef0d1a47e7e..8c4758286d8 100644
--- a/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_xdb_rpm_package.mdx
+++ b/product_docs/docs/eprs/7/installing/upgrading_replication_server/upgrading_with_xdb_rpm_package.mdx
@@ -183,4 +183,6 @@ If you're using Replication Server 6.2.x that was installed using the Replicatio
If you don't need to adjust the port numbers, register the publication server and subscription server with the Replication Server console as described in [Registering a publication server](../../05_smr_operation/02_creating_publication/01_registering_publication_server/#registering_publication_server) and [Registering a subscription server](../../05_smr_operation/03_creating_subscription/01_registering_subscription_server/#registering_subscription_server)). The existing replication systems appear in the replication tree of the Replication Server console.
-You are now ready to use Replication Server 7 to create new replication systems and manage existing ones.
+After upgrading and before using Replication Server, you need to download a JDBC driver and create a symlink to it. See [Installing a JDBC driver](../installing_jdbc_driver/) for more information.
+
+You're now ready to use Replication Server 7 to create new replication systems and manage existing ones.
diff --git a/product_docs/docs/eprs/7/supported_platforms.mdx b/product_docs/docs/eprs/7/supported_platforms.mdx
index 58e1e24015a..c142c40aef6 100644
--- a/product_docs/docs/eprs/7/supported_platforms.mdx
+++ b/product_docs/docs/eprs/7/supported_platforms.mdx
@@ -1,5 +1,5 @@
---
-title: "Supported platforms"
+title: "Supported Java platforms"
redirects:
- /eprs/latest/01_introduction/04_supported_jdk_versions/
- /eprs/latest/01_introduction/05_supported_jdk_versions/
@@ -22,4 +22,4 @@ Replication Server is certified to work with the following Java platforms:
| Debian 10 and 11 | Red Hat OpenJDK 11 |
| Ubuntu 18, 20, 22 | OpenJDK 11 |
-See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#eprs) for more information.
+See [Product Compatibility](https://www.enterprisedb.com/platform-compatibility#eprs) for more information on operating system support.
diff --git a/product_docs/docs/hadoop_data_adapter/2/hadoop_rel_notes/index.mdx b/product_docs/docs/hadoop_data_adapter/2/hadoop_rel_notes/index.mdx
index 04658f3c2c7..e435172e198 100644
--- a/product_docs/docs/hadoop_data_adapter/2/hadoop_rel_notes/index.mdx
+++ b/product_docs/docs/hadoop_data_adapter/2/hadoop_rel_notes/index.mdx
@@ -18,11 +18,11 @@ The Hadoop Foreign Data Wrapper documentation describes the latest version inclu
| Version | Release Date |
| --------------------------------| ------------ |
-| [2.3.1](hadoop_rel_notes_2.3.1) | 2023 Jul 20 |
-| [2.3.0](hadoop_rel_notes_2.3.0) | 2023 Jan 06 |
-| [2.2.0](hadoop_rel_notes_2.2.0) | 2022 May 26 |
-| [2.1.0](hadoop_rel_notes_2.1.0) | 2021 Dec 02 |
-| [2.0.8](hadoop_rel_notes_2.0.8) | 2021 Jun 24 |
-| [2.0.7](hadoop_rel_notes_2.0.7) | 2020 Nov 23 |
-| [2.0.5](hadoop_rel_notes_2.0.5) | 2019 Dec 10 |
-| [2.0.4](hadoop_rel_notes_2.0.4) | 2018 Nov 28 |
+| [2.3.1](hadoop_rel_notes_2.3.1) | 20 Jul 2023 |
+| [2.3.0](hadoop_rel_notes_2.3.0) | 06 Jan 2023 |
+| [2.2.0](hadoop_rel_notes_2.2.0) | 26 May 2022 |
+| [2.1.0](hadoop_rel_notes_2.1.0) | 02 Dec 2021 |
+| [2.0.8](hadoop_rel_notes_2.0.8) | 24 Jun 2021 |
+| [2.0.7](hadoop_rel_notes_2.0.7) | 23 Nov 2020 |
+| [2.0.5](hadoop_rel_notes_2.0.5) | 10 Dec 2019 |
+| [2.0.4](hadoop_rel_notes_2.0.4) | 28 Nov 2018 |
diff --git a/product_docs/docs/jdbc_connector/42.5.4.1/01_jdbc_rel_notes/index.mdx b/product_docs/docs/jdbc_connector/42.5.4.1/01_jdbc_rel_notes/index.mdx
index 020b7b15338..0f26c6d3125 100644
--- a/product_docs/docs/jdbc_connector/42.5.4.1/01_jdbc_rel_notes/index.mdx
+++ b/product_docs/docs/jdbc_connector/42.5.4.1/01_jdbc_rel_notes/index.mdx
@@ -13,16 +13,16 @@ These release notes describe what's new in each release. When a minor or patch r
| Version | Release Date |
| ---------------------------------------- | ------------ |
-| [42.5.4.1](jdbc_42.5.4.1_rel_notes) | 2023 Mar 16 |
-| [42.5.1.2](jdbc_42.5.1.2_rel_notes) | 2023 Feb 14 |
-| [42.5.1.1](jdbc_42.5.1.1_rel_notes) | 2022 Dec 9 |
-| [42.5.0.1](jdbc_42.5.0.1_rel_notes) | 2022 Sep 1 |
-| [42.3.3.1](08_jdbc_42.3.3.1_rel_notes) | 2022 Apr 20 |
-| [42.3.2.1](09_jdbc_42.3.2.1_rel_notes) | 2022 Feb 15 |
-| [42.2.24.1](10_jdbc_42.2.24.1_rel_notes) | 2021 Nov 5 |
-| [42.2.19.1](12_jdbc_42.2.19.1_rel_notes) | 2021 Apr 15 |
-| [42.2.12.3](14_jdbc_42.2.12.3_rel_notes) | 2020 Oct 22 |
-| [42.2.9.1](16_jdbc_42.2.9.1_rel_notes) | 2020 May 18 |
-| [42.2.8.1](18_jdbc_42.2.8.1_rel_notes) | 2019 Oct 21 |
+| [42.5.4.1](jdbc_42.5.4.1_rel_notes) | 16 Mar 2023 |
+| [42.5.1.2](jdbc_42.5.1.2_rel_notes) | 14 Feb 2023 |
+| [42.5.1.1](jdbc_42.5.1.1_rel_notes) | 09 Dec 2022 |
+| [42.5.0.1](jdbc_42.5.0.1_rel_notes) | 01 Sep 2022 |
+| [42.3.3.1](08_jdbc_42.3.3.1_rel_notes) | 20 Apr 2022 |
+| [42.3.2.1](09_jdbc_42.3.2.1_rel_notes) | 15 Feb 2022 |
+| [42.2.24.1](10_jdbc_42.2.24.1_rel_notes) | 05 Nov 2021 |
+| [42.2.19.1](12_jdbc_42.2.19.1_rel_notes) | 15 Apr 2021 |
+| [42.2.12.3](14_jdbc_42.2.12.3_rel_notes) | 22 Oct 2020 |
+| [42.2.9.1](16_jdbc_42.2.9.1_rel_notes) | 18 May 2020 |
+| [42.2.8.1](18_jdbc_42.2.8.1_rel_notes) | 21 Oct 2019 |
diff --git a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx
index e1316c08099..b0d38582974 100644
--- a/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx
+++ b/product_docs/docs/migration_toolkit/55/07_invoking_mtk/08_mtk_command_options.mdx
@@ -393,6 +393,8 @@ The following options control the way MTK migrates data in parallel.
Use the `-loaderCount` option to specify the number of parallel threads available in the pool to perform data load in parallel. This option is particularly useful if the host system that's running Migration Toolkit has high-end CPU and RAM resources. While `value` can be any nonzero, positive number, we recommend that value not exceed the number of CPU cores. For example, a dual-core CPU has an optimal value of `2`. The default is `1`.
+The number of table-level threads can introduce overhead on the source database server, depending on the table size and certain memory configurations, such as `shared_buffers` and `work_mem`. (Set the memory configuration to optimal values for the database server.) For very large tables, we recommend starting with a lower `-loaderCount` value, relative to the total cores available on the machine, and adjusting from there to find the optimal `-loaderCount` value.
+
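+As a hypothetical example, on an 8-core host migrating one very large schema, you might start with half the cores and adjust from there. The launcher name, source/target options, and schema name shown are assumptions for illustration; only `-loaderCount` is the option described here:
+
+```
+./runMTK.sh -sourcedbtype oracle -targetdbtype enterprisedb -loaderCount 4 HR
+```
+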
!!! Note
When multiple threads are used, depending on the actual number of threads and table data size, you might need to adjust the memory heap size for the Migration Toolkit.
diff --git a/product_docs/docs/migration_toolkit/55/mtk_rel_notes/index.mdx b/product_docs/docs/migration_toolkit/55/mtk_rel_notes/index.mdx
index 71b24680f38..e960b0f91f1 100644
--- a/product_docs/docs/migration_toolkit/55/mtk_rel_notes/index.mdx
+++ b/product_docs/docs/migration_toolkit/55/mtk_rel_notes/index.mdx
@@ -3,6 +3,7 @@ title: "Release notes"
redirects:
- ../01_whats_new/
navigation:
+ - mtk_5561_rel_notes
- mtk_556_rel_notes
- mtk_555_rel_notes
- mtk_554_rel_notes
@@ -19,14 +20,15 @@ The Migration Toolkit documentation describes the latest version of Migration To
| Version | Release Date |
| ------- | ------------ |
-| [55.6.0](mtk_556_rel_notes) | 2023 May 25 |
-| [55.5.0](mtk_555_rel_notes) | 2023 Feb 14 |
-| [55.4.0](mtk_554_rel_notes) | 2022 Nov 29 |
-| [55.3.0](mtk_553_rel_notes) | 2022 Oct 06 |
-| [55.2.3](mtk_5523_rel_notes) | 2022 Jun 16 |
-| [55.2.2](05_mkt_5522_rel_notes) | 2022 Mar 10 |
-| [55.2.1](06_mkt_5521_rel_notes) | 2022 Jan 13 |
-| [55.2.0](07_mkt_552_rel_notes) | 2021 Dec 2 |
-| [55.1.0](08_mkt_551_rel_notes) | 2021 Sep 20 |
-| [55.0.0](09_mkt_55_rel_notes) | 2021 Mar 19 |
+| [55.6.1](mtk_5561_rel_notes) | 06 Sep 2023 |
+| [55.6.0](mtk_556_rel_notes) | 25 May 2023 |
+| [55.5.0](mtk_555_rel_notes) | 14 Feb 2023 |
+| [55.4.0](mtk_554_rel_notes) | 29 Nov 2022 |
+| [55.3.0](mtk_553_rel_notes) | 06 Oct 2022 |
+| [55.2.3](mtk_5523_rel_notes) | 16 Jun 2022 |
+| [55.2.2](05_mkt_5522_rel_notes) | 10 Mar 2022 |
+| [55.2.1](06_mkt_5521_rel_notes) | 13 Jan 2022 |
+| [55.2.0](07_mkt_552_rel_notes) | 02 Dec 2021 |
+| [55.1.0](08_mkt_551_rel_notes) | 20 Sep 2021 |
+| [55.0.0](09_mkt_55_rel_notes) | 19 Mar 2021 |
diff --git a/product_docs/docs/migration_toolkit/55/mtk_rel_notes/mtk_5561_rel_notes.mdx b/product_docs/docs/migration_toolkit/55/mtk_rel_notes/mtk_5561_rel_notes.mdx
new file mode 100644
index 00000000000..18bff63a20a
--- /dev/null
+++ b/product_docs/docs/migration_toolkit/55/mtk_rel_notes/mtk_5561_rel_notes.mdx
@@ -0,0 +1,16 @@
+---
+title: "Version 55.6.1"
+---
+
+Released: 06 Sep 2023
+
+New features, enhancements, bug fixes, and other changes in Migration Toolkit 55.6.1 include:
+
+| Type | Description |
+| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Enhancement | Updated the table-level parallel loading capability to reduce the overhead on the source database by using a range-based criterion for loading each individual table data chunk instead of a fetch-offset approach. This optimization applies when the table's primary key or unique constraint is based on a non-composite numeric type attribute. [Support ticket #93360] |
+| Bug fix | Fixed the issue where a data copy fails when migrating a source Oracle table that has a full-width character in its name. [Support ticket #93552] |
+| Bug fix | Fixed an issue that caused a data copy to fail when a source MySQL table name contains a dash character. [Support ticket #95794] |
+| Bug fix | Fixed an issue that resulted in table creation failure for a source MySQL table that has a `DEFAULT` clause defined with one or more single-quote characters. [Support ticket #95794]|
+| Bug fix | Fixed an issue that caused a data copy to fail when a source MySQL table uses a MySQL reserved word as its name. [Support ticket #94822] |
+| Bug fix | Fixed the issue encountered during Microsoft SQL Server to EDB Postgres Advanced Server, Oracle to EDB Postgres Advanced Server, or EDB Postgres Advanced Server to Oracle `-dataOnly` migrations where foreign key constraints are not restored after the data copy completes if the `-tables` option is specified and the parent table isn't included in the list of specified tables. |
diff --git a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
index 3e99eee7f41..92d5ea532ba 100644
--- a/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
+++ b/product_docs/docs/mongo_data_adapter/5/mongo_rel_notes/index.mdx
@@ -17,13 +17,13 @@ The Mongo Foreign Data Wrapper documentation describes the latest version of Mon
| Version | Release date |
| ----------------------------- | ------------ |
-| [5.5.1](mongo5.5.1_rel_notes) | 2023 Jul 20 |
-| [5.5.0](mongo5.5.0_rel_notes) | 2023 Jan 06 |
-| [5.4.0](mongo5.4.0_rel_notes) | 2022 May 26 |
-| [5.3.0](mongo5.3.0_rel_notes) | 2021 Dec 02 |
-| [5.2.9](mongo5.2.9_rel_notes) | 2021 Jun 24 |
-| [5.2.8](mongo5.2.8_rel_notes) | 2020 Nov 23 |
-| [5.2.6](mongo5.2.8_rel_notes) | 2019 Sep 27 |
-| [5.2.3](mongo5.2.8_rel_notes) | 2018 Nov 01 |
+| [5.5.1](mongo5.5.1_rel_notes) | 20 Jul 2023 |
+| [5.5.0](mongo5.5.0_rel_notes) | 06 Jan 2023 |
+| [5.4.0](mongo5.4.0_rel_notes) | 26 May 2022 |
+| [5.3.0](mongo5.3.0_rel_notes) | 02 Dec 2021 |
+| [5.2.9](mongo5.2.9_rel_notes) | 24 Jun 2021 |
+| [5.2.8](mongo5.2.8_rel_notes) | 23 Nov 2020 |
+| [5.2.6](mongo5.2.8_rel_notes) | 27 Sep 2019 |
+| [5.2.3](mongo5.2.8_rel_notes) | 01 Nov 2018 |
diff --git a/product_docs/docs/mysql_data_adapter/2/mysql_rel_notes/index.mdx b/product_docs/docs/mysql_data_adapter/2/mysql_rel_notes/index.mdx
index ed38e10191e..99debbb99dd 100644
--- a/product_docs/docs/mysql_data_adapter/2/mysql_rel_notes/index.mdx
+++ b/product_docs/docs/mysql_data_adapter/2/mysql_rel_notes/index.mdx
@@ -17,11 +17,11 @@ The MySQL Foreign Data Wrapper documentation describes the latest version of MyS
| Version | Release Date |
| ----------------------------- | ------------ |
-| [2.9.1](mysql2.9.1_rel_notes) | 2023 Jul 20 |
-| [2.9.0](mysql2.9.0_rel_notes) | 2023 Jan 06 |
-| [2.8.0](mysql2.8.0_rel_notes) | 2022 May 26 |
-| [2.7.0](mysql2.7.0_rel_notes) | 2021 Dec 02 |
-| [2.6.0](mysql2.6.0_rel_notes) | 2021 May 18 |
-| [2.5.5](mysql2.5.5_rel_notes) | 2020 Nov 23 |
-| [2.5.3](mysql2.5.3_rel_notes) | 2019 Dec 10 |
-| [2.5.1](mysql2.5.1_rel_notes) | 2018 Nov 28 |
\ No newline at end of file
+| [2.9.1](mysql2.9.1_rel_notes) | 20 Jul 2023 |
+| [2.9.0](mysql2.9.0_rel_notes) | 06 Jan 2023 |
+| [2.8.0](mysql2.8.0_rel_notes) | 26 May 2022 |
+| [2.7.0](mysql2.7.0_rel_notes) | 02 Dec 2021 |
+| [2.6.0](mysql2.6.0_rel_notes) | 18 May 2021 |
+| [2.5.5](mysql2.5.5_rel_notes) | 23 Nov 2020 |
+| [2.5.3](mysql2.5.3_rel_notes) | 10 Dec 2019 |
+| [2.5.1](mysql2.5.1_rel_notes) | 28 Nov 2018 |
\ No newline at end of file
diff --git a/product_docs/docs/ocl_connector/15/02_supported_platforms.mdx b/product_docs/docs/ocl_connector/15/02_supported_platforms.mdx
index 9bf7285cd7b..0036e328fa5 100644
--- a/product_docs/docs/ocl_connector/15/02_supported_platforms.mdx
+++ b/product_docs/docs/ocl_connector/15/02_supported_platforms.mdx
@@ -5,5 +5,5 @@ title: "Supported platforms"
-The EDB OCL Connector is supported on the same platforms as EDB Postgres Advanced Server. To determine the platform support for the EDB OCL Connector, you can either refer to the platform support for EDB Postgres Advanced Server on the [Platform Compatibility page](https://www.enterprisedb.com/platform-compatibility#epas) on the EDB website or refer to [Installing EDB OCL Connector](./installing).
+The EDB OCL Connector is supported on the same platforms as EDB Postgres Advanced Server. To determine the platform support for the EDB OCL Connector, you can refer either to the platform support for EDB Postgres Advanced Server on the [Platform Compatibility page](https://www.enterprisedb.com/platform-compatibility#epas) on the EDB website or to [Installing EDB OCL Connector](./installing).
diff --git a/product_docs/docs/ocl_connector/15/03_libpq_compatibility.mdx b/product_docs/docs/ocl_connector/15/03_libpq_compatibility.mdx
index 078f6e096e6..6f749a2bf6e 100644
--- a/product_docs/docs/ocl_connector/15/03_libpq_compatibility.mdx
+++ b/product_docs/docs/ocl_connector/15/03_libpq_compatibility.mdx
@@ -5,8 +5,34 @@ title: "libpq cross-version compatibility"
-EDB OCL installation always uses the latest libpq. The different scenarios supported under libpq cross-version compatibility are as following:
+EDB OCL installation always uses the latest libpq. When upgrading to a new major release of EDB Postgres Advanced Server, the different scenarios supported under libpq cross-version compatibility are as follows:
- If the latest libpq is installed on the machine, OCL uses it.
- If the latest libpq isn't already installed, OCL installs it. It doesn't use the existing libpq of older versions even if it's installed.
- If you upgrade the OCL version, then libpq is also upgraded to its latest version.
+
+If you're upgrading to a minor release, you need to manually upgrade libpq.
+
+## Upgrading libpq for minor releases of EDB Postgres Advanced Server
+
+For minor releases of EDB Postgres Advanced Server, you might need to upgrade libpq to a required version on the client machine where you installed EDB OCL Connector. (Any new libpq version dependencies are listed in the release notes.) If you need to upgrade libpq, run the appropriate command for your operating system.
+
+### For Ubuntu/Debian
+
+```
+sudo apt-get install edb-as15-libpq5
+```
+
+### For RHEL and SLES
+
+```
+sudo <package-manager> install edb-as15-server-libs
+```
+
+Where `<package-manager>` is the package manager used with your operating system:
+
+| Package manager | Operating system |
+| --------------- | -------------------------------- |
+| dnf | RHEL 8/9 and derivatives |
+| yum | RHEL 7 and derivatives, CentOS 7 |
+| zypper | SLES |
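+
+For example, on RHEL 9 and derivatives the substitution gives the following command (shown only as an illustration):
+
+```
+sudo dnf install edb-as15-server-libs
+```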
\ No newline at end of file
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/02_forming_a_connection_string.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/02_forming_a_connection_string.mdx
index 260392ad1fd..77ee84e425f 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/02_forming_a_connection_string.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/02_forming_a_connection_string.mdx
@@ -21,7 +21,7 @@ postgresql://[user[:password]@][host][:port][/dbname]
[?param1=value1&...]
```
-You can also use a Postgres-style URI to specify multiple host components (each with an optional port component) in a single URI. A multi-host connection string takes the form:
+You can also use a Postgres-style URI to specify multiple host components, each with an optional port component, in a single URI. A multi-host connection string takes the form:
`postgresql://<user>:<password>@host1:port1,host2:port2,host3:port3/<dbname>`
@@ -31,13 +31,13 @@ Where:
`password` is the password associated with the connecting user.
- `host` is the host name or IP address to which you are connecting. To specify an IPV6 address, enclose the address in square brackets.
+ `host` is the host name or IP address to which you're connecting. To specify an IPV6 address, enclose the address in square brackets.
- `port` is the port number to which you are connecting.
+ `port` is the port number to which you're connecting.
- `dbname` is the name of the database with which you are connecting.
+ `dbname` is the name of the database with which you're connecting.
- `paramx=valuex` pairs specify extra (application-specific) connection properties.
+ `paramx=valuex` pairs specify extra, application-specific connection properties.
For example, each of the following connection strings establishes a connection to the `edb` database on port `5444` of a system with an IP address of `10.0.0.4`:
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/03_compiling_and_linking_a_program.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/03_compiling_and_linking_a_program.mdx
index f6ff8c06e62..9cf803a12ba 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/03_compiling_and_linking_a_program.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/03_compiling_and_linking_a_program.mdx
@@ -7,13 +7,9 @@ title: "Compiling and linking a program"
The EDB Open Client Library allows applications written using the Oracle Call Interface API to connect to and access an EDB database with minimal changes to the C source code. The EDB Open Client Library files are named:
- On Linux:
+- On Linux: `libedboci.so`
- `libedboci.so`
-
- On Windows:
-
- `edboci.dll`
+- On Windows: `edboci.dll`
The files are installed in the `oci/lib` subdirectory.
@@ -21,30 +17,25 @@ The files are installed in the `oci/lib` subdirectory.
This example compiles and links the sample program `edb_demo.c` in a Linux environment. The `edb_demo.c` file is located in the `oci/samples` subdirectory.
-1. Set the `ORACLE_HOME` and `EDB_HOME` environment variables.
-
-2. Set `ORACLE_HOME` to the complete pathname of the Oracle home directory.
-
- For example:
+1. Set `ORACLE_HOME` to the complete pathname of the Oracle home directory, for example:
`export ORACLE_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server`
-3. Set `EDB_HOME` to the complete pathname of the home directory.
-
- For example:
+1. Set `EDB_HOME` to the complete pathname of the home directory, for example:
`export EDB_HOME=/usr/edb`
-4. Set `LD_LIBRARY_PATH` to the complete path of `libpthread.so`. By default, `libpthread.so` is located in `/lib64`.
+1. Set `LD_LIBRARY_PATH` to the complete path of `libpthread.so`. By default, `libpthread.so` is located in `/lib64`.
`export LD_LIBRARY_PATH=/lib64/lib:$LD_LIBRARY_PATH`
-5. Set `LD_LIBRARY_PATH` to include the EDB Postgres Advanced Server Open Client library. By default, `libedboci.so` is located in `$EDB_HOME/oci/lib`.
+1. Set `LD_LIBRARY_PATH` to include the EDB Postgres Advanced Server Open Client library. By default, `libedboci.so` is located in `$EDB_HOME/oci/lib`.
`export LD_LIBRARY_PATH=$EDB_HOME/oci:$EDB_HOME/oci/lib:$LD_LIBRARY_PATH`
-6. Then, compile and link the OCL API program.
-
- `cd $EDB_HOME/oci/samples`
+1. Compile and link the OCL API program:
- `make`
+ ```text
+ cd $EDB_HOME/oci/samples
+ make
+ ```
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/04_ref_cursor_support.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/04_ref_cursor_support.mdx
index b1f5902c2fb..cb84ed028d4 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/04_ref_cursor_support.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/04_ref_cursor_support.mdx
@@ -17,7 +17,7 @@ The EDB Postgres Advanced Server Open Client Library supports the use of `REF CU
The EDB OCL Connector also supports the `SQLT_RSET` data type.
-This example invokes a stored procedure that opens a cursor and returns a `REF CURSOR` as an output parameter. The code sample assumes that a PL/SQL procedure named `openCursor` (with an `OUT` parameter of type `REF CURSOR`) was created on the database server and that the required handles were allocated:
+This example invokes a stored procedure that opens a cursor and returns a `REF CURSOR` as an output parameter. The code sample assumes that a PL/SQL procedure named `openCursor`, with an `OUT` parameter of type `REF CURSOR`, was created on the database server and that the required handles were allocated:
```c
char* openCursor = "begin \
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/05_ocl_function_reference.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/05_ocl_function_reference.mdx
index 731f5be95e7..04ff3cd9d87 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/05_ocl_function_reference.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/05_ocl_function_reference.mdx
@@ -5,7 +5,7 @@ title: "OCL function reference"
-The following tables list the functions supported by the EDB OCL connector. Any and all header files must be supplied by the user. EDB Postgres Advanced Server doesn't supply any such files.
+The following tables list the functions supported by the EDB OCL connector. You must supply any required header files; EDB Postgres Advanced Server doesn't provide them.
## Connect, authorize, and initialize functions
@@ -29,7 +29,7 @@ The following tables list the functions supported by the EDB OCL connector. Any
### Using the tnsnames.ora file
-The `OCIServerAttach` and `OCILogon` methods use `NET_SERVICE_NAME` as a connection descriptor specified in the `dblink` parameter of the `tnsnames.ora` file. Use the `tnsnames.ora` file (compatible with Oracle databases) to specify database connection details. OCL searches your home directory for a file named `.tnsnames.ora`. If OCL doesn't find the `.tnsnames.ora` file in the home directory, it searches for `tnsnames.ora` on the path specified in `TNS_ADMIN` environment variable.
+The `OCIServerAttach` and `OCILogon` methods use `NET_SERVICE_NAME` as a connection descriptor specified in the `dblink` parameter of the `tnsnames.ora` file. Use the `tnsnames.ora` file (compatible with Oracle databases) to specify database connection details. OCL searches your home directory for a file named `.tnsnames.ora`. If OCL doesn't find the `.tnsnames.ora` file there, it searches for `tnsnames.ora` on the path specified in the `TNS_ADMIN` environment variable.
You can specify multiple descriptors `(NET_SERVICE_NAME)` in the `tnsnames.ora` file.
@@ -78,7 +78,7 @@ If you don't have a `tnsnames.ora` file, supply the connection string in the for
### EDB_ATTR_EMPTY_STRINGS
-By default, EDB Postgres Advanced Server treats an empty string as a `NULL` value. You can use the `EDB_ATTR_EMPTY_STRINGS` environment attribute to control the behavior of the OCL connector when mapping empty strings. To modify the mapping behavior, use the `OCIAttrSet()` function to set `EDB_ATTR_EMPTY_STRINGS` to one of the following.
+By default, EDB Postgres Advanced Server treats an empty string as a NULL value. You can use the `EDB_ATTR_EMPTY_STRINGS` environment attribute to control the behavior of the OCL connector when mapping empty strings. To modify the mapping behavior, use the `OCIAttrSet()` function to set `EDB_ATTR_EMPTY_STRINGS` to one of the following.
| Value | Description |
| ----------------------- | ------------------------------------------------- |
@@ -104,7 +104,7 @@ When EDB Postgres Advanced Server executes a `SELECT` statement, it examines the
If the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` handle is set to `EDB_WITHOUT_HOLD`, the query is executed as a normal prepared statement.
-If the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` handle is set to `OCI_DEFAULT`, EDB Postgres Advanced Server uses the value of the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` handle. (If the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` is set to `EDB_WITH_HOLD`, the query executes as a `WITH HOLD` cursor. Otherwise, the query executes as a protocol-prepared statement).
+If the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` handle is set to `OCI_DEFAULT`, EDB Postgres Advanced Server uses the value of the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` handle. (If the `EDB_ATTR_HOLDABLE` attribute in the `OCIServer` is set to `EDB_WITH_HOLD`, the query executes as a `WITH HOLD` cursor. Otherwise, the query executes as a protocol-prepared statement.)
### EDB_HOLD_CURSOR_ACTION
@@ -146,13 +146,13 @@ OCIStmtExecute(...);
If your application doesn't run properly with the extra commits added by `EDB_COMMIT_AFTER_CURSOR`, you can try setting `EDB_ATTR_HOLD_CURSOR_ACTION` to `EDB_CURSOR_WITHOUT_XACT_BLK`. With this action, the OCL doesn't begin a new transaction chain. If you create a `WITH HOLD` cursor immediately after committing or rolling back a transaction, the cursor is created in its own transaction, the database server commits that transaction, and the cursor persists.
-You might still experience errors if the cursor declaration is not the first statement in a transaction. If you execute some other statement before declaring the cursor, the `WITH HOLD` cursor is created in a transaction block and can be rolled back if an error occurs (or if your application calls `OCITransRollback()`).
+You might still experience errors if the cursor declaration isn't the first statement in a transaction. If you execute some other statement before declaring the cursor, the `WITH HOLD` cursor is created in a transaction block and can be rolled back if an error occurs or if your application calls `OCITransRollback()`.
-You can set the `EDB_HOLD_CURSOR_ACTION` on the server level (`OCIServer`) or for each statement handle (`OCIStmt`). If the statement attribute is set to a value other than `OCI_DEFAULT`, the value is derived from the statement handle. Otherwise (if the statement attribute is set to `OCI_DEFAULT`), the value is taken from the server handle. So you can define a server-wide default action by setting the attribute in the server handle and leaving the attribute set to `OCI_DEFAULT` in the statement handles. You can use different values for each statement handle (or server handle) as you see fit.
+You can set `EDB_HOLD_CURSOR_ACTION` at the server level (`OCIServer`) or for each statement handle (`OCIStmt`). If the statement attribute is set to a value other than `OCI_DEFAULT`, the value is derived from the statement handle. Otherwise, if the statement attribute is set to `OCI_DEFAULT`, the value is taken from the server handle. So you can define a server-wide default action by setting the attribute in the server handle and leaving the attribute set to `OCI_DEFAULT` in the statement handles. You can use different values for each statement handle or server handle as you see fit.
### EDB_ATTR_STMT_LVL_TX
-Unless otherwise instructed, the OCL connector rolls back the current transaction whenever the server reports an error. You can override the automatic `ROLLBACK` with the `edb_stmt_level_tx` parameter, which preserves modifications in a transaction, even if one (or several) statements raise an error in the transaction.
+Unless otherwise instructed, the OCL connector rolls back the current transaction whenever the server reports an error. You can override the automatic `ROLLBACK` with the `edb_stmt_level_tx` parameter, which preserves modifications in a transaction, even if one or more statements raise an error in the transaction.
You can use the `OCIServer` attribute with `OCIAttrSet()` and `OCIAttrGet()` to enable or disable `EDB_ATTR_STMT_LEVEL_TX`. By default, `edb_stmt_level_tx` is disabled. To enable `edb_stmt_level_tx`, the client application must call `OCIAttrSet()`:
@@ -224,7 +224,7 @@ OCIAttrSet(server,
### xaoSvcCtx
-To use the `xaoSvcCtx` function, extensions in the `xaoSvcCtx` or `xa_open` connection string format must be provided as follows:
+To use the `xaoSvcCtx` function, provide extensions in the `xaoSvcCtx` or `xa_open` connection string format as follows:
`Oracle_XA{+ ...}`
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/06_ocl_error_codes_reference.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/06_ocl_error_codes_reference.mdx
index fe6f48d510c..13360a97423 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/06_ocl_error_codes_reference.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/06_ocl_error_codes_reference.mdx
@@ -7,7 +7,7 @@ title: "OCL error codes (reference)"
The following table lists the error code mappings defined by the OCL Connector. When the database server reports an error code or condition (shown in the first or second column), the OCL converts the value to the compatible value displayed in the third column.
-| Error Code | Condition Name | Oracle Error Code |
+| Error code | Condition name | Oracle error code |
| ---------- | ------------------------------------------------- | ----------------- |
| 42601 | syntax_error | ORA-16945 |
| 42P01 | undefined_table | ORA-00942 |
diff --git a/product_docs/docs/ocl_connector/15/04_open_client_library/08_otl_support.mdx b/product_docs/docs/ocl_connector/15/04_open_client_library/08_otl_support.mdx
index c039d1a8b3d..5b5a1880ae4 100644
--- a/product_docs/docs/ocl_connector/15/04_open_client_library/08_otl_support.mdx
+++ b/product_docs/docs/ocl_connector/15/04_open_client_library/08_otl_support.mdx
@@ -5,16 +5,16 @@ title: "OTL support"
-Oracle Template Library (OTL)) is a C++ library for database access. It consists of a single header file. To know more about OTL, see the [Oracle, Odbc and DB2-CLI Template Library Programmer's Guide](http://otl.sourceforge.net/).
+Oracle Template Library (OTL) is a C++ library for database access. It consists of a single header file. To know more about OTL, see the [Oracle, Odbc and DB2-CLI Template Library Programmer's Guide](http://otl.sourceforge.net/).
## OTL certification
-The EDB OCL Connector, version 13.1.4.2, is certified with OTL 4.0. To use OTL-supported data types and for other OTL-specific behavior, define the OTL environment variable (the value is not important) on the shell before running an OTL-based app. For example: You can export `OTL=TRUE` for conditional execution of scenarios that are related to OTL.
+The EDB OCL Connector, version 13.1.4.2, is certified with OTL 4.0. To use OTL-supported data types and for other OTL-specific behavior, define the OTL environment variable (the value isn't important) on the shell before running an OTL-based app. For example, you can export `OTL=TRUE` for conditional execution of scenarios that are related to OTL.
EDB OCL Connector is certified with the following OTL features:
-- Connect, disconnect, commit, and rollback using `otl_connect`.
-- Constant SQL statements (a SQL statement is constant if it doesn't have any bind variables) using the static function `otl_cursor::direct_exec`. It includes most DDL statements like `CREATE TABLE` and `CREATE PROCEDURE/FUNCTION`.
+- Connect, disconnect, commit, and roll back using `otl_connect`.
+- Constant SQL statements using the static function `otl_cursor::direct_exec`. (A SQL statement is constant if it doesn't have any bind variables.) It includes most DDL statements like `CREATE TABLE` and `CREATE PROCEDURE/FUNCTION`.
- SQL statements with bind variable using `otl_stream class`. It includes most DML statements like `SELECT`, `UPDATE`, `DELETE`, `INSERT`, and `PROCEDURE/FUNCTION` calls.
- Date/Time data types using `otl_datetime`.
- Raw/Long Raw data types using `otl_long_string`.
@@ -22,7 +22,7 @@ EDB OCL Connector is certified with the following OTL features:
## Connect and log in
-The following example initializes OCL and connects to a database using `tnsnames.ora` based connection string:
+This example initializes OCL and connects to a database using a `tnsnames.ora`-based connection string:
```c
otl_connect db;
@@ -35,7 +35,7 @@ if(db.connected)
## CREATE TABLE, INSERT, and SELECT
-The following example uses `otl_cursor::direct_exec` to create a table and then insert a row in this table. You can then use `otl_stream` to retrieve the inserted row.
+This example uses `otl_cursor::direct_exec` to create a table and then insert a row in the table. You can then use `otl_stream` to retrieve the inserted row.
```c
char* createstmt =
@@ -65,7 +65,7 @@ while (!otlCur.eof()) {
## UPDATE
-The following example uses bind parameters in an `UPDATE` statement:
+This example uses bind parameters in an `UPDATE` statement:
```c
char* updatestmt = "UPDATE testtable SET c1=:c1 "
@@ -80,7 +80,7 @@ otlCur << data << whereValue;
## Stored procedure
-The following example creates a stored procedure using `otl_cursor::direct_exec` and then calls it using `otl_stream`:
+This example creates a stored procedure using `otl_cursor::direct_exec` and then calls it using `otl_stream`:
```c
otl_cursor::direct_exec(
@@ -108,7 +108,7 @@ cout << "B: " << b << endl;
## Function
-The following example creates a function using `otl_cursor::direct_exec` and then calls it using `otl_stream`:
+This example creates a function using `otl_cursor::direct_exec` and then calls it using `otl_stream`:
!!! Note
This example is using the `emp` table in the `edb` sample database.
@@ -145,7 +145,7 @@ cout << "Retrieved Value: " << eno << endl;
## REF CURSOR
-The following example creates a package with a procedure that returns three ref cursors as `OUT` parameters and then calls it.
+This example creates a package with a procedure that returns three ref cursors as `OUT` parameters and then calls it.
!!! Note
This example is using the `emp` table in the `edb` sample database.
diff --git a/product_docs/docs/ocl_connector/15/05_generating_the_ocl_trace.mdx b/product_docs/docs/ocl_connector/15/05_generating_the_ocl_trace.mdx
index a5e12ee95a5..b6ef4d4b8da 100644
--- a/product_docs/docs/ocl_connector/15/05_generating_the_ocl_trace.mdx
+++ b/product_docs/docs/ocl_connector/15/05_generating_the_ocl_trace.mdx
@@ -5,7 +5,7 @@ title: "Generating the OCL trace"
-The OCL tracing option logs direct communication (queries, updates, etc.) with the backend in the specified `OCI_DEBUG_LOG file`. It also logs the functions/APIs that were invoked. The trace files are generated in the default working directory (`oci_log_file_name`). If you append the path with a file name (`directory path/oci_log_file_name`), then the trace files are generated at specific location.
+The OCL tracing option logs direct communication (queries, updates, and so on) with the backend in the file specified by `OCI_DEBUG_LOG`. It also logs the functions/APIs that were invoked. If `OCI_DEBUG_LOG` is set to a file name only (`oci_log_file_name`), the trace files are generated in the default working directory. If you prepend a directory path to the file name (`directory path/oci_log_file_name`), the trace files are generated at that location.
A trace file is generated for each connection in text-file (readable) format.
@@ -16,8 +16,8 @@ To generate the OCL trace:
1. Enable the EDB client-side tracing for OCL. You can enable the OCL tracing by setting these environment variables:
-`export OCI_DEBUG_LEVEL=4`
+ - `export OCI_DEBUG_LEVEL=4`
-`export OCI_DEBUG_LOG=oci_log_file`
+ - `export OCI_DEBUG_LOG=oci_log_file`
2. After you export the environment variables, run the application. The OCL trace files are generated in the specified directory.
diff --git a/product_docs/docs/ocl_connector/15/06_using_ssl.mdx b/product_docs/docs/ocl_connector/15/06_using_ssl.mdx
index fa87a5fcecf..24810e0bada 100644
--- a/product_docs/docs/ocl_connector/15/06_using_ssl.mdx
+++ b/product_docs/docs/ocl_connector/15/06_using_ssl.mdx
@@ -13,9 +13,7 @@ EDB Postgres Advanced Server provides native support for using SSL connections t
1. Configure the server and client-side certificates. For detailed information about configuring SSL client and server-side certificates, refer to the [PostgreSQL SSL documentation](https://www.postgresql.org/docs/12/libpq-ssl.html).
-2. Enable the SSL OCL connection:
-
- In an OCL client application, you can enable SSL mode by setting the `EDB_ATTR_SSL` attribute in `Session`.
+2. Enable the SSL OCL connection. In an OCL client application, you can enable SSL mode by setting the `EDB_ATTR_SSL` attribute in `Session`:
```c
char* sslmode = "verify-full";
@@ -30,7 +28,7 @@ retValue = OCIAttrSet((dvoid*)authp,
!!! Note
`EDB_ATTR_SSL` is defined in the `edboci.h` header file available in the installation directory.
-3. After setting SSL attribute, you can use the `OCILogon` function to create a connection:
+3. After setting the SSL attribute, you can use the `OCILogon` function to create a connection:
```c
OCILogon(pEnv,pError,&pSvc,(OraText*)pUsername,(ub4)UsernameLen,
diff --git a/product_docs/docs/ocl_connector/15/07_scram_compatibility.mdx b/product_docs/docs/ocl_connector/15/07_scram_compatibility.mdx
index d9e7d2f2b27..76b6fb8ed5d 100644
--- a/product_docs/docs/ocl_connector/15/07_scram_compatibility.mdx
+++ b/product_docs/docs/ocl_connector/15/07_scram_compatibility.mdx
@@ -5,4 +5,4 @@ title: "Scram compatibility"
-The EDB OCL driver provides SCRAM-SHA-256 support for EDB Postgres Advanced Server version 11 and later. This support is available from EDB OCL 11.0.1 release and later.
+The EDB OCL driver provides SCRAM-SHA-256 support for EDB Postgres Advanced Server version 11 and later. This support is available in EDB OCL release 11.0.1 and later.
diff --git a/product_docs/docs/ocl_connector/15/installing/upgrading.mdx b/product_docs/docs/ocl_connector/15/installing/upgrading.mdx
index 46baa8d506e..376b5dbdcfa 100644
--- a/product_docs/docs/ocl_connector/15/installing/upgrading.mdx
+++ b/product_docs/docs/ocl_connector/15/installing/upgrading.mdx
@@ -30,3 +30,4 @@ Where `` is the package manager used with your operating system
| apt-get | Debian and Ubuntu |
+
diff --git a/product_docs/docs/ocl_connector/15/installing/windows.mdx b/product_docs/docs/ocl_connector/15/installing/windows.mdx
index 808dd931752..0dfc9bb3234 100644
--- a/product_docs/docs/ocl_connector/15/installing/windows.mdx
+++ b/product_docs/docs/ocl_connector/15/installing/windows.mdx
@@ -5,11 +5,11 @@ redirects:
- /ocl_connector/latest/04_open_client_library/01_installing_and_configuring_the_ocl_connector/install_on_windows/
---
-EDB provides a graphical interactive installer for Windows. You can access it two ways:
+EDB provides a graphical interactive installer for Windows. You can access it in two ways:
- Download the graphical installer from the [Downloads page](https://www.enterprisedb.com/software-downloads-postgres#connectors), and invoke the installer directly. See [Installing directly](#installing-directly).
-- Use StackBuilder Plus (with EDB Postgres Advanced Server) to download the EDB installer package and invoke the graphical installer. See [Using StackBuilder Plus](#using-stackbuilder-plus).
+- Use StackBuilder Plus with EDB Postgres Advanced Server to download the EDB installer package and invoke the graphical installer. See [Using StackBuilder Plus](#using-stackbuilder-plus).
## Installing directly
@@ -27,7 +27,7 @@ If you're using EDB Postgres Advanced Server, you can invoke the graphical insta
1. In StackBuilder Plus, follow the prompts until you get to the module selection page.
- On the Welcome page, select the target server installation from the list of available servers. If your network requires you to use a proxy server to access the internet, select **Proxy servers** and specify a server. Select **Next**.
+ On the Welcome page, from the list of available servers, select the target server installation. If your network requires you to use a proxy server to access the internet, select **Proxy servers** and specify a server. Select **Next**.
1. Expand the **Database Drivers** node, and select **EnterpriseDB OCI Connector**.
@@ -40,7 +40,7 @@ If you're using EDB Postgres Advanced Server, you can invoke the graphical insta
1. On the Setup OCI page, select **Next**.
-1. Browse to a directory where you want OCI to be installed, or leave the directory set to the default location. Select **Next**.
+1. Browse to a directory where you want to install OCI, or leave the directory set to the default location. Select **Next**.
1. On the Ready to Install page, select **Next**.
diff --git a/product_docs/docs/ocl_connector/15/ocl_rel_notes/15.2.0.4_ocl_release_notes.mdx b/product_docs/docs/ocl_connector/15/ocl_rel_notes/15.2.0.4_ocl_release_notes.mdx
new file mode 100644
index 00000000000..8d7d07d7528
--- /dev/null
+++ b/product_docs/docs/ocl_connector/15/ocl_rel_notes/15.2.0.4_ocl_release_notes.mdx
@@ -0,0 +1,14 @@
+---
+title: "Version 15.2.0.4"
+---
+
+The EDB OCL Connector provides an API similar to the Oracle Call Interface.
+
+New features, enhancements, bug fixes, and other changes in the EDB OCL Connector 15.2.0.4 include:
+
+| Type | Description |
+| ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Bug fix | Fixed a regression whereby a spurious rollback/begin occurs after deallocating a statement. [Support ticket #94735] |
+
+!!!Important
+ This version requires EDB Postgres Advanced Server libpq version 15.4 or later on the client machine where the EDB OCL Connector is installed. See [Upgrading libpq for minor releases](../03_libpq_compatibility/#upgrading-libpq-for-minor-releases-of-edb-postgres-advanced-server).
+
diff --git a/product_docs/docs/ocl_connector/15/ocl_rel_notes/index.mdx b/product_docs/docs/ocl_connector/15/ocl_rel_notes/index.mdx
index 6312a14c160..861724d4958 100644
--- a/product_docs/docs/ocl_connector/15/ocl_rel_notes/index.mdx
+++ b/product_docs/docs/ocl_connector/15/ocl_rel_notes/index.mdx
@@ -2,6 +2,7 @@
title: "EDB OCL Connector release notes"
navTitle: Release Notes
navigation:
+ - 15.2.0.4_ocl_release_notes
- 15.2.0.3_ocl_release_notes
- 15.2.0.2_ocl_release_notes
- 01_ocl_release_notes
@@ -13,7 +14,8 @@ Release notes describe what's new in a release. When a minor or patch release in
| Version | Release date |
| -------------------------------------- | ------------ |
-| [15.2.0.3](15.2.0.3_ocl_release_notes) | 2023 Jun 20 |
-| [15.2.0.2](15.2.0.2_ocl_release_notes) | 2023 May 19 |
-| [15.2.0.1](01_ocl_release_notes) | 2023 Feb 14 |
+| [15.2.0.4](15.2.0.4_ocl_release_notes) | 24 Aug 2023 |
+| [15.2.0.3](15.2.0.3_ocl_release_notes) | 20 Jun 2023 |
+| [15.2.0.2](15.2.0.2_ocl_release_notes) | 19 May 2023 |
+| [15.2.0.1](01_ocl_release_notes) | 14 Feb 2023 |
diff --git a/product_docs/docs/odbc_connector/12/01_odbc_rel_notes/index.mdx b/product_docs/docs/odbc_connector/12/01_odbc_rel_notes/index.mdx
index 9e63dbfb4c3..63ce06b4210 100644
--- a/product_docs/docs/odbc_connector/12/01_odbc_rel_notes/index.mdx
+++ b/product_docs/docs/odbc_connector/12/01_odbc_rel_notes/index.mdx
@@ -8,6 +8,6 @@ Release notes describe what is new in a release. When a minor or patch release i
| Version | Release Date |
| ------------------------------------------- | ------------ |
-| [12.02.0000.02](01_odbc_12.2.0.2_rel_notes) | 2021 Nov 16 |
-| [12.00.0000.02](03_odbc_12.0.0.2_rel_notes) | 2020 Aug 9 |
+| [12.02.0000.02](01_odbc_12.2.0.2_rel_notes) | 16 Nov 2021 |
+| [12.00.0000.02](03_odbc_12.0.0.2_rel_notes) | 09 Aug 2020 |
diff --git a/product_docs/docs/odbc_connector/13/01_odbc_rel_notes/index.mdx b/product_docs/docs/odbc_connector/13/01_odbc_rel_notes/index.mdx
index f4f7db8dacc..1263f196cd5 100644
--- a/product_docs/docs/odbc_connector/13/01_odbc_rel_notes/index.mdx
+++ b/product_docs/docs/odbc_connector/13/01_odbc_rel_notes/index.mdx
@@ -10,8 +10,8 @@ Release notes describe what's new in a release. When a minor or patch release in
| Version | Release date |
| ----------------------------------------- | ------------ |
-| [13.02.0000.02](odbc_13.2.0.02_rel_notes) | 2023 Feb 14 |
-| [13.02.0000.01](02_odbc_13.2.0.01_rel_notes) | 2022 May 17 |
-| [13.01.0000.02](03_odbc_13.1.0.02_rel_notes) | 2021 Dec 12 |
-| [13.01.0000.01](04_odbc_13.1.0.01_rel_notes) | 2021 Sep 14 |
-| [13.00.0000.01](05_odbc_13.0.0.01_rel_notes) | 2020 Nov 19 |
+| [13.02.0000.02](odbc_13.2.0.02_rel_notes) | 14 Feb 2023 |
+| [13.02.0000.01](02_odbc_13.2.0.01_rel_notes) | 17 May 2022 |
+| [13.01.0000.02](03_odbc_13.1.0.02_rel_notes) | 12 Dec 2021 |
+| [13.01.0000.01](04_odbc_13.1.0.01_rel_notes) | 14 Sep 2021 |
+| [13.00.0000.01](05_odbc_13.0.0.01_rel_notes) | 19 Nov 2020 |
diff --git a/product_docs/docs/odbc_connector/13/installing/index.mdx b/product_docs/docs/odbc_connector/13/installing/index.mdx
index 46665b52e39..822069c7cad 100644
--- a/product_docs/docs/odbc_connector/13/installing/index.mdx
+++ b/product_docs/docs/odbc_connector/13/installing/index.mdx
@@ -10,7 +10,6 @@ redirects:
- ../03_edb-odbc_overview/01_installing_edb-odbc
- /odbc_connector/13/03_installing_edb_odbc/
- /odbc_connector/13/03_installing_edb_odbc/01_installing_linux/
- - /odbc_connector/13/03_installing_edb_odbc/14_installing_windows/
- /odbc_connector/13/03_installing_edb_odbc/01_installing_linux/07_odbc13_ubuntu20_deb10_x86/
- /odbc_connector/13/03_installing_edb_odbc/01_installing_linux/ibm_power_ppc64le/12_odbc13_sles12_ppcle/
diff --git a/product_docs/docs/pem/8/considerations/pem_pgbouncer/configuring_pgBouncer.mdx b/product_docs/docs/pem/8/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
index 62c7c935f0b..bdc762bd704 100644
--- a/product_docs/docs/pem/8/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
+++ b/product_docs/docs/pem/8/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
@@ -5,11 +5,6 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/configuring_pgBouncer.html"
redirects:
- /pem/latest/pem_pgbouncer/03_configuring_pgBouncer/
- - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/
- - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection/
- - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver/
- - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer/
- - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent/
---
You must configure PgBouncer to work with the PEM database server. This example runs PgBouncer as the enterprisedb system user and outlines the process of configuring pgBouncer version 1.9 or later.
diff --git a/product_docs/docs/pem/8/installing/index.mdx b/product_docs/docs/pem/8/installing/index.mdx
index cc4f62e1ff7..39625ef568d 100644
--- a/product_docs/docs/pem/8/installing/index.mdx
+++ b/product_docs/docs/pem/8/installing/index.mdx
@@ -11,10 +11,8 @@ redirects:
- /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/
- /pem/8/installing_pem_server/
- /pem/8/installing_pem_server/pem_server_inst_linux/
- - /pem/8/installing_pem_server/pem_server_inst_linux/
- - /pem/8/installing_pem_server/pem_server_inst_linux/configuring_the_pem_server_on_linux/
- - /pem/8/installing_pem_server/pem_server_inst_windows/
- - /pem/8/installing_pem_server/prerequisites_for_installing_pem_server/
+ - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
+ - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
navigation:
- prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_8.mdx b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_8.mdx
index 2954fa7420d..b3f840ce754 100644
--- a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_8.mdx
+++ b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_8.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel8_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel8_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel8_ppcle/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_9.mdx b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_9.mdx
index b5a1c8361af..559b1533294 100644
--- a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_9.mdx
+++ b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_rhel_9.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel9_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel9_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel9_ppcle/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_12.mdx b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_12.mdx
index cb1cc3ecae9..b6dfb9a2131 100644
--- a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_12.mdx
+++ b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_12.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles12_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles12_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles12_ppcle/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_15.mdx b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_15.mdx
index 6b8c7bc75ba..ebfa6a0afe6 100644
--- a/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_15.mdx
+++ b/product_docs/docs/pem/8/installing/linux_ppc64le/pem_sles_15.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles15_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles15_ppcle/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles15_ppcle/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_centos_7.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_centos_7.mdx
index f86ae70a1fe..e8d93c7f10a 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_centos_7.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_centos_7.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_centos7_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_centos7_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_centos7_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_10.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_10.mdx
index a7e5cdc3343..a0154d4672c 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_10.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_10.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_deb10_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_deb10_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_deb10_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_11.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_11.mdx
index f8e481ab671..376d1ed8678 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_11.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_debian_11.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_deb11_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_deb11_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_deb11_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_8.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_8.mdx
index 2543ec0d33f..43009817530 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_8.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_8.mdx
@@ -12,8 +12,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_other_linux8_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_other_linux8_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_other_linux8_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_9.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_9.mdx
index ac12dfa0eb0..5415e049ae6 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_9.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_other_linux_9.mdx
@@ -12,8 +12,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_other_linux9_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_other_linux9_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_other_linux9_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_7.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_7.mdx
index 0dca89ff010..a3376575db7 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_7.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_7.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel7_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel7_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel7_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_8.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_8.mdx
index e2f334a8819..4933c0eb673 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_8.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_8.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel8_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel8_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel8_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_9.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_9.mdx
index 61169163d8c..bc70cc9d94d 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_9.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_rhel_9.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel9_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel9_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel9_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_12.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_12.mdx
index b82c9d8dcef..6b81f0c92fb 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_12.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_12.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles12_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles12_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles12_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_15.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_15.mdx
index 77f2958477a..a1a8e5d47c4 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_15.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_sles_15.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles15_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles15_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_18.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_18.mdx
index 245a147b1f3..23f710a0f30 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_18.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_18.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu18_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu18_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu18_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_20.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_20.mdx
index c0b149e6835..c483d47fc3a 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_20.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_20.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu20_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu20_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu20_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_22.mdx b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_22.mdx
index 93c484e6d68..ae21bff5cb5 100644
--- a/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_22.mdx
+++ b/product_docs/docs/pem/8/installing/linux_x86_64/pem_ubuntu_22.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu22_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu22_x86/
- /pem/8/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu22_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/8/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
index c25887e3c24..39c05d55cc9 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_pgBouncer.mdx
@@ -5,6 +5,7 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/configuring_pgBouncer.html"
redirects:
- /pem/latest/pem_pgbouncer/03_configuring_pgBouncer/
+ - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer/
---
You must configure PgBouncer to work with the PEM database server.
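A minimal, illustrative sketch of the kind of `pgbouncer.ini` entries involved is shown below. The database name, port, and paths are assumptions, not the PEM-specific values:

```shell
# Illustrative only: write a sample of the pgbouncer.ini entries used to pool
# connections to a PEM backend database. The database name, port, and paths are
# assumptions; merge equivalent entries into /etc/pgbouncer/pgbouncer.ini.
cat > /tmp/pgbouncer-pem-sample.ini <<'EOF'
[databases]
pem = host=localhost port=5444 dbname=pem

[pgbouncer]
listen_addr = *
listen_port = 6432
auth_type = md5
auth_file = /etc/pgbouncer/userlist.txt
EOF
```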
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx
index 3bebefdbc2c..ca24ea8aec0 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/configuring_the_pem_agent.mdx
@@ -5,6 +5,7 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/configuring_the_pem_agent.html"
redirects:
- /pem/latest/pem_pgbouncer/04_configuring_the_pem_agent/
+ - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent/
---
You can use an RPM package to install a PEM agent. For detailed installation information, see [Installing the PEM agent](../../installing_pem_agent/).
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx
index 8a962aabf53..d6a460e5bd7 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/index.mdx
@@ -8,6 +8,7 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/index.html"
redirects:
- /pem/latest/pem_pgbouncer/
+- /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/
navigation:
- pem_server_pem_agent_connection_management_mechanism
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
index e2a76510044..ff5a0a68569 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/pem_server_pem_agent_connection_management_mechanism.mdx
@@ -5,6 +5,7 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/the_pem_server_pem_agent_connection_management_mechanism.html"
redirects:
- /pem/latest/pem_pgbouncer/01_the_pem_server_pem_agent_connection_management_mechanism/
+ - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection/
---
Each PEM agent connects to the PEM database server using the SSL certificates for each user. For example, an agent with `ID#1` connects to the PEM database server using the agent1 user.
diff --git a/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx b/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx
index 6aef1d3d6e0..d86c607e09c 100644
--- a/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx
+++ b/product_docs/docs/pem/9/considerations/pem_pgbouncer/preparing_the_pem_database_server.mdx
@@ -5,6 +5,7 @@ legacyRedirectsGenerated:
- "/edb-docs/d/edb-postgres-enterprise-manager/installation-getting-started/pgbouncer-configuration-guide/8.0/preparing_the_pem_database_server.html"
redirects:
- /pem/latest/pem_pgbouncer/02_preparing_the_pem_database_server/
+ - /pem/latest/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver/
---
You must configure the PEM database server to work with PgBouncer. This example shows how to configure the PEM database server.
diff --git a/product_docs/docs/pem/9/images/alert_copy.png b/product_docs/docs/pem/9/images/alert_copy.png
old mode 100755
new mode 100644
index 02f990ef970..b3fea692ae8
--- a/product_docs/docs/pem/9/images/alert_copy.png
+++ b/product_docs/docs/pem/9/images/alert_copy.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:72b715546e266575e40f4c4b85fdf883f3f4126345a48bdca981a2a93cc22d36
-size 62210
+oid sha256:da3c35c608983fc063285d304923bb05eb1dc4119270eafeeb910f8fe7659f19
+size 71544
diff --git a/product_docs/docs/pem/9/images/alert_history_report_general.png b/product_docs/docs/pem/9/images/alert_history_report_general.png
new file mode 100644
index 00000000000..173690a76d0
--- /dev/null
+++ b/product_docs/docs/pem/9/images/alert_history_report_general.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:063b3e8aab631ae507765aac68f7b0c60cf0b12eb8c5fdbf90138a1f4d9e92f4
+size 56260
diff --git a/product_docs/docs/pem/9/images/alerting_manage_alerts.png b/product_docs/docs/pem/9/images/alerting_manage_alerts.png
index 8571dbe614d..3c65cb44852 100644
--- a/product_docs/docs/pem/9/images/alerting_manage_alerts.png
+++ b/product_docs/docs/pem/9/images/alerting_manage_alerts.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5f65c79ea86b8355493a226ef6ae144ab1ced63ece1a1f12965e829ad5bd83b6
-size 227680
+oid sha256:94c7cb1020cd098f3a31c31da60f9f66990ea091aea34ebcef8235e28922c2b0
+size 197737
diff --git a/product_docs/docs/pem/9/images/default_email_template.png b/product_docs/docs/pem/9/images/default_email_template.png
new file mode 100644
index 00000000000..8290d5ce8fe
--- /dev/null
+++ b/product_docs/docs/pem/9/images/default_email_template.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79c0add8779b0b871605bc038df4e01545309fc135a694ce253f11f0c49fce2c
+size 162139
diff --git a/product_docs/docs/pem/9/images/email_template_edit.png b/product_docs/docs/pem/9/images/email_template_edit.png
new file mode 100644
index 00000000000..21cab62c1ae
--- /dev/null
+++ b/product_docs/docs/pem/9/images/email_template_edit.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606f6ac861a890c127c5a802358ca0715e5ee51fd9d6157abbda2c8a19e7e5ed
+size 205416
diff --git a/product_docs/docs/pem/9/images/email_template_restore.png b/product_docs/docs/pem/9/images/email_template_restore.png
new file mode 100644
index 00000000000..b8accf8e661
--- /dev/null
+++ b/product_docs/docs/pem/9/images/email_template_restore.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26887be4bb2bb27c5f2b55ad33b5ce1326f0d41ab2ba0596700112ce4bde3458
+size 155889
diff --git a/product_docs/docs/pem/9/images/pem_management_menu.png b/product_docs/docs/pem/9/images/pem_management_menu.png
index 87f65dadd79..d1cd49508d5 100644
--- a/product_docs/docs/pem/9/images/pem_management_menu.png
+++ b/product_docs/docs/pem/9/images/pem_management_menu.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e22937a7945d6e32f306794c2fb11b016f92e1b3ed56059c6f9982993fb59224
-size 145437
+oid sha256:2fd691019a28583b02c0a378e239bf7a5d967485b4f062814e06c90a2f664083
+size 213670
diff --git a/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx b/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
index 370ef1e2387..6feec547ee0 100644
--- a/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
+++ b/product_docs/docs/pem/9/installing/configuring_the_pem_server_on_linux.mdx
@@ -22,18 +22,24 @@ The PEM server package includes a script (`configure-pem-server.sh`) to help aut
When invoking the script, you can include command line options to specify configuration properties. The script prompts you for values that you omit on the command line. The accepted options are:
-| Option | Description |
-| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `-acp` | Defines PEM agent certificate path. The default is `/root/.pem`. |
-| `-ci` | CIDR-formatted network address range that agents connect to the server from, to be added to the server's `pg_hba.conf` file, for example, `192.168.1.0/24`. The default is `0.0.0.0/0`. |
-| `-dbi` | The directory for the database server installation, for example, `/usr/edb/as12` for EDB Postgres Advanced Server or `/usr/pgsql-12` for PostgreSQL. |
-| `-ds` | The unit file name of the PEM database server. For EDB Postgres Advanced Server, the default file name is `edb-as-12`. For PostgreSQL, it's `postgresql-12`. |
-| `-ho` | The host address of the PEM database server. |
-| `-p` | The port number of the PEM database server. |
-| `-ps` | The service name of the pemagent. The default value is `pemagent`. |
-| `-sp` | The superuser password of the PEM database server. This value is required. |
-| `-su` | The superuser name of the PEM database server. |
-| `-t` | The installation type: Specify `1` if the configuration is for web services and backend database, `2` if you're configuring web services, or `3` if you're configuring the backend database. If you specify `3`, the database must reside on the local host. |
+| Option | Description |
+| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `-acp` or `--pemagent-certificate-path` | PEM agent certificate path. The default is `/root/.pem`. |
+| `-ci` or `--cidr-address` | CIDR-formatted network address range from which agents connect to the server, added to the server's `pg_hba.conf` file, for example, `192.168.1.0/24`. The default is `0.0.0.0/0`. |
+| `-dbi` or `--db-install-path` | Directory for the database server installation, for example, `/usr/edb/as12` for EDB Postgres Advanced Server or `/usr/pgsql-12` for PostgreSQL. |
+| `-ds` or `--db-unitfile` | Unit file name of the PEM database server. For EDB Postgres Advanced Server, the default file name is `edb-as-12`. For PostgreSQL, it's `postgresql-12`. |
+| `-ho` or `--host` | Host address of the PEM database server. |
+| `-p` or `--port` | Port number of the PEM database server. |
+| `-ps` or `--pemagent-servicename` | Service name of the PEM agent. The default value is `pemagent`. |
+| `-sp` or `--superpassword` | Superuser password of the PEM database server. This value is required. |
+| `-su` or `--superuser` | Superuser name of the PEM database server. |
+| `-au` or `--use-agent-user` | PEM agent user name. |
+| `-t` or `--type` | Installation type: Specify `1` if the configuration is for web services and backend database, `2` if you're configuring web services, or `3` if you're configuring the backend database. If you specify `3`, the database must reside on the local host. |
+| `-un` or `--uninstall-pem-server` | Uninstalls the PEM server. |
+| `-nhc` or `--no-hba-change` | Skips the changes to the `pg_hba.conf` and `pg_config` files. |
+| `-uac` or `--use-agent-sslcert` | Reuses the existing agent SSL certificate while configuring the PEM server. |
+| `-uak` or `--use-agent-sslkey` | Reuses the existing agent SSL key while configuring the PEM server. |
+| `-h` or `--help` | Lists all the available options for configuring the PEM server. |
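+
+For example, a single-host configuration of both web services and the backend database might be invoked as follows. The host address, port, superuser name, and network range shown are placeholders; replace them with your own values:
+
+```shell
+# Placeholder values shown; replace the host, port, superuser, password, and CIDR range.
+/usr/edb/pem/bin/configure-pem-server.sh \
+  -t 1 \
+  -ho 192.168.1.10 \
+  -p 5444 \
+  -su enterprisedb \
+  -sp <superuser-password> \
+  -ci 192.168.1.0/24 \
+  -acp /root/.pem
+```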
If you don't provide configuration properties on the command line, the script prompts you for values. When you invoke the script, choose from:
@@ -46,7 +52,7 @@ If you don't provide configuration properties on the command line, the script pr
!!! Note
If the web server and the backend database (PEM server) reside on separate hosts, configure the database server first (option 3) and then web services (option 2). The script proceeds only if the backend database is configured before web services.
-After selecting a configuration option, the script prompts you for configuration properties. When the script completes, it creates the objects required by the PEM server or performs the configuration steps required. To view help for the script, use the command:
+After selecting a configuration option, the script prompts you for configuration properties. When the script finishes, it creates the objects required by the PEM server or performs the configuration steps required. To view help for the script, use the command:
```shell
/usr/edb/pem/bin/configure-pem-server.sh -help
diff --git a/product_docs/docs/pem/9/installing/index.mdx b/product_docs/docs/pem/9/installing/index.mdx
index 102ddf5859b..68168cac232 100644
--- a/product_docs/docs/pem/9/installing/index.mdx
+++ b/product_docs/docs/pem/9/installing/index.mdx
@@ -11,10 +11,8 @@ redirects:
- /pem/latest/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/
- /pem/latest/installing_pem_server/
- /pem/latest/installing_pem_server/pem_server_inst_linux/
- - /pem/latest/installing_pem_server/pem_server_inst_linux/
- - /pem/latest/installing_pem_server/pem_server_inst_linux/configuring_the_pem_server_on_linux/
- - /pem/latest/installing_pem_server/pem_server_inst_windows/
- - /pem/latest/installing_pem_server/prerequisites_for_installing_pem_server/
+ - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
+ - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
navigation:
- prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_8.mdx b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_8.mdx
index 15010186495..5bb7291718e 100644
--- a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_8.mdx
+++ b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_8.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel8_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel8_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel8_ppcle/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_9.mdx b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_9.mdx
index fdb53e2792d..b703600ed07 100644
--- a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_9.mdx
+++ b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_rhel_9.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel9_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel9_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel9_ppcle/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_12.mdx b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_12.mdx
index 611a6053641..be0456cfbdb 100644
--- a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_12.mdx
+++ b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_12.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles12_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles12_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles12_ppcle/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_15.mdx b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_15.mdx
index 1939ad416cb..42298c5aa22 100644
--- a/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_15.mdx
+++ b/product_docs/docs/pem/9/installing/linux_ppc64le/pem_sles_15.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles15_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles15_ppcle/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles15_ppcle/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_centos_7.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_centos_7.mdx
index db1eaf2e00e..7a9dbc8084a 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_centos_7.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_centos_7.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_centos7_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_centos7_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_centos7_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_10.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_10.mdx
index ee1f6f4cec0..6d503840d49 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_10.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_10.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_deb10_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_deb10_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_deb10_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_11.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_11.mdx
index dc11f912c93..cd181e97167 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_11.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_debian_11.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_deb11_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_deb11_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_deb11_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_8.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_8.mdx
index dadd4cc0ccf..0f858ebf69a 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_8.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_8.mdx
@@ -12,8 +12,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_other_linux8_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_other_linux8_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_other_linux8_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_9.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_9.mdx
index def653b48af..0c84be7ed56 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_9.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_other_linux_9.mdx
@@ -12,8 +12,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_other_linux9_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_other_linux9_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_other_linux9_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_7.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_7.mdx
index 493922d9c21..ce5050e7473 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_7.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_7.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel7_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel7_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel7_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_8.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_8.mdx
index ab08f624605..c17e2a189dd 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_8.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_8.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel8_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel8_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel8_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_9.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_9.mdx
index 6632df499df..0c48e0fe3ad 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_9.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_rhel_9.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_rhel9_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_rhel9_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_rhel9_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_12.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_12.mdx
index bd839cf13b1..bfeb88058ac 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_12.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_12.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles12_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles12_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles12_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_15.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_15.mdx
index c3c5cccf5a9..dd7a7a09612 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_15.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_sles_15.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_sles15_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_sles15_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
!!! Note
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_18.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_18.mdx
index 83e10fc3333..50b2cca834b 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_18.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_18.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu18_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu18_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu18_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_20.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_20.mdx
index 80bf10367c4..94f4d591b49 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_20.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_20.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu20_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu20_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu20_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_22.mdx b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_22.mdx
index b06f1939453..58f86de0312 100644
--- a/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_22.mdx
+++ b/product_docs/docs/pem/9/installing/linux_x86_64/pem_ubuntu_22.mdx
@@ -11,8 +11,6 @@ redirects:
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/ppc64le/pem_server_ubuntu22_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86_amd64/pem_server_ubuntu22_x86/
- /pem/9/installing_pem_server/installing_on_linux/using_edb_repository/x86/pem_server_ubuntu22_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/03_pem_server_sles15_x86/
- - /pem/9/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/03_installing_pem_server_using_edb_repository/04_pem_server_sles12_x86/
---
## Prerequisites
diff --git a/product_docs/docs/pem/9/monitoring_performance/alerts.mdx b/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
index 0c8ca924e5a..d84e2b4ed4b 100644
--- a/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
+++ b/product_docs/docs/pem/9/monitoring_performance/alerts.mdx
@@ -62,6 +62,7 @@ Use the Quick Links toolbar to open dialog boxes and tabs for managing alerts:
- Select **Copy Alerts** to open the Copy Alert Configuration dialog box and copy an alert definition.
- Select **Alert Templates** to open the **Alert Template** tab and modify or create an alert template.
+- Select **Email Templates** to open the Email Template dialog box and modify the default email template to customize an email notification.
- Select **Email Groups** to open the **Email Groups** tab and modify or create an email group.
- Select **Webhooks** to open the **Webhooks** tab and create or manage the webhooks endpoints.
- Select **Server Configurations** to open the Server Configuration dialog box and review or modify server configuration settings.
@@ -628,7 +629,11 @@ The Copy Alert Configuration dialog box copies all alerts from the object select
To copy alerts to multiple objects at once, select a parent node of the targets. For example, to copy the alerts from one table to all tables in a schema, select the check box next to the schema. PEM copies alerts only to targets that are the same type as the source object.
-Select **Ignore duplicates** to prevent PEM from updating any existing alerts on the target objects with the same name as those being copied. Select **Replace duplicates** to replace existing alerts with alerts of the same name from the source object.
+Select **Ignore duplicates** to prevent PEM from updating any existing alerts on the target objects with the same name as those being copied.
+
+Select **Replace duplicates** to replace existing alerts with alerts of the same name from the source object.
+
+Select **Delete Existing Alerts** to delete all existing alerts from the target object and then copy all alerts from the source object to the target object.
Select **Configure Alerts** to copy the alerts from the source object to all objects of the same type in or under those objects selected on the Copy Alert Configuration dialog box.
diff --git a/product_docs/docs/pem/9/monitoring_performance/notifications.mdx b/product_docs/docs/pem/9/monitoring_performance/notifications.mdx
index eb0d1ba755c..f8c131552fc 100644
--- a/product_docs/docs/pem/9/monitoring_performance/notifications.mdx
+++ b/product_docs/docs/pem/9/monitoring_performance/notifications.mdx
@@ -265,3 +265,26 @@ You can use the following command to confirm that Nagios is properly configured:
After confirming that Nagios is configured correctly, restart the Nagios service:
`/usr/local/nagios/bin/nagios -d /usr/local/nagios/etc/nagios.cfg`
+
+## Customizing email templates
+
+PEM monitors your system for conditions that require user attention and sends email notifications. Use **Email Templates** to customize the email subject and the payload that the server sends if the current values deviate from the threshold values specified by an alert.
+
+The following default email templates are customizable:
+
+- Alert detected
+- Alert level increased
+- Alert level decreased
+- Alert cleared
+- Alert reminder
+- Job success
+- Job failure
+- Job cancellation
+- Job step
+- Job step (Database server)
+
+To customize an email template, select the edit button next to the template name. Edit the **Subject** and **Payload** fields as needed, using the available list of placeholders. After modifying the field values, select **Save**.
+
+If a default email template is customized, a green tick mark is displayed for that template in the `Custom Template?` column on the **Email Templates** tab.
+
+To restore a customized email template to the default template, select the custom template and then select the undo button at the top of the **Email Templates** tab.
diff --git a/product_docs/docs/pem/9/pem_rel_notes/930_rel_notes.mdx b/product_docs/docs/pem/9/pem_rel_notes/930_rel_notes.mdx
new file mode 100644
index 00000000000..2d7a69d1158
--- /dev/null
+++ b/product_docs/docs/pem/9/pem_rel_notes/930_rel_notes.mdx
@@ -0,0 +1,19 @@
+---
+title: "Version 9.3.0"
+---
+
+New features, enhancements, bug fixes, and other changes in PEM 9.3.0 include:
+
+| Type | Description |
+| ----------- | -------------------------------------------------------------------------------------------------|
+| Enhancement | Added the functionality to delete all the existing alerts while using the copy alert feature. |
+| Enhancement | Added PostgreSQL and EDB Postgres Advanced Server 16 beta support as a backend database server and monitoring server. |
+| Enhancement | Added support to customize the default email templates. |
+| Enhancement | Added support to download the alert history report for agents and servers. |
+| Enhancement | Added options to reuse the existing agent SSL certificate and key file while configuring the PEM server. |
+| Bug Fix | Added the functionality to add and view ignored mount points in the agent REST API. |
+| Bug Fix | Added the `hostaddr` body parameter to the server REST API. |
+| Bug Fix | Fixed an issue by removing the newline character from the alert details in the webhook payload. |
+| Bug Fix | Added support to allow webhook endpoint registration without providing the client SSL certificates, key files, or CA certificates, using the `allow_insecure_webhooks` parameter in the `agent.cfg` file. |
+| Bug Fix | Fixed an issue related to the backup sizes displayed on the PEM Barman dashboard. |
+| Bug Fix | Fixed an issue with unformatted data while importing the audit logs. |
\ No newline at end of file
diff --git a/product_docs/docs/pem/9/pem_rel_notes/index.mdx b/product_docs/docs/pem/9/pem_rel_notes/index.mdx
index 778ae3c93c2..03b08dbda47 100644
--- a/product_docs/docs/pem/9/pem_rel_notes/index.mdx
+++ b/product_docs/docs/pem/9/pem_rel_notes/index.mdx
@@ -1,6 +1,7 @@
---
title: "Release notes"
navigation:
+ - 930_rel_notes
- 922_rel_notes
- 921_rel_notes
- 920_rel_notes
@@ -13,6 +14,7 @@ The Postgres Enterprise Manager (PEM) documentation describes the latest version
| Version | Release Date | Upstream Merges | Accessibility Conformance |
| ------------------------- | ------------ | --------------------------------------------------------------------------| --------------------------------------------------------------------------------------------------- |
+| [9.3.0](930_rel_notes) | 31 Aug 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.2.2](922_rel_notes) | 14 Jul 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.2.1](921_rel_notes) | 03 Jul 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
| [9.2.0](920_rel_notes) | 24 May 2023 | NA | [Conformance Report](https://www.enterprisedb.com/accessibility) |
diff --git a/product_docs/docs/pem/9/pem_web_interface.mdx b/product_docs/docs/pem/9/pem_web_interface.mdx
index a9acc238663..d824e58d78e 100644
--- a/product_docs/docs/pem/9/pem_web_interface.mdx
+++ b/product_docs/docs/pem/9/pem_web_interface.mdx
@@ -306,7 +306,7 @@ Use the **Management** menu to access the following PEM features.
| **Postgres Log Analysis Expert** | Open the Postgres Log Analysis Expert dialog box to analyze log file contents for usage trends. |
| **Scheduled Tasks** | Open the **Scheduled Tasks** tab and review tasks that are pending or recently completed. |
| **Tuning Wizard** | Open the Tuning Wizard dialog box to generate a set of tuning recommendations for your server. |
-| **Reports** | Open the Reports dialog box to generate the system configuration report and core usage report for your server. |
+| **Reports** | Open the Reports dialog box to generate the alert history report, system configuration report, or core usage report for your server. |
| **Schedule Alert Blackout** | Open the Schedule Alert Blackout dialog box and schedule the alerts blackout for your servers and agents. |
### Dashboards menu
diff --git a/product_docs/docs/pem/9/reports.mdx b/product_docs/docs/pem/9/reports.mdx
index 2d51069dd4d..c3a85f1dbc1 100644
--- a/product_docs/docs/pem/9/reports.mdx
+++ b/product_docs/docs/pem/9/reports.mdx
@@ -11,8 +11,9 @@ redirects:
You can generate the System Configuration report and Core Usage report for all locally and remotely managed servers. To generate this report, select **Management > Reports**.
-Reports has following options:
+Reports has the following options:
+- Alert History Report (JSON)
- System Configuration Report (JSON)
- System Configuration Report (HTML)
- Core Usage Report (JSON)
@@ -22,9 +23,21 @@ Only superusers or the users with the pem_admin role can download the System Con
Information in these reports shows the latest probe run time.
-## System Configuration Report
+## Alert History report
-The System Configuration Report provides detailed information about the PEM Agents group, PEM Server Directory group, and custom groups listed under the browser tree. These groups can contain Postgres Enterprise Manager, PEM agent, and database servers. You can download this report in HTML and JSON formats.
+The Alert History report provides detailed information about the alert history at the agent or server level, in JSON format.
+
+Select **Management > Reports > Alert History Report**. From the dialog box, select your options:
+
+- **Agent/Server Name** — Select the agents and servers from the list. You can also select **Overall System Report** to generate a report for all the registered and active agents and servers.
+
+- **Timeframe** — Select the required timeframe from the list.
+
+- **Alert Types** — Select the alert types you want to generate the alert history report for.
+
+## System Configuration report
+
+The System Configuration report provides detailed information about the PEM Agents group, PEM Server Directory group, and custom groups listed under the browser tree. These groups can contain PEM server, PEM agent, and database servers. You can download this report in HTML and JSON formats.
The Postgres Enterprise Manager Summary provides details about:
diff --git a/product_docs/docs/pgbouncer/1/installing/index.mdx b/product_docs/docs/pgbouncer/1/installing/index.mdx
index 48b3dbc03ec..4583671ac0f 100644
--- a/product_docs/docs/pgbouncer/1/installing/index.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/index.mdx
@@ -11,6 +11,8 @@ redirects:
- ../03_installing_pgbouncer_on_an_sles_host
- ../01_installation
- ../01_installation/install_on_linux/
+ - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
+ - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
navigation:
- linux_x86_64
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_8.mdx b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_8.mdx
index fef7c052c73..f5a9b3381b5 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_8.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_8.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on RHEL 8 ppc64le
redirects:
- /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/pgbouncer_rhel8_ppcle
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_9.mdx b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_9.mdx
index f86856ae73c..2cc997e8224 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_9.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_rhel_9.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on RHEL 9 ppc64le
redirects:
- /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/pgbouncer_rhel9_ppcle
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_12.mdx b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_12.mdx
index ec38db3e14b..3dd802684ac 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_12.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_12.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on SLES 12 ppc64le
redirects:
- /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/pgbouncer_sles12_ppcle
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_15.mdx b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_15.mdx
index df902a91c31..413a5d3e0b2 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_15.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_ppc64le/pgbouncer_sles_15.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on SLES 15 ppc64le
redirects:
- /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/pgbouncer_sles15_ppcle
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_centos_7.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_centos_7.mdx
index 1e64adf784a..e0f96e36003 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_centos_7.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_centos_7.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on CentOS 7 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_centos7_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_10.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_10.mdx
index 09a59ea7efc..a47083850e4 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_10.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_10.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on Debian 10 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_deb10_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_11.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_11.mdx
index a4ca6c3f58c..772872c195d 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_11.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_debian_11.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on Debian 11 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_deb11_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_8.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_8.mdx
index 0a12fc2e65c..ab517586f63 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_8.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_8.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on AlmaLinux 8 or Rocky Linux 8 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_other_linux8_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_9.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_9.mdx
index 821ca7a91c7..394ce1b8825 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_9.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_other_linux_9.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on AlmaLinux 9 or Rocky Linux 9 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_other_linux9_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_7.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_7.mdx
index 6c7b285d8ef..849a32c9176 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_7.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_7.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on RHEL 7 or OL 7 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_rhel7_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_8.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_8.mdx
index 6f76c41d65d..5b0672115df 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_8.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_8.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on RHEL 8 or OL 8 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_rhel8_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_9.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_9.mdx
index 252b18ce311..dd9da039b93 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_9.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_rhel_9.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on RHEL 9 or OL 9 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_rhel9_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_12.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_12.mdx
index 09af0f048bf..1a391ebfcda 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_12.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_12.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on SLES 12 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_sles12_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_15.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_15.mdx
index 84aff31be51..21bd3c97d2b 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_15.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_sles_15.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on SLES 15 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_sles15_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_18.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_18.mdx
index f44aaa397c1..7b5c43a712a 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_18.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_18.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on Ubuntu 18.04 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_ubuntu18_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_20.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_20.mdx
index ae0010efaa3..d9be77de9e5 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_20.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_20.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on Ubuntu 20.04 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_ubuntu20_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_22.mdx b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_22.mdx
index 7ecb661ac78..c080c34c4ec 100644
--- a/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_22.mdx
+++ b/product_docs/docs/pgbouncer/1/installing/linux_x86_64/pgbouncer_ubuntu_22.mdx
@@ -7,8 +7,6 @@ title: Installing EDB pgBouncer on Ubuntu 22.04 x86_64
redirects:
- /pgbouncer/1/01_installation/install_on_linux/x86_amd64/pgbouncer_ubuntu22_x86
- - /pgbouncer/1/01_installation/02_installing_pgbouncer_on_a_debian_or_ubuntu_host/
- - /pgbouncer/1/01_installation/install_on_linux/ibm_power_ppc64le/09_pgbouncer_rhel8_ppcle/
---
## Prerequisites
diff --git a/product_docs/docs/pgbouncer/1/pgbouncer_rel_notes/index.mdx b/product_docs/docs/pgbouncer/1/pgbouncer_rel_notes/index.mdx
index c8c1c6e8829..c2a53ccf12d 100644
--- a/product_docs/docs/pgbouncer/1/pgbouncer_rel_notes/index.mdx
+++ b/product_docs/docs/pgbouncer/1/pgbouncer_rel_notes/index.mdx
@@ -8,9 +8,9 @@ The EDB PgBouncer documentation describes the latest version of EDB PgBouncer 1,
| Version | Release date | Upstream merges |
| ------------------------------ | ------------ | --------------------------------------------------------------------------------- |
-| [1.19.0.0](06_11900_rel_notes) | 2023 Jun 07 | Upstream [1.19.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-119x)
-| [1.18.0.0](07_11800_rel_notes) | 2023 Feb 14 | Upstream [1.18.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-118x)
-| [1.17.0.0](08_11700_rel_notes) | 2022 Aug 04 | Upstream [1.17.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-117x)
-| [1.16.1.0](09_11610_rel_notes) | 2021 Dec 11 | Upstream [1.16.1.0](https://www.pgbouncer.org/changelog.html#pgbouncer-116x) |
-| [1.16.0.1](10_11601_rel_notes) | 2021 Jun 10 | Upstream [1.16.0.1](https://www.pgbouncer.org/changelog.html#pgbouncer-116x) |
+| [1.19.0.0](06_11900_rel_notes) | 07 Jun 2023 | Upstream [1.19.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-119x) |
+| [1.18.0.0](07_11800_rel_notes) | 14 Feb 2023 | Upstream [1.18.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-118x) |
+| [1.17.0.0](08_11700_rel_notes) | 04 Aug 2022 | Upstream [1.17.0.0](https://www.pgbouncer.org/changelog.html#pgbouncer-117x) |
+| [1.16.1.0](09_11610_rel_notes) | 11 Dec 2021 | Upstream [1.16.1.0](https://www.pgbouncer.org/changelog.html#pgbouncer-116x) |
+| [1.16.0.1](10_11601_rel_notes) | 10 Jun 2021 | Upstream [1.16.0.1](https://www.pgbouncer.org/changelog.html#pgbouncer-116x) |
diff --git a/product_docs/docs/pgd/3.7/bdr/release-notes.mdx b/product_docs/docs/pgd/3.7/bdr/release-notes.mdx
index ce71ecd32d6..6806efce768 100644
--- a/product_docs/docs/pgd/3.7/bdr/release-notes.mdx
+++ b/product_docs/docs/pgd/3.7/bdr/release-notes.mdx
@@ -5,6 +5,53 @@ originalFilePath: release-notes.md
---
+## BDR 3.7.22 (2023 Aug 31)
+
+This is a maintenance release for BDR 3.7 that includes minor
+improvements as well as fixes for issues identified in previous
+versions.
+
+Also check the release notes for [pglogical 3.7.22](/pgd/3.7/pglogical/release-notes/#pglogical-3722) for resolved issues
+that affect BDR as well.
+
+
+### Resolved issues
+
+- Changed `bdr.autopartition_drop_partition()` signature to use text.
+
+- Autopartition: Drop the partition only if it exists.
+  This helps recovery in cases where duplicate drop_partition work items are created.
+
+- Fixed a memory leak in `bdr.sequence_alloc` by modifying the missing catalog signature.
+
+- Prevented the superuser check when a GUC was specified on the PG command line.
+
+- Fixed the check for a malformed connection string to prevent failure in `bdr.create_node()`. (RT95453)
+
+- Backported the `bdr.accept_connections` GUC.
+
+- Fixed a memory leak in `bdr.sequence_alloc`.
+
+- Removed the txn_config entry from the ReorderBuffer hash table.
+
+- Ignored the global_lock check from repset_func when SDW is enabled.
+
+- Added check for conflicting node names.
+
+- Fixed an issue whereby a crash occurred when the BDR extension was used with pgaudit.
+
+- Fixed an issue by allowing a logical join of a node even when there are foreign key constraint violations. (RT91745)
+
+
+### Upgrades
+
+This release supports upgrading from the following versions of BDR:
+
+ - 3.7.9 and higher
+ - 3.6.29 and higher
+
## BDR 3.7.21 (2023 May 16)
This is a maintenance release for BDR 3.7 that includes minor
diff --git a/product_docs/docs/pgd/3.7/harp/01_release_notes/index.mdx b/product_docs/docs/pgd/3.7/harp/01_release_notes/index.mdx
index 722e81914b4..190f1efe85f 100644
--- a/product_docs/docs/pgd/3.7/harp/01_release_notes/index.mdx
+++ b/product_docs/docs/pgd/3.7/harp/01_release_notes/index.mdx
@@ -25,15 +25,15 @@ The release notes in this section provide information on what was new in each re
| Version | Release Date |
| ----------------------- | ------------ |
-| [2.3.1](harp2.3.1_rel_notes) | 2023 Jul 27 |
-| [2.3.0](harp2.3.0_rel_notes) | 2023 Jul 12 |
-| [2.2.3](harp2.2.3_rel_notes) | 2023 May 16 |
-| [2.2.2](harp2.2.2_rel_notes) | 2023 Mar 30 |
-| [2.2.1](harp2.2.1_rel_notes) | 2022 Nov 16 |
-| [2.2.0](harp2.2.0_rel_notes) | 2022 Aug 22 |
-| [2.1.1](harp2.1.1_rel_notes) | 2022 Jun 21 |
-| [2.1.0](harp2.1.0_rel_notes) | 2022 May 17 |
-| [2.0.3](harp2.0.3_rel_notes) | 2022 Mar 31 |
-| [2.0.2](harp2.0.2_rel_notes) | 2022 Feb 24 |
-| [2.0.1](harp2.0.1_rel_notes) | 2021 Jan 31 |
-| [2.0.0](harp2_rel_notes) | 2021 Dec 01 |
+| [2.3.1](harp2.3.1_rel_notes) | 27 Jul 2023 |
+| [2.3.0](harp2.3.0_rel_notes) | 12 Jul 2023 |
+| [2.2.3](harp2.2.3_rel_notes) | 16 May 2023 |
+| [2.2.2](harp2.2.2_rel_notes) | 30 Mar 2023 |
+| [2.2.1](harp2.2.1_rel_notes) | 16 Nov 2022 |
+| [2.2.0](harp2.2.0_rel_notes) | 22 Aug 2022 |
+| [2.1.1](harp2.1.1_rel_notes) | 21 Jun 2022 |
+| [2.1.0](harp2.1.0_rel_notes) | 17 May 2022 |
+| [2.0.3](harp2.0.3_rel_notes) | 31 Mar 2022 |
+| [2.0.2](harp2.0.2_rel_notes) | 24 Feb 2022 |
+| [2.0.1](harp2.0.1_rel_notes) | 31 Jan 2021 |
+| [2.0.0](harp2_rel_notes) | 01 Dec 2021 |
diff --git a/product_docs/docs/pgd/3.7/pglogical/release-notes.mdx b/product_docs/docs/pgd/3.7/pglogical/release-notes.mdx
index a9e95e49632..ba1ca43544a 100644
--- a/product_docs/docs/pgd/3.7/pglogical/release-notes.mdx
+++ b/product_docs/docs/pgd/3.7/pglogical/release-notes.mdx
@@ -5,6 +5,28 @@ originalFilePath: release-notes.md
---
+## pglogical 3.7.22
+
+This is a maintenance release for pglogical 3.7 which includes minor
+improvements as well as fixes for issues identified previously.
+
+### Resolved Issues
+
+- Fixed a bug in the handling of memory contexts in the presence of triggers. (RT95948)
+
+- Ensured that table slots are allocated in the query context and not in the per-tuple context.
+
+- Fixed a bug during binary upgrade to avoid an issue during hook execution.
+
+
+### Upgrades
+
+This release supports upgrading from the following versions of pglogical:
+
+ - 3.7.9 and higher
+ - 3.6.29 and higher
+ - 2.4.0 and 2.4.1
+
## pglogical 3.7.21
This is a maintenance release for pglogical 3.7 which includes minor
diff --git a/product_docs/docs/pgd/4/harp/06_harp_manager.mdx b/product_docs/docs/pgd/4/harp/06_harp_manager.mdx
index c29559bf894..4c0f362f540 100644
--- a/product_docs/docs/pgd/4/harp/06_harp_manager.mdx
+++ b/product_docs/docs/pgd/4/harp/06_harp_manager.mdx
@@ -109,20 +109,3 @@ There are no arguments to launch `harp-manager` as a forked daemon.
This software is designed to be launched through systemd or in a container
as a top-level process. This also means output is directed to STDOUT and STDERR
for capture and access through journald or an attached container terminal.
-
-## Disabling and reenabling HARP Manager control of Postgres
-
-You can temporarily pause HARP Manager control of Postgres. This
-results in a state where the daemon continues running but doesn't perform any
-operations that can affect existing behavior of the cluster. Reenabling
-management causes it to resume operation.
-
-An example of temporarily disabling node management is:
-
-```bash
-harpctl unmanage node node1
-```
-
-See [harpctl command-line tool](08_harpctl) for more details.
-
-Node management by HARP Manager is enabled by default.
diff --git a/product_docs/docs/pgd/4/rel_notes/index.mdx b/product_docs/docs/pgd/4/rel_notes/index.mdx
index 75e5ab566bc..777c4fe8636 100644
--- a/product_docs/docs/pgd/4/rel_notes/index.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/index.mdx
@@ -2,10 +2,11 @@
title: "EDB Postgres Distributed Release notes"
navTitle: "Release notes"
navigation:
-- pgd_4.3.1-2_rel_notes
-- pgd_4.3.1-1_rel_notes
+- pgd_4.3.2_rel_notes
+- pgd_4.3.1+p2_rel_notes
+- pgd_4.3.1+p1_rel_notes
- pgd_4.3.1_rel_notes
-- pgd_4.3.0-1_rel_notes
+- pgd_4.3.0+p1_rel_notes
- pgd_4.3.0_rel_notes
- pgd_4.2.2_rel_notes
- pgd_4.2.1_rel_notes
@@ -25,19 +26,23 @@ The EDB Postgres Distributed documentation describes the latest version of EDB P
| Release Date | EDB Postgres Distributed | BDR | HARP | CLI | TPAexec |
| ------------ | ---------------------------- | ----- | ----- | ----- | -------------------------------------------------------------------------------- |
-| 2023 July 27 | [4.3.1-2 ](pgd_4.3.1-2_rel_notes)| 4.3.1 | 2.3.1 | 1.1.1 | [23.19](/tpa/latest/rel_notes/tpa_23.19_rel_notes) |
-| 2023 July 12 | [4.3.1-1 ](pgd_4.3.1-1_rel_notes)| 4.3.1 | 2.3.0 | 1.1.1 | [23.19](/tpa/latest/rel_notes/tpa_23.19_rel_notes) |
-| 2023 May 17 | [4.3.1](pgd_4.3.1_rel_notes) | 4.3.1 | 2.2.3 | 1.1.1 | [23.17](/tpa/latest/rel_notes/tpa_23.17_rel_notes) |
-| 2023 Mar 30 | [4.3.0-1](pgd_4.3.0-1_rel_notes) | 4.3.0 | 2.2.2 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
-| 2023 Feb 14 | [4.3.0](pgd_4.3.0_rel_notes) | 4.3.0 | 2.2.1 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
-| 2022 Dec 14 | [4.2.2](pgd_4.2.2_rel_notes) | 4.2.2 | 2.2.1 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
-| 2022 Nov 16 | [4.2.1](pgd_4.2.1_rel_notes) | 4.2.1 | 2.2.1 | 1.1.0 | [23.7](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-237) |
-| 2022 Aug 22 | [4.2.0](pgd_4.2.0_rel_notes) | 4.2.0 | 2.2.0 | 1.1.0 | [23.5](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-235) |
-| 2022 June 21 | [4.1.1](pgd_4.1.1_rel_notes) | 4.1.1 | 2.1.1 | 1.0.0 | [23.2](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-232) |
-| 2022 May 17 | [4.1.0](pgd_4.1.0_rel_notes) | 4.1.0 | 2.1.0 | 1.0.0 | [23.1](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-231) |
-| 2022 Mar 31 | [4.0.3](pgd_4.0.3_rel_notes) | - | 2.0.3 | - | 22.10 |
-| 2022 Feb 24 | [4.0.2](pgd_4.0.2_rel_notes) | 4.0.2 | 2.0.2 | - | 22.9 |
-| 2022 Jan 31 | [4.0.1](pgd_4.0.1_rel_notes) | 4.0.1 | 2.0.1 | - | 22.6 |
-| 2021 Dec 01 | [4.0.0](pgd_4.0.0_rel_notes) | 4.0.0 | 2.0.0 | - | 21.9 |
+| 31 Aug 2023 | [4.3.2 ](pgd_4.3.2_rel_notes)| 4.3.2 | 2.3.1 | 1.1.1 | [23.20](/tpa/latest/rel_notes/tpa_23.20_rel_notes) |
+| 27 Jul 2023 | [4.3.1+p2 ](pgd_4.3.1+p2_rel_notes)| 4.3.1 | 2.3.1 | 1.1.1 | [23.19](/tpa/latest/rel_notes/tpa_23.19_rel_notes) |
+| 12 Jul 2023 | [4.3.1+p1 ](pgd_4.3.1+p1_rel_notes)| 4.3.1 | 2.3.0 | 1.1.1 | [23.19](/tpa/latest/rel_notes/tpa_23.19_rel_notes) |
+| 17 May 2023 | [4.3.1](pgd_4.3.1_rel_notes) | 4.3.1 | 2.2.3 | 1.1.1 | [23.17](/tpa/latest/rel_notes/tpa_23.17_rel_notes) |
+| 30 Mar 2023 | [4.3.0+p1](pgd_4.3.0+p1_rel_notes) | 4.3.0 | 2.2.2 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
+| 14 Feb 2023 | [4.3.0](pgd_4.3.0_rel_notes) | 4.3.0 | 2.2.1 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
+| 14 Dec 2022 | [4.2.2](pgd_4.2.2_rel_notes) | 4.2.2 | 2.2.1 | 1.1.0 | [23.9](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-239) |
+| 16 Nov 2022 | [4.2.1](pgd_4.2.1_rel_notes) | 4.2.1 | 2.2.1 | 1.1.0 | [23.7](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-237) |
+| 22 Aug 2022 | [4.2.0](pgd_4.2.0_rel_notes) | 4.2.0 | 2.2.0 | 1.1.0 | [23.5](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-235) |
+| 21 Jun 2022 | [4.1.1](pgd_4.1.1_rel_notes) | 4.1.1 | 2.1.1 | 1.0.0 | [23.2](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-232) |
+| 17 May 2022 | [4.1.0](pgd_4.1.0_rel_notes) | 4.1.0 | 2.1.0 | 1.0.0 | [23.1](/tpa/latest/rel_notes/tpa_23.1-11_rel_notes/#tpa-231) |
+| 31 Mar 2022 | [4.0.3](pgd_4.0.3_rel_notes) | - | 2.0.3 | - | 22.10 |
+| 24 Feb 2022 | [4.0.2](pgd_4.0.2_rel_notes) | 4.0.2 | 2.0.2 | - | 22.9 |
+| 31 Jan 2022 | [4.0.1](pgd_4.0.1_rel_notes) | 4.0.1 | 2.0.1 | - | 22.6 |
+| 01 Dec 2021 | [4.0.0](pgd_4.0.0_rel_notes) | 4.0.0 | 2.0.0 | - | 21.9 |
+
+!!! Note About version numbers
+PGD version 4 takes its version number from the BDR version bundled with the release. Where HARP, TPA, or the CLI has been updated without a change in the BDR version, the new package is shown with +p1, +p2, and so on appended to the version number.
+!!!
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0+p1_rel_notes.mdx
similarity index 68%
rename from product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx
rename to product_docs/docs/pgd/4/rel_notes/pgd_4.3.0+p1_rel_notes.mdx
index 188e5649f2f..0f0f4902fc5 100644
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0-1_rel_notes.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0+p1_rel_notes.mdx
@@ -1,14 +1,15 @@
---
-title: "Release notes for EDB Postgres Distributed version 4.3.0-1"
-navTitle: "Version 4.3.0-1"
+title: "Release notes for EDB Postgres Distributed version 4.3.0+p1"
+navTitle: "Version 4.3.0+p1"
---
-EDB Postgres Distributed version 4.3.0-1 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
+EDB Postgres Distributed version 4.3.0+p1 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
It includes a patch to HARP 2.2.1 to address a security vulnerability. If you are using HARP 2.2.1 or earlier, we recommend that you upgrade to HARP 2.2.2.
!!! Note
- This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later.
+This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later.
+!!!
| Component | Version | Type | Description |
| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------|
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx
index dcda6dfd9cc..78a8c2e004f 100644
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.0_rel_notes.mdx
@@ -8,7 +8,8 @@ redirects:
EDB Postgres Distributed version 4.3.0 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
!!! Note
- This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later.
+This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later.
+!!!
| Component | Version | Type | Description |
| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------|
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p1_rel_notes.mdx
similarity index 70%
rename from product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx
rename to product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p1_rel_notes.mdx
index a24fd832aed..ee94f2bc591 100644
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-1_rel_notes.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p1_rel_notes.mdx
@@ -1,15 +1,15 @@
---
-title: "Release notes for EDB Postgres Distributed version 4.3.1-1"
-navTitle: "Version 4.3.1-1"
+title: "Release notes for EDB Postgres Distributed version 4.3.1+p1"
+navTitle: "Version 4.3.1+p1"
---
-EDB Postgres Distributed version 4.3.1-1 is a minor release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
+EDB Postgres Distributed version 4.3.1+p1 is a minor release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
If you are using any previous release of HARP, we recommend that you upgrade to HARP 2.3.0.
!!! Note
- This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
-
+This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
+!!!
Component | Version | Type | Description
--------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p2_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p2_rel_notes.mdx
new file mode 100644
index 00000000000..31d2fc1e375
--- /dev/null
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1+p2_rel_notes.mdx
@@ -0,0 +1,18 @@
+---
+title: "Release notes for EDB Postgres Distributed version 4.3.1+p2"
+navTitle: "Version 4.3.1+p2"
+---
+
+EDB Postgres Distributed version 4.3.1+p2 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
+
+If you are using any previous release of HARP, we recommend that you upgrade to HARP 2.3.1.
+
+!!! Note
+This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
+!!!
+
+| Component | Version | Type | Description |
+| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------|
+| HARP | 2.3.1 | Bug fix | HARP Proxy cannot determine the leader after an extended outage. (BDR-3768) |
+| HARP | 2.3.1 | Bug fix | Upgraded the database driver library version, which fixes a `connect_timeout` issue when `sslmode=allow` or `sslmode=prefer` is used. |
+
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-2_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-2_rel_notes.mdx
deleted file mode 100644
index 987366fdb48..00000000000
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1-2_rel_notes.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: "Release notes for EDB Postgres Distributed version 4.3.1-2"
-navTitle: "Version 4.3.1-2"
----
-
-EDB Postgres Distributed version 4.3.1-2 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
-
-If you are using any previous release of HARP, we recommend that you upgrade to HARP 2.3.1.
-
-!!! Note
- This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
-
-
- Component | Version | Type | Description
- --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------
- HARP | 2.3.1 | Bug fix | HARP Proxy cannot determine the leader after an extended outage - (BDR-3768).
- HARP | 2.3.1 | Bug fix | Upgrade database driver library version which fixes `connect_timeout` issue when `sslmode=allow` or `sslmode=prefer`.
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx
index 66501aeccda..e567b8b0981 100644
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.1_rel_notes.mdx
@@ -6,7 +6,8 @@ navTitle: "Version 4.3.1"
EDB Postgres Distributed version 4.3.1 is a minor release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
!!! Note
- This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
+This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8, and later.
+!!!
| Component | Version | Type | Description |
| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------|
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.2_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.2_rel_notes.mdx
new file mode 100644
index 00000000000..0de7b66df27
--- /dev/null
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.2_rel_notes.mdx
@@ -0,0 +1,27 @@
+---
+title: "Release notes for EDB Postgres Distributed version 4.3.2"
+navTitle: "Version 4.3.2"
+---
+
+Released: 31 Aug 2023
+
+Updated: 04 Sep 2023
+
+EDB Postgres Distributed version 4.3.2 is a minor release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
+
+!!! Note
+This version is required for EDB Postgres Advanced Server versions 12.15, 13.11, 14.8 and later.
+!!!
+
+| Component | Version | Type | Description |
+| --------- | ------- | --------------- | ------------------------------------------------------------------------------------------------------------------------|
+| BDR | 4.3.2 | Bug fix | Fixed a crash in conflict triggers during bulk inserts. |
+| BDR | 4.3.2 | Bug fix | Prevented an overflow in group commit transaction tracking on large clusters. |
+| BDR | 4.3.2 | Bug fix | Added a missing column to the `bdr.sequence_alloc` catalog. |
+| BDR | 4.3.2 | Bug fix | Added validation of node connection strings in `bdr.create_node()`. (RT95453) |
+| BDR | 4.3.2 | Bug fix | Fixed a memory leak in group commit and CAMO caused by transaction tracking. |
+| BDR | 4.3.2 | Bug fix | Added check for conflicting node names. |
+| BDR | 4.3.2 | Bug fix | Fixed the consensus snapshot compatibility with PGD 3.7. (RT93022) |
+| BDR | 4.3.2 | Bug fix | When joining a node, don’t check foreign keys to match the behavior of replication. (RT91745) |
+| BDR | 4.3.2 | Bug fix | Improved consumption of replication queue on busy subscriptions. |
+| BDR | 4.3.2 | Bug fix | Fixed a deadlock on autopartition catalogs when a concurrent DROP EXTENSION is being executed. |
diff --git a/product_docs/docs/pgd/4/upgrades/upgrade_paths.mdx b/product_docs/docs/pgd/4/upgrades/upgrade_paths.mdx
index 740a02f67a3..5d682abe58e 100644
--- a/product_docs/docs/pgd/4/upgrades/upgrade_paths.mdx
+++ b/product_docs/docs/pgd/4/upgrades/upgrade_paths.mdx
@@ -4,31 +4,28 @@ title: Supported BDR upgrade paths
## Upgrading within version 4
-Beginning with version 4, EDB Postgres Distributed has adopted semantic versioning. All changes within the same major will be backward compatible lowering the risk when upgrading and allowing you to choose any later minor or patch release as the upgrade target.
+Beginning with version 4, EDB Postgres Distributed has adopted [semantic versioning](https://semver.org/). All changes within the same major version will be backward compatible, lowering the risk when upgrading and allowing you to choose any later minor or patch release as the upgrade target. You can upgrade from any version 4.x release to a later 4.x release.
-| 4.0.0 | 4.0.1 | 4.0.2 | 4.1.0 | 4.1.1 | Target BDR version |
-|-------|-------|-------|--------|------|--------------------|
-| ✓ | ✓ | ✓ | ✓ | ✓ | 4.2.0 |
-| ✓ | ✓ | ✓ | ✓ | | 4.1.1 |
-| ✓ | ✓ | ✓ | | | 4.1.0 |
-| ✓ | ✓ | | | | 4.0.2 |
-| ✓ | | | | | 4.0.1 |
## Upgrading from version 3.7 to version 4
-Currently it is recommended that you are using 3.7.15 or later before upgrading to 4. See [Upgrading within from 3.7](/pgd/3.7/bdr/upgrades/supported_paths/#upgrading-within-version-37) in the 3.7 documentation for more information. After upgrading to 3.7.15 or later the following combinations are allowed
-
-| 3.7.15 | 3.7.16 | 3.7.17 | Target BDR version |
-|--------|--------|--------|--------------------|
-| ✓ | ✓ | ✓ | 4.2.0 |
-| ✓ | ✓ | | 4.1.1 |
-| ✓ | ✓ | | 4.1.0 |
-| ✓ | | | 4.0.2 |
+Generally, we recommend that you upgrade to the latest version 3.7 release before
+upgrading to the latest version 4 release. You must be using 3.7.15 or later
+before upgrading to 4. See [Upgrading within version 3.7](/pgd/3.7/bdr/upgrades/supported_paths/#upgrading-within-version-37) in the
+3.7 documentation for more information. After upgrading to 3.7.15 or later, the
+following upgrade paths are possible.
+
+| From version | To version |
+| ---- | -- |
+| 3.7.15 | 4.0.2 or later |
+| 3.7.16 | 4.1.0 or later |
+| 3.7.17 | Must be upgraded to a later 3.7.x version before upgrading to version 4 |
+| 3.7.18 | 4.2.1 or later |
+| 3.7.19 | 4.2.2 or later |
+| 3.7.20 | 4.3.0 or later |
+| 3.7.21 | 4.3.1 or later |
+| 3.7.22 | 4.3.2 or later |
## Upgrading from version 3.6 to version 4
-Currently there are no direct upgrade paths from 3.6 to 4. You must first upgrade your cluster to 3.7 before upgrading to 4. See [Upgrading from 3.6](/pgd/3.7/bdr/upgrades/supported_paths/#upgrading-from-version-36) in the 3.7 documentation for more information.
-
-
-
-
+Currently there are no direct upgrade paths from 3.6 to 4. You must first upgrade your cluster to 3.7 before upgrading to 4. See [Upgrading from 3.6](/pgd/3.7/bdr/upgrades/supported_paths/#upgrading-from-version-36) in the 3.7 documentation for more information.
\ No newline at end of file
diff --git a/product_docs/docs/pgd/5/appusage.mdx b/product_docs/docs/pgd/5/appusage.mdx
index 3cf4f83520d..7c54778a8cb 100644
--- a/product_docs/docs/pgd/5/appusage.mdx
+++ b/product_docs/docs/pgd/5/appusage.mdx
@@ -314,8 +314,9 @@ Being asynchronous by default, peer nodes might lag behind, making it
possible for a client connected to multiple PGD nodes or switching
between them to read stale data.
-A [queue wait function](/pgd/latest/reference/functions/#bdrwait_for_apply_queue) is
-provided for clients or proxies to prevent such stale reads.
+A [queue wait
+function](/pgd/latest/reference/functions/#bdrwait_for_apply_queue) is provided
+for clients or proxies to prevent such stale reads.
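+
+As an illustrative sketch only (the node names, connection details, and argument
+choices here are assumptions; check the linked function reference for the exact
+signature and semantics), a client that has switched nodes might wait for the
+apply queue to drain before reading:
+
+```sh
+# Assumed example: on node2, wait until changes already received from node1
+# have been applied before reading data that was just written through node1.
+psql "host=node2 port=5432 dbname=bdrdb user=enterprisedb" \
+  -c "SELECT bdr.wait_for_apply_queue('node1', NULL);"
+```
+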
The synchronous replication features of Postgres are available to PGD
as well. In addition, PGD provides multiple variants for more synchronous
@@ -323,287 +324,6 @@ replication. See
[Durability and performance options](durability) for an overview and comparison of all variants available and
its different modes.
-## Application testing
-
-You can test PGD applications using the following programs,
-in addition to other techniques:
-
-- [Trusted Postgres Architect](#trusted-postgres-architect)
-- [pgbench with CAMO/Failover options](#pgbench-with-camofailover-options)
-- [isolationtester with multi-node access](#isolationtester-with-multi-node-access)
-
-### Trusted Postgres Architect
-
-[Trusted Postgres Architect](/tpa/latest) is the system used by EDB to
-deploy reference architectures, including those based on EDB Postgres Distributed.
-
-Trusted Postgres Architect includes test suites for each reference architecture.
-It also simplifies creating and managing a local collection of tests to run
-against a TPA cluster, using a syntax like the following:
-
-```
-tpaexec test mycluster mytest
-```
-
-We strongly recommend that developers write their own multi-node suite
-of Trusted Postgres Architect tests that verify the main expected properties
-of the application.
-
-### pgbench with CAMO/failover options
-
-In EDB Postgres Extended, the pgbench was extended to allow users to
-run failover tests while using CAMO or regular PGD deployments. The following options were added:
-
-```
--m, --mode=regular|camo|failover
-mode in which pgbench should run (default: regular)
-
---retry
-retry transactions on failover
-```
-
-In addition to these options, you must specify the connection information about the
-peer node for failover in [DSN
-form](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
-
-- Use `-m camo` or `-m failover` to specify the mode for pgbench.
- You can use The `-m failover` specification to test failover in
- regular PGD deployments.
-
-- Use `--retry` to specify whether to retry transactions when
- failover happens with `-m failover` mode. This option is enabled by default
- for `-m camo` mode.
-
-Here's an example in a CAMO environment:
-
-```sh
- pgbench -m camo -p $node1_port -h $node1_host bdrdemo \
- "host=$node2_host user=postgres port=$node2_port dbname=bdrdemo"
-```
-
-This command runs in CAMO mode. It connects to node1 and runs the tests. If the
-connection to node1 is lost, then pgbench connects to
-node2. It queries node2 to get the status of in-flight transactions.
-Aborted and in-flight transactions are retried in camo mode.
-
-In failover mode, if you specify `--retry`, then in-flight transactions are retried. In
-this scenario there's no way to find the status of in-flight transactions.
-
-### isolationtester with multi-node access
-
-`isolationtester` was extended to allow users to run tests on multiple
-sessions and on multiple nodes. This tool is used for internal PGD testing,
-although it's also available for use with user application testing.
-
-```
-$ isolationtester \
- --outputdir=./iso_output \
- --create-role=logical \
- --dbname=postgres \
- --server 'd1=dbname=node1' \
- --server 'd2=dbname=node2' \
- --server 'd3=dbname=node3'
-```
-
-Isolation tests are a set of tests for examining concurrent behaviors in
-PostgreSQL. These tests require running multiple interacting transactions,
-which requires managing multiple concurrent connections and therefore
-can't be tested using the normal `pg_regress` program. The name "isolation"
-comes from the fact that the original motivation was to test the
-serializable isolation level. Tests for other sorts of concurrent
-behaviors were added as well.
-
-It's built using PGXS as an external module.
-On installation, it creates the `isolationtester` binary file, which is run by
-`pg_isolation_regress` to perform concurrent regression tests and observe
-results.
-
-`pg_isolation_regress` is a tool similar to `pg_regress`, but instead of using
-psql to execute a test, it uses isolationtester. It accepts all the same
-command-line arguments as `pg_regress`. It was modified to accept multiple
-hosts as parameters. It then passes the host conninfo along with server names
-to the `isolationtester` binary. Isolation tester compares these server names with the
-names specified in each session in the spec files and runs given tests on
-respective servers.
-
-To define tests with overlapping transactions, use test specification
-files with a custom syntax. To add
-a new test, place a spec file in the `specs/` subdirectory, add the expected
-output in the `expected/` subdirectory, and add the test's name to the makefile.
-
-Isolationtester is a program that uses libpq to open multiple connections
-and executes a test specified by a spec file. A libpq connection string
-specifies the server and database to connect to. Defaults derived from
-environment variables are used otherwise.
-
-Specification consists of five parts, tested in this order:
-
-`server ""`
-
- This part defines the name of the servers for the sessions to run on.
- There can be zero or more server `""` specifications.
- The conninfo corresponding to the names is provided by the command to
- run `isolationtester`. This is described in `quickstart_isolationtest.md`.
- This part is optional.
-
-`setup { }`
-
- The given SQL block is executed once, in one session only, before running
- the test. Create any test tables or other required objects here. This
- part is optional. Multiple setup blocks are allowed if needed. Each is
- run separately, in the given order. The reason for allowing multiple
- setup blocks is that each block is run as a single PQexec submission,
- and some statements such as VACUUM can't be combined with others in such
- a block.
-
-`teardown { }`
-
- The teardown SQL block is executed once after the test is finished. Use
- this part to clean up in preparation for the next permutation, such as dropping
- any test tables created by setup. This part is optional.
-
-`session ""`
-
- There are normally several "session" parts in a spec file. Each
- session is executed in its own connection. A session part consists
- of three parts: setup, teardown, and one or more "steps." The per-session
- setup and teardown parts have the same syntax as the per-test setup and
- teardown, but they are executed in each session. The
- setup part typically contains a BEGIN command to begin a transaction.
-
- A session part also consists of `connect_to` specification,
- which points to a server name specified in the beginning that
- indicates the server on which this session runs.
-
- `connect_to ""`
-
- Each step has the syntax:
-
- `step "" { }`
-
- Where `` is a name identifying this step, and SQL is a SQL statement
- (or statements, separated by semicolons) that's executed in the step.
- Step names must be unique across the whole spec file.
-
-`permutation ""`
-
- A permutation line specifies a list of steps that are run in that order.
- Any number of permutation lines can appear. If no permutation lines are
- given, the test program automatically generates all possible orderings
- of the steps from each session (running the steps of any one session in
- order). The list of steps in a manually specified
- "permutation" line doesn't actually have to be a permutation of the
- available steps. It can, for instance, repeat some steps more than once
- or leave others out.
-
-Lines beginning with # are comments.
-
-For each permutation of the session steps (whether these are manually
-specified in the spec file or automatically generated), the isolation
-tester runs:
-
-1. The main setup part
-1. Per-session setup parts
-1. The selected session steps
-1. Per-session teardown
-1. The main teardown script
-
-Each selected step is sent to the connection associated
-with its session.
-
-To run isolation tests in a PGD environment that ran all prerequisite make
-commands:
-
-1. Run `make isolationcheck-install` to install the isolationtester submodule.
-
-2. You can run isolation regression tests using either
- of the following commands from the bdr-private repo:
-
- `make isolationcheck-installcheck`
- `make isolationcheck-makecheck`
-
-To run `isolationcheck-installcheck`, you need to have two or more postgresql
-servers running. Pass the conninfo of each server to `pg_isolation_regress`
-in the PGD makefile.
- Ex: `pg_isolation_regress --server 'd1=host=myhost dbname=mydb port=5434'
- --server 'd2=host=myhost1 dbname=mydb port=5432'`
-
-Next, add a `.spec` file containing tests in the `specs/isolation` directory
-of the `bdr-private/` repo. Add a `.out` file in the `expected/isolation` directory of
-the `bdr-private/` repo.
-
-Then run:
-
- `make isolationcheck-installcheck`
-
-`Isolationcheck-makecheck` currently supports running isolation tests on a
-single instance by setting up PGD between multiple databases.
-
-You need to pass appropriate database names and the conninfo of bdr instances
-to `pg_isolation_regress` in the PGD makefile as follows:
- `pg_isolation_regress --dbname=db1,db2 --server 'd1=dbname=db1'
- --server 'd2=dbname=db2'`
-
-Then run:
-
- `make isolationcheck-makecheck`
-
-Each step can contain commands that block until further action is taken.
-Most likely, some other session runs a step that unblocks it or causes a
-deadlock. A test that uses this ability must manually specify valid
-permutations, that is, those that don't expect a blocked session to execute a
-command. If a test doesn't follow that rule, `isolationtester` cancels it
-after 300 seconds. If the cancel doesn't work, `isolationtester` exits
-uncleanly after 375 seconds of wait time. Avoid testing invalid
-permutations because they can make the isolation tests take
-a very long time to run, and they serve no useful testing purpose.
-
-`isolationtester` recognizes that a command has blocked by checking whether it's shown as waiting in the `pg_locks` view. Therefore, only
-blocks on heavyweight locks are detected.
-
-## Performance testing and tuning
-
-PGD allows you to issue write transactions onto multiple master nodes.
-Bringing those writes back together onto each node has a cost in
-performance.
-
-First, replaying changes from another node has a CPU cost, an I/O cost,
-and it generates WAL records. The resource use is usually less
-than in the original transaction since CPU overheads are lower as a result
-of not needing to reexecute SQL. In the case of UPDATE and DELETE
-transactions, there might be I/O costs on replay if data isn't cached.
-
-Second, replaying changes holds table-level and row-level locks that can
-produce contention against local workloads. The conflict-free replicated data types (CRDT) and column-level conflict detection (CLCD) features
-ensure you get the correct answers even for concurrent updates, but they
-don't remove the normal locking overheads. If you get locking contention,
-try to avoid conflicting updates, or keep transactions as short as
-possible. A heavily updated row in a larger transaction causes
-a bottleneck on performance for that transaction. Complex applications
-require some thought to maintain scalability.
-
-If you think you're having performance problems,
-develop performance tests using the benchmarking tools. pgbench
-allows you to write custom test scripts specific to your use case
-so you can understand the overheads of your SQL and measure the impact
-of concurrent execution.
-
-If PGD is running slow, then we suggest the following:
-
-1. Write a custom test script for pgbench, as close as you can make it
- to the production system's problem case.
-2. Run the script on one node to give you a baseline figure.
-3. Run the script on as many nodes as occurs in production, using the
- same number of sessions in total as you did on one node. This technique
- shows you the effect of moving to multiple nodes.
-4. Increase the number of sessions for these two tests so you can
- plot the effect of increased contention on your application.
-5. Make sure your tests are long enough to account for replication delays.
-6. Ensure that replication delay isn't growing during your tests.
-
-Use all of the normal Postgres tuning features to improve the speed
-of critical parts of your application.
-
## Use of table access methods (TAMs) in PGD
PGD 5.0 supports two table access methods released with EDB Postgres 15.0.
@@ -627,8 +347,3 @@ After you create the extension, you can use TAM to create a table using
This replicates to all the PGD nodes.
For more information on these table access methods, see [`CREATE TABLE`](/epas/latest/epas_compat_sql/36_create_table/).
-
-
-
-
-
diff --git a/product_docs/docs/pgd/5/architectures.mdx b/product_docs/docs/pgd/5/architectures.mdx
index aee575d6b11..210ac0bcf2f 100644
--- a/product_docs/docs/pgd/5/architectures.mdx
+++ b/product_docs/docs/pgd/5/architectures.mdx
@@ -94,12 +94,14 @@ they aren't part of the standard Always On architectures.
* Can be 3 data nodes (recommended)
* Can be 2 data nodes and 1 witness that doesn't hold data (not depicted)
* A PGD Proxy for each data node with affinity to the applications
- * Can be colocated with data node
+ * Can be colocated with data node (recommended)
+ * Can be located on a separate node
+   * Configuration and infrastructure symmetry of data nodes is expected, to ensure that proper resources are available to handle the application workload when it's rerouted
* Barman for backup and recovery (not depicted)
* Offsite is optional but recommended
- * Can be shared by multiple clusters
+ * Can be shared by multiple PGD clusters
* Postgres Enterprise Manager (PEM) for monitoring (not depicted)
- * Can be shared by multiple clusters
+ * Can be shared by multiple PGD clusters
### Always On multi-location
@@ -112,14 +114,17 @@ they aren't part of the standard Always On architectures.
* Can be 3 data nodes (recommended)
* Can be 2 data nodes and 1 witness which does not hold data (not depicted)
* A PGD-Proxy for each data node with affinity to the applications
- * can be co-located with data node
+    * Can be colocated with data node (recommended)
+    * Can be located on a separate node
+    * Configuration and infrastructure symmetry of data nodes and locations is expected, to ensure that proper resources are available to handle the application workload when it's rerouted
* Barman for backup and recovery (not depicted).
- * Can be shared by multiple clusters
+ * Can be shared by multiple PGD clusters
* Postgres Enterprise Manager (PEM) for monitoring (not depicted).
- * Can be shared by multiple clusters
+ * Can be shared by multiple PGD clusters
* An optional witness node must be placed in a third region to increase tolerance for location failure.
* Otherwise, when a location fails, actions requiring global consensus are blocked, such as adding new nodes and distributed DDL.
+
## Choosing your architecture
All architectures provide the following:
diff --git a/product_docs/docs/pgd/5/cli/configuring_cli.mdx b/product_docs/docs/pgd/5/cli/configuring_cli.mdx
index 51bf554338e..8a3ba373087 100644
--- a/product_docs/docs/pgd/5/cli/configuring_cli.mdx
+++ b/product_docs/docs/pgd/5/cli/configuring_cli.mdx
@@ -3,25 +3,25 @@ title: "Configuring PGD CLI"
navTitle: "Configuring PGD CLI"
---
-PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (for example, edb_admin on BigAnimal distributed high-availability) to use PGD CLI.
+PGD CLI can be installed on any system that can connect to the PGD cluster. To use PGD CLI, you need a user with PGD superuser privileges or equivalent. The PGD user with superuser privileges is the [bdr_superuser role](../security). An example of an equivalent user is `edb_admin` on an EDB BigAnimal Distributed High Availability cluster.
## PGD CLI and database connection strings
-You may not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster. This means that PGD CLI will automatically connect when run.
+You might not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster, which means that the PGD CLI can connect to the cluster when run.
-If you are installing PGD CLI manually, you must give PGD CLI a database connection string so it knows which PGD cluster to connect to.
+If you're installing PGD CLI manually, you must give PGD CLI a database connection string so it knows which PGD cluster to connect to.
!!! Important Setting passwords
-PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods:
+PGD CLI doesn't interactively prompt for your password. You must pass your password using one of the following methods:
- 1. Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) which includes the host, port, database name, user name, and password.
- 1. Setting the password in the `PGPASSWORD` environment variable.
- 1. Including the password in the connection string.
+ - Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html), which includes the host, port, database name, user name, and password.
+ - Setting the password in the `PGPASSWORD` environment variable.
+ - Including the password in the connection string.
-We recommend the first option, as the other options don't scale well with multiple databases or compromise password confidentiality.
+We recommend the first option, as the other options don't scale well with multiple databases, or they compromise password confidentiality.
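+
+For example, a `.pgpass` entry uses the standard libpq `hostname:port:database:username:password` format. The host, port, and credentials below are placeholders only:
+
+```sh
+# Append a sample entry (placeholder values) and restrict permissions,
+# since libpq ignores ~/.pgpass unless only the owner can read it.
+echo 'proxy-host.example.com:6432:bdrdb:enterprisedb:mysecretpassword' >> ~/.pgpass
+chmod 600 ~/.pgpass
+```
+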
!!!
-If you don't know the database connection strings for your PGD-powered deployment, see [discovering connection strings](discover_connections). It is a guide to finding the right connection strings for your cluster.
+If you don't know the database connection strings for your PGD-powered deployment, see [discovering connection strings](discover_connections), which helps you to find the right connection strings for your cluster.
Once you have that information, you can continue.
@@ -29,7 +29,6 @@ Once you have that information, you can continue.
PGD CLI takes its database connection information from either the PGD CLI configuration file or the command line.
-
### Using database connection strings in the command line
You can pass the connection string directly to `pgd` using the `--dsn` option. For details, see the [sample use case](using_cli/#passing-a-database-connection-string). For example:
@@ -58,5 +57,4 @@ By default, `pgd-cli-config.yml` is located in the `/etc/edb/pgd-cli` directory.
1. `/etc/edb/pgd-cli` (default)
2. `$HOME/.edb/pgd-cli`
-If your configuration file is not in either of these directories, you can use the optional `-f` or `--config-file` flag on a `pgd` command to set which file should be read as configuration. See the [sample use case](using_cli/#passing-a-database-connection-string).
-
+If your configuration file isn't in either of these directories, you can use the optional `-f` or `--config-file` flag on a `pgd` command to set the file to read as configuration. See the [sample use case](using_cli/#passing-a-database-connection-string).
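+
+As a minimal sketch, assuming a `cluster`/`endpoints` layout for `pgd-cli-config.yml` and a DSN that matches your cluster (both are illustrative, so verify them against your deployment), you can keep the file wherever you like and point `pgd` at it:
+
+```sh
+# Write a minimal pgd-cli-config.yml (structure and values are a sketch; adjust to your cluster)
+cat > ./pgd-cli-config.yml <<'EOF'
+cluster:
+  name: cluster-name
+  endpoints:
+    - "host=192.168.100.2 port=6432 dbname=bdrdb user=enterprisedb"
+EOF
+
+# The -f / --config-file flag makes pgd read this file instead of the default locations
+pgd show-nodes -f ./pgd-cli-config.yml
+```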
diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx
index 61bde0b5ed4..5a24b8a63e9 100644
--- a/product_docs/docs/pgd/5/cli/discover_connections.mdx
+++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx
@@ -1,23 +1,27 @@
---
-title: "Discovering Connection Strings"
-navTitle: "Discovering Connection Strings"
+title: "Discovering connection strings"
+navTitle: "Discovering connection strings"
indexdepth: 2
deepToC: true
---
-PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI.
+You can install PGD CLI on any system that can connect to the PGD cluster. To use PGD CLI, you need a user with PGD superuser privileges or equivalent. The PGD user with superuser privileges is the [bdr_superuser role](../security). An example of an equivalent user is `edb_admin` on an EDB BigAnimal Distributed High Availability cluster.
## PGD CLI and database connection strings
-You may not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster. This means that PGD CLI will automatically connect when run.
+You might not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster. This means that PGD CLI can connect to the cluster when run.
## Getting your database connection string
-Every deployment method has a different way of deriving a connection string for it. This is because of the range of different configurations that PGD supports. Generally, you can obtain the required information from the configuration of your deployment; this section provides a guide of how to assemble that information into connection strings.
+Because of the range of different configurations that PGD supports, every deployment method has a different way of deriving a connection string for it. Generally, you can obtain the required information from the configuration of your deployment. You can then assemble that information into connection strings.
### For a TPA-deployed PGD cluster
-Because TPA is so flexible, you will have to derive your connection string from your cluster configuration file (config.yml). You will need the name or IP address of a host with the role pgd-proxy listed for it. This host will have a proxy you can connect to. Usually the proxy will be listening on port 6432 (check the setting for `default_pgd_proxy_options` and `listen_port` in the config to confirm). The default database name is `bdrdb` (check the setting `bdr_database` in the config to confirm) and the default PGD superuser will be `enterprisedb` for EPAS and `postgres` for Postgres and Postgres Extended.
+Because TPA is so flexible, you have to derive your connection string from your cluster configuration file (`config.yml`).
+
+- You need the name or IP address of a host with the role pgd-proxy listed for it. This host has a proxy you can connect to. Usually the proxy listens on port 6432. (Check the setting for `default_pgd_proxy_options` and `listen_port` in the config to confirm.)
+- The default database name is `bdrdb`. (Check the setting `bdr_database` in the config to confirm.)
+- The default PGD superuser is `enterprisedb` for EDB Postgres Advanced Server and `postgres` for Postgres and Postgres Extended.
You can then assemble a connection string based on that information:
@@ -25,7 +29,7 @@ You can then assemble a connection string based on that information:
"host= port= dbname= user= sslmode=require"
```
-To illustrate this, here's some excerpts of a config.yml file for a cluster:
+To illustrate this, here are some excerpts of a `config.yml` file for a cluster:
```yaml
...
@@ -51,26 +55,35 @@ instances:
...
```
-The connection string for this cluster would be:
+The connection string for this cluster is:
```
"host=192.168.100.2 port=6432 dbname=bdrdb user=enterprisedb sslmode=require"
```
!!! Note Host name versus IP address
-In our example, we use the IP address because the configuration is from a Docker TPA install with no name resolution available. Generally, you should be able to use the host name as configured.
+The example uses the IP address because the configuration is from a Docker TPA install with no name resolution available. Generally, you can use the host name as configured.
!!!
-### For a BigAnimal distributed high-availability cluster
+### For an EDB BigAnimal Distributed High Availability cluster
-1. Log into the [BigAnimal Clusters](https://portal.biganimal.com/clusters) view.
+1. Log in to the [BigAnimal clusters](https://portal.biganimal.com/clusters) view.
+1. In the filter, set **Cluster Type** to **Distributed High Availability** to show only clusters that work with PGD CLI.
1. Select your cluster.
-1. In the view of your cluster, select the Connect tab.
-1. Copy the Read/Write URI from the connection info. This is your connection string.
+1. In the view of your cluster, select the **Connect** tab.
+1. Copy the read/write URI from the connection info. This is your connection string.
+
+### For a cluster deployed with EDB PGD for Kubernetes
+
+As with TPA, EDB PGD for Kubernetes is very flexible, and there are multiple ways to obtain a connection string. It depends, in large part, on how the [services](/postgres_distributed_for_kubernetes/latest/connectivity/#services) were configured for the deployment:
+
+- If you use the Node Service Template, direct connectivity to each node and proxy service is available.
+- If you use the Group Service Template, there's a gateway service to each group.
+- If you use the Proxy Service Template, a single proxy provides an entry point to the cluster for all applications.
-### For an EDB PGD for Kubernetes deployed cluster
+Consult your configuration file to determine this information.
-As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: `bdrdb`) and username (`enterprisedb` for EPAS and `postgres` for Postgres and Postgres Extended.).
+Establish a host name or IP address, port, database name, and username. The default database name is `bdrdb`, and the default username is `enterprisedb` for EDB Postgres Advanced Server and `postgres` for Postgres and Postgres Extended.
You can then assemble a connection string based on that information:
@@ -78,6 +91,6 @@ You can then assemble a connection string based on that information:
"host= port= dbname= user="
```
-You may need to add `sslmode=` if the deployment's configuration requires it.
+If the deployment's configuration requires it, add `sslmode=`.
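+
+For example, with a hypothetical proxy service reachable at `192.168.0.10` on port `5432`, the assembled connection string might look like this:
+
+```
+"host=192.168.0.10 port=5432 dbname=bdrdb user=enterprisedb sslmode=require"
+```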
diff --git a/product_docs/docs/pgd/5/cli/index.mdx b/product_docs/docs/pgd/5/cli/index.mdx
index 443ec13d74e..1b87d879919 100644
--- a/product_docs/docs/pgd/5/cli/index.mdx
+++ b/product_docs/docs/pgd/5/cli/index.mdx
@@ -13,9 +13,9 @@ directoryDefaults:
description: "The PGD Command Line Interface (CLI) is a tool to manage your EDB Postgres Distributed cluster"
---
-The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. It may be installed automatically on systems within a TPA-deployed PGD cluster or it can be installed manually on systems that can connect to any PGD cluster, including BigAnimal Distributed High Availability PGD clusters or PGD clusters deployed using the EDB PGD for Kubernetes operator.
+The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. It's installed automatically on systems in a TPA-deployed PGD cluster, or you can install it manually on systems that can connect to any PGD cluster, such as EDB BigAnimal Distributed High Availability clusters or PGD clusters deployed using the EDB PGD for Kubernetes operator.
-See [Installing PGD CLI](installing_cli) for information about how to install PGD CLI, both automatically with Trusted Postgres Architect and manually.
+See [Installing PGD CLI](installing_cli) for information about how to install PGD CLI manually on systems that can connect to your cluster.
See [Using PGD CLI](using_cli) for an introduction to using the PGD CLI and connecting to your PGD cluster.
@@ -23,5 +23,5 @@ See [Configuring PGD CLI](configuring_cli) for details on creating persistent co
See the [Command reference](command_ref) for the available commands to inspect, manage, and get information about cluster resources.
-There is also a guide to [discovering connection strings](discover_connections). It shows how to obtain the correct connection strings for your PGD-powered deployment.
+See [Discovering connection strings](discover_connections) to learn how to obtain the correct connection strings for your PGD-powered deployment.
diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx
index 2ec709cf331..c72e436f58b 100644
--- a/product_docs/docs/pgd/5/cli/installing_cli.mdx
+++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx
@@ -3,14 +3,15 @@ title: "Installing PGD CLI"
navTitle: "Installing PGD CLI"
---
-PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI.
+You can install PGD CLI on any system that can connect to the PGD cluster. To use PGD CLI, you need a user with PGD superuser privileges or equivalent. The PGD user with superuser privileges is the [bdr_superuser role](../security). An example of an equivalent user is edb_admin on a BigAnimal distributed high-availability cluster.
## Installing automatically with Trusted Postgres Architect (TPA)
+
By default, Trusted Postgres Architect installs and configures PGD CLI on each PGD node. If you want to install PGD CLI on any non-PGD instance in the cluster, attach the pgdcli role to that instance in Trusted Postgres Architect's configuration file before deploying. See [Trusted Postgres Architect](/tpa/latest/) for more information.
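+
+For example, a sketch of a `config.yml` instance entry with the pgdcli role attached might look like the following. The instance name and any other settings are illustrative only:
+
+```yaml
+instances:
+- Name: monitor
+  role:
+  - pgdcli
+```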
## Installing manually on Linux
-PGD CLI is installable from the EDB Repositories. These repositories require a token to enable downloads from them. You will need to login to [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads) to obtain your token. Then execute the following command, substituting
+PGD CLI is installable from the EDB repositories. These repositories require a token to enable downloads from them. Log in to [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads) to obtain your token. Then execute the command shown for your operating system, substituting
your token for ``.
### Add repository and install PGD CLI on Debian or Ubuntu
@@ -20,7 +21,7 @@ curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed
sudo apt-get install edb-pgd5-cli
```
-### Add repository and install PGD CLI on RHEL, Rocky, AlmaLinux or Oracle Linux
+### Add repository and install PGD CLI on RHEL, Rocky, AlmaLinux, or Oracle Linux
```bash
curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed/setup.rpm.sh' | sudo -E bash
@@ -28,5 +29,3 @@ sudo yum install edb-pgd5-cli
```
[Next: Using PGD CLI](using_cli)
-
-
diff --git a/product_docs/docs/pgd/5/cli/using_cli.mdx b/product_docs/docs/pgd/5/cli/using_cli.mdx
index 3ea54d77ce3..ee992a9c828 100644
--- a/product_docs/docs/pgd/5/cli/using_cli.mdx
+++ b/product_docs/docs/pgd/5/cli/using_cli.mdx
@@ -3,27 +3,27 @@ title: "Using PGD CLI"
navTitle: "Using PGD CLI"
---
-## What is the PGD CLI
+## What is the PGD CLI?
-The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high availability) to use it.
+The PGD CLI is a convenient way to connect to and manage your PGD cluster. To use it, you need a user with PGD superuser privileges or equivalent. The PGD user with superuser privileges is the [bdr_superuser role](../security). An example of an equivalent user is edb_admin on a BigAnimal distributed high-availability cluster.
!!! Important Setting passwords
-PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods:
+PGD CLI doesn't interactively prompt for your password. You must pass your password using one of the following methods:
- 1. Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) which includes the host, port, database name, user name, and password.
- 1. Setting the password in the `PGPASSWORD` environment variable.
- 1. Including the password in the connection string.
+ - Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html), which includes the host, port, database name, user name, and password.
+ - Setting the password in the `PGPASSWORD` environment variable.
+ - Including the password in the connection string.
-We recommend the first option, as the other options don't scale well with multiple databases or compromise password confidentiality.
+We recommend the first option, as the other options don't scale well with multiple databases, or they compromise password confidentiality.
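+
+For example, a `.pgpass` entry uses the format `hostname:port:database:username:password`. For a hypothetical host `bdr-a1`, it might look like this:
+
+```
+bdr-a1:5432:bdrdb:enterprisedb:mypassword
+```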
!!!
## Running the PGD CLI
-Once you have [installed pgd-cli](installing_cli), run the `pgd` command to access the PGD command line interface. The `pgd` command will need details of which host, port, and database to connect to, along with your username and password.
+Once you have [installed pgd-cli](installing_cli), run the `pgd` command to access the PGD command line interface. The `pgd` command needs details about the host, port, and database to connect to, along with your username and password.
## Passing a database connection string
-Use the `--dsn` flag to pass a database connection string to the `pgd` command. You don't need a configuration file when you pass the connection string with the `--dsn` flag. The flag takes precedence even if a configuration file is present. For example:
+Use the `--dsn` flag to pass a database connection string to the `pgd` command. When you pass the connection string with the `--dsn` flag, you don't need a configuration file. The flag takes precedence even if a configuration file is present. For example:
```sh
pgd show-nodes --dsn "host=bdr-a1 port=5432 dbname=bdrdb user=enterprisedb"
@@ -33,7 +33,7 @@ See [pgd](command_ref) in the command reference for a description of the command
## Specifying a configuration file
-If a `pgd-cli-config.yml` file is in `/etc/edb/pgd-cli` or `$HOME/.edb/pgd-cli`, `pgd` will automatically use it. You can override
+If a `pgd-cli-config.yml` file is in `/etc/edb/pgd-cli` or `$HOME/.edb/pgd-cli`, `pgd` uses it. You can override
this behavior using the optional `-f` or `--config-file` flag. For example:
```sh
@@ -75,13 +75,13 @@ pgd show-nodes -o json
]
```
-The PGD CLI supports the following output formats:
+The PGD CLI supports the following output formats.
| Setting | Format | Considerations |
| ------- | ------ | --------- |
| none | Tabular | Default format. This setting presents the data in tabular form.|
| `json` | JSON | Presents the raw data with no formatting. For some commands, the JSON output might show more data than the tabular output, such as extra fields and more detailed messages. |
-| `yaml` | YAML |Similar to the JSON output, but as YAML and with the fields ordered alphabetically. Experimental and may not be fully supported in future versions. |
+| `yaml` | YAML | Similar to the JSON output but as YAML and with the fields ordered alphabetically. Experimental and might not be fully supported in future versions. |
## Accessing the command line help
diff --git a/product_docs/docs/pgd/5/deployments.mdx b/product_docs/docs/pgd/5/deployments.mdx
index f57916d3e1b..ca16d43536a 100644
--- a/product_docs/docs/pgd/5/deployments.mdx
+++ b/product_docs/docs/pgd/5/deployments.mdx
@@ -6,11 +6,23 @@ indexCards: simple
You can deploy and install EDB Postgres Distributed products using the following methods:
-- [Trusted Postgres Architect](/tpa/latest) (TPA) is an orchestration tool that uses Ansible to build Postgres clusters using a set of reference architectures that document how to set up and operate Postgres in various scenarios. TPA represents the best practices followed by EDB, and its recommendations apply to quick testbed setups just as they do to production environments. See [Deploying with TPA](tpa) for more information.
+- [Trusted Postgres Architect](/tpa/latest) (TPA) is an orchestration tool that uses Ansible to build Postgres clusters using a set of reference architectures that document how to set up and operate Postgres in various scenarios. TPA represents the best practices followed by EDB, and its recommendations apply to quick testbed setups just as they do to production environments. TPA's flexibility allows deployments to virtual machines, AWS cloud instances, or Linux host hardware. See [Deploying with TPA](tpa) for more information.
-- BigAnimal is a fully managed database-as-a-service with built-in Oracle compatibility that runs in your cloud account where it's operated by the Postgres experts. BigAnimal makes it easy to set up, manage, and scale your databases. The addition of extreme-high-availability support through EDB Postgres Distributed (PGD) allows single-region Always On Gold clusters. These clusters are two PGD groups in different availability zones in a single cloud region, with a witness node in a third availability zone. See [Extreme high availability](/biganimal/latest/overview/02_high_availability/#extreme-high-availability-beta) in the [BigAnimal documentation](/biganimal/latest) for more information.
+- BigAnimal is a fully managed database-as-a-service with built-in Oracle compatibility that runs in your cloud account or BigAnimal's cloud account where it's operated by our Postgres experts. EDB BigAnimal makes it easy to set up, manage, and scale your databases. The addition of distributed high-availability support powered by EDB Postgres Distributed (PGD) enables single- and multi-region Always On Gold clusters. See [Distributed high availability](/biganimal/latest/overview/02_high_availability/#distributed-high-availability) in the [BigAnimal documentation](/biganimal/latest) for more information.
-Coming soon:
+- EDB Postgres Distributed for Kubernetes is a Kubernetes operator designed, developed, and supported by EDB. It covers the full lifecycle of highly available Postgres database clusters with a multi-master architecture, using PGD replication. It's based on the open source CloudNativePG operator and provides additional value, such as compatibility with Oracle using EDB Postgres Advanced Server, Transparent Data Encryption (TDE) using EDB Postgres Extended or Advanced Server, and additional supported platforms including IBM Power and OpenShift. This offering is currently in preview.
-- EDB Postgres Distributed for Kubernetes will be a Kubernetes operator designed, developed, and supported by EDB. It will cover the full lifecycle of highly available Postgres database clusters with a multi-master architecture, using PGD replication. It's based on the open source CloudNativePG operator and provides additional value, such as compatibility with Oracle using EDB Postgres Advanced Server and additional supported platforms including IBM Power and OpenShift.
+| | TPA | BigAnimal | Kubernetes (preview) |
+|--------------------------------------------|:------------------------:|:------------------------------------------:|:-----------------------------------------:|
+| Single region | ✅ | ✅ | ✅ |
+| Active-Active support | 2+ regions | 2 regions | 2 regions |
+| Write/Read routing | Local or global | Local | Local |
+| Automated failover | AZ or Region | AZ | AZ |
+| Major version upgrades | ✅ | Roadmap | Roadmap |
+| Subscriber-only nodes (read replicas) | ✅ | TBD | TBD |
+| Logical standby nodes | ✅ | TBD | TBD |
+| PgBouncer | ✅ | TBD | TBD |
+| Selective data replication | ✅ | ✅ | ✅ |
+| Maintenance windows per region | ✅ | ✅ | ✅ |
+| Target availability | 99.999% SLO | 99.99% SLA (single) 99.995% SLA (multi) | 99.999% SLO |
diff --git a/product_docs/docs/pgd/5/index.mdx b/product_docs/docs/pgd/5/index.mdx
index 1285499d76e..ff42aeb253c 100644
--- a/product_docs/docs/pgd/5/index.mdx
+++ b/product_docs/docs/pgd/5/index.mdx
@@ -36,6 +36,7 @@ navigation:
- monitoring
- cli
- transaction-streaming
+ - testingandtuning
- striggers
- scaling
- twophase
diff --git a/product_docs/docs/pgd/5/known_issues.mdx b/product_docs/docs/pgd/5/known_issues.mdx
index 072f22d8c02..a6314e90a4e 100644
--- a/product_docs/docs/pgd/5/known_issues.mdx
+++ b/product_docs/docs/pgd/5/known_issues.mdx
@@ -44,8 +44,6 @@ release.
- Transactions using Eager Replication can't yet execute DDL. The TRUNCATE command is allowed.
-- Not all DDL can run when either CAMO or Group Commit is used.
-
- Parallel apply isn't currently supported in combination with Group Commit. Make sure to disable it when using Group Commit by either:
- Setting `num_writers` to 1 for the node group using [`bdr.alter_node_group_config`](/pgd/latest/reference/nodes-management-interfaces/#bdralter_node_group_config).
- Using the GUC `bdr.writers_per_subscription`. See [Configuration of generic replication](/pgd/latest/reference/pgd-settings/#generic-replication).
diff --git a/product_docs/docs/pgd/5/limitations.mdx b/product_docs/docs/pgd/5/limitations.mdx
index 286c2829b5b..2ea6a437a25 100644
--- a/product_docs/docs/pgd/5/limitations.mdx
+++ b/product_docs/docs/pgd/5/limitations.mdx
@@ -108,6 +108,31 @@ Be sure to disable transaction streaming when planning to use
CAMO. You can configure this option globally or in the PGD node group. See
[Transaction streaming configuration](../transaction-streaming#configuration).
+- Not all DDL can run when you use CAMO. If you use unsupported DDL, a warning is logged, and the transaction's commit scope is set to local only. The only supported DDL operations are:
+ - non-concurrent `CREATE INDEX`
+ - non-concurrent `DROP INDEX`
+ - non-concurrent `REINDEX` of an individual table or index
+ - `CLUSTER` (of a single relation or index only)
+ - `ANALYZE`
+ - `TRUNCATE`
+
+## Group Commit
+
+[Group Commit](durability/group-commit) is a feature that enables configurable synchronous commits over
+nodes in a group. If you use this feature, take the following limitations into account:
+
+- Not all DDL can run when you use Group Commit. If you use unsupported DDL, a warning is logged, and the transaction's commit scope is set to local. The only supported DDL operations are:
+ - non-concurrent `CREATE INDEX`
+ - non-concurrent `DROP INDEX`
+ - non-concurrent `REINDEX` of an individual table or index
+ - `CLUSTER` (of a single relation or index only)
+ - `ANALYZE`
+ - `TRUNCATE`
+
+## Eager
+
+[Eager](consistency/eager) is a Group Commit feature that avoids conflicts by eagerly aborting transactions that might clash. It's subject to the same limitations as Group Commit.
+
## Other limitations
This noncomprehensive list includes other limitations that are expected and
diff --git a/product_docs/docs/pgd/5/monitoring/otel.mdx b/product_docs/docs/pgd/5/monitoring/otel.mdx
index ca7df3a644d..c33fb9ec965 100644
--- a/product_docs/docs/pgd/5/monitoring/otel.mdx
+++ b/product_docs/docs/pgd/5/monitoring/otel.mdx
@@ -3,7 +3,7 @@ title: OpenTelemetry integration
---
You can configure EDB Postgres Distributed to report monitoring information
-as well as traces to the OpenTelemetry collector.
+as well as traces to the [OpenTelemetry](https://opentelemetry.io/) collector.
EDB Postgres Distributed OTEL collector fills several resource attributes.
These are attached to all metrics and traces:
diff --git a/product_docs/docs/pgd/5/monitoring/sql.mdx b/product_docs/docs/pgd/5/monitoring/sql.mdx
index 0a7f7d64719..36ac35396c8 100644
--- a/product_docs/docs/pgd/5/monitoring/sql.mdx
+++ b/product_docs/docs/pgd/5/monitoring/sql.mdx
@@ -113,7 +113,7 @@ You can use another view for monitoring of outgoing replication activity:
- [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible/#bdrnode_replication_rates) for monitoring outgoing replication
-The []`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible/#bdrnode_replication_rates) view gives an overall picture of the outgoing
+The [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible/#bdrnode_replication_rates) view gives an overall picture of the outgoing
replication activity along with the catchup estimates for peer nodes,
specifically.
@@ -152,7 +152,7 @@ view.
!!! Note
This catalog is present only when the bdr-enterprise extension is installed.
-Administrators can query `[`bdr.node_slots`](/pgd/latest/reference/catalogs-visible/#bdrnode_slots) for outgoing replication from the
+Administrators can query [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible/#bdrnode_slots) for outgoing replication from the
local node. It shows information about replication status of all other nodes
in the group that are known to the current node as well as any additional
replication slots created by PGD on the current node.
diff --git a/product_docs/docs/pgd/5/quickstart/index.mdx b/product_docs/docs/pgd/5/quickstart/index.mdx
index e7d45925288..a3a691b0075 100644
--- a/product_docs/docs/pgd/5/quickstart/index.mdx
+++ b/product_docs/docs/pgd/5/quickstart/index.mdx
@@ -20,7 +20,7 @@ navigation:
EDB Postgres Distributed (PGD) is a multi-master replicating implementation of Postgres designed for high performance and availability. You can create database clusters made up of many bidirectionally synchronizing database nodes. The clusters can have a number of proxy servers that direct your query traffic to the most available nodes, adding further resilience to your cluster configuration.
!!! Note Fully managed BigAnimal
- If you prefer to have a fully managed EDB Postgres Distributed experience, PGD is now available as the Extreme High Availability option on BigAnimal, EDB's cloud platform for Postgres. Read more about [BigAnimal Extreme High Availability](/biganimal/latest/overview/02_high_availability/#extreme-high-availability-preview).
+ If you prefer to have a fully managed EDB Postgres Distributed experience, PGD is now available as an option on BigAnimal, EDB's cloud platform for Postgres. Read more about [BigAnimal distributed high-availability clusters](/biganimal/latest/overview/02_high_availability/#distributed-high-availability).
PGD is very configurable. To quickly evaluate and deploy PGD, use this quick start. It'll get you up and running with a fully configured PGD cluster using the same tools that you'll use to deploy to production. This quick start includes:
diff --git a/product_docs/docs/pgd/5/reference/index.json b/product_docs/docs/pgd/5/reference/index.json
index 8e5c541ecc2..b846fc85cdc 100644
--- a/product_docs/docs/pgd/5/reference/index.json
+++ b/product_docs/docs/pgd/5/reference/index.json
@@ -185,6 +185,7 @@
"bdrreplication_set_remove_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table",
"bdrreplication_set_add_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter",
"bdrreplication_set_remove_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter",
+ "pgd_bench": "/pgd/latest/reference/testingandtuning#pgd_bench",
"bdralter_sequence_set_kind": "/pgd/latest/reference/sequences#bdralter_sequence_set_kind",
"bdrextract_timestamp_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_snowflakeid",
"bdrextract_nodeid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_snowflakeid",
diff --git a/product_docs/docs/pgd/5/reference/index.mdx b/product_docs/docs/pgd/5/reference/index.mdx
index 7c9617cf7c8..8504dba6028 100644
--- a/product_docs/docs/pgd/5/reference/index.mdx
+++ b/product_docs/docs/pgd/5/reference/index.mdx
@@ -14,6 +14,7 @@ navigation:
- repsets-management
- repsets-membership
- repsets-ddl-filtering
+- testingandtuning
- sequences
- autopartition
- streamtriggers
@@ -171,7 +172,7 @@ The reference section is a definitive listing of all functions, views and comman
* [`bdr.min_worker_backoff_delay`](pgd-settings#bdrmin_worker_backoff_delay)
### [CRDTs](pgd-settings#crdts)
* [`bdr.crdt_raw_value`](pgd-settings#bdrcrdt_raw_value)
-### [Eager Replication](pgd-settings#eager-replication)
+### [Commit Scopes](pgd-settings#commit-scopes)
* [`bdr.commit_scope`](pgd-settings#bdrcommit_scope)
### [Commit At Most Once](pgd-settings#commit-at-most-once)
* [`bdr.camo_local_mode_delay`](pgd-settings#bdrcamo_local_mode_delay)
@@ -263,6 +264,10 @@ The reference section is a definitive listing of all functions, views and comman
* [`bdr.replication_set_remove_ddl_filter`](repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter)
+## [Testing and tuning commands](testingandtuning)
+ * [`pgd_bench`](testingandtuning#pgd_bench)
+
+
## [Global sequence management interfaces](sequences)
### [Sequence functions](sequences#sequence-functions)
* [`bdr.alter_sequence_set_kind`](sequences#bdralter_sequence_set_kind)
diff --git a/product_docs/docs/pgd/5/reference/index.mdx.src b/product_docs/docs/pgd/5/reference/index.mdx.src
index 4443174c515..b95a72d417c 100644
--- a/product_docs/docs/pgd/5/reference/index.mdx.src
+++ b/product_docs/docs/pgd/5/reference/index.mdx.src
@@ -14,6 +14,7 @@ navigation:
- repsets-management
- repsets-membership
- repsets-ddl-filtering
+- testingandtuning
- sequences
- autopartition
- streamtriggers
diff --git a/product_docs/docs/pgd/5/reference/pgd-settings.mdx b/product_docs/docs/pgd/5/reference/pgd-settings.mdx
index 6af5595994d..9d6a47eebf9 100644
--- a/product_docs/docs/pgd/5/reference/pgd-settings.mdx
+++ b/product_docs/docs/pgd/5/reference/pgd-settings.mdx
@@ -37,10 +37,13 @@ identify rows that are updated or deleted.
The accepted values are:
-- `DEFAULT` — Records the old values of the columns of the primary key,
+- `default` — Records the old values of the columns of the primary key,
if any (this is the default PostgreSQL behavior).
-- `FULL` — Records the old values of all columns in the row.
-- `NOTHING` — Records no information about the old row.
+- `full` — Records the old values of all columns in the row.
+- `nothing` — Records no information about the old row.
+- `auto` — Tables with a primary key are created with REPLICA IDENTITY DEFAULT,
+ and tables without a primary key are created with REPLICA IDENTITY FULL. This is
+ the default PGD behavior.
See [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-altertable.html#SQL-CREATETABLE-REPLICA-IDENTITY) for more details.
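+
+As a sketch, you can set this cluster-wide default in `postgresql.conf` (or with `ALTER SYSTEM`). For example:
+
+```
+bdr.default_replica_identity = 'full'
+```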
@@ -49,12 +52,12 @@ PGD can't replicate `UPDATE` and `DELETE` operations on tables without a
for the table is `FULL`, either by table-specific configuration or by
`bdr.default_replica_identity`.
-If `bdr.default_replica_identity` is `DEFAULT` and there is a `UNIQUE`
+If `bdr.default_replica_identity` is `default` and there is a `UNIQUE`
constraint on the table, it isn't automatically picked up as `REPLICA IDENTITY`.
You need to set it explicitly when creating the table or after, as described
above.
-Setting the replica identity of tables to `FULL` increases the volume of
+Setting the replica identity of tables to `full` increases the volume of
WAL written and the amount of data replicated on the wire for the table.
### `bdr.ddl_replication`
@@ -363,12 +366,12 @@ value of the base CRDT type (for example, a bigint for `crdt_pncounter`).
When set to `on`, the returned value represents the full representation of
the CRDT value, which can, for example, include the state from multiple nodes.
-## Eager Replication
+## Commit Scopes
### `bdr.commit_scope`
-Set's the current (or default) [commit scope](../durability/commit-scopes) (default
-`local`).
+Sets the current (or default) [commit scope](../durability/commit-scopes) (default
+is an empty string).
## Commit At Most Once
diff --git a/product_docs/docs/pgd/5/reference/testingandtuning.mdx b/product_docs/docs/pgd/5/reference/testingandtuning.mdx
new file mode 100644
index 00000000000..317b6746400
--- /dev/null
+++ b/product_docs/docs/pgd/5/reference/testingandtuning.mdx
@@ -0,0 +1,120 @@
+---
+title: Testing and tuning commands
+navTitle: Testing and tuning
+indexdepth: 2
+---
+
+EDB Postgres Distributed has tools that help with testing and tuning your PGD clusters. For background, read the [Testing and Tuning](../testingandtuning) section.
+
+
+## `pgd_bench`
+
+### Synopsis
+
+A benchmarking tool for PGD-enhanced PostgreSQL.
+
+```shell
+pgd_bench [OPTION]... [DBNAME] [DBNAME2]
+```
+
+`DBNAME` may be a conninfo string of the format:
+ `"host=10.1.1.2 user=postgres dbname=master"`
+
+Consult the [Testing and Tuning - Pgd_bench](../testingandtuning#pgd_bench) section for examples
+of `pgd_bench` options and usage.
+
+### Options
+
+`pgd_bench`-specific options include:
+
+#### Setting mode
+
+`-m` or `--mode`
+
+The mode can be set to `regular`, `camo`, or `failover`. It defaults to `regular`.
+
+* `regular` — Only a single node is needed to run `pgd_bench`.
+* `camo` — A second node must be specified to act as the CAMO partner. (CAMO should be set up.)
+* `failover` — A second node must be specified to act as the failover node.
+
+When using `-m failover`, an additional option, `--retry`, is available. This option
+instructs `pgd_bench` to retry transactions when there's a failover. The `--retry`
+option is automatically enabled with `-m camo`.
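+
+For example, here's a sketch of running `pgd_bench` in failover mode with retries. The hosts `node1` and `node2`, the port, and the database name are placeholders:
+
+```shell
+pgd_bench -m failover --retry -h node1 -p 5432 bdrdb \
+    "host=node2 port=5432 dbname=bdrdb user=enterprisedb"
+```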
+
+#### Setting GUC variables
+
+`-o` or `--set-option`
+
+This option is followed by `NAME=VALUE` entries, which are applied using the
+PostgreSQL [`SET`](https://www.postgresql.org/docs/current/sql-set.html) command on each server that `pgd_bench` connects to, and only on those servers.
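+
+For example, here's a sketch that applies a runtime setting on the servers `pgd_bench` connects to. The setting and the connection details are illustrative only:
+
+```shell
+pgd_bench -o "work_mem=64MB" -h node1 -p 5432 bdrdb
+```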
+
+The other options are identical to those of community PostgreSQL `pgbench`. For more
+details, consult the official documentation on
+[`pgbench`](https://www.postgresql.org/docs/current/pgbench.html).
+
+We list all the options (`pgd_bench` and `pgbench`) below for completeness.
+
+#### Initialization options:
+- `-i, --initialize` — invokes initialization mode
+- `-I, --init-steps=[dtgGvpf]+` (default `"dtgvp"`) — run selected initialization steps
+ - `d` — drop any existing `pgbench` tables
+ - `t` — create the tables used by the standard `pgbench` scenario
+ - `g` — generate data client-side and load it into the standard tables, replacing any data already present
+ - `G` — generate data server-side and load it into the standard tables, replacing any data already present
+ - `v` — invoke `VACUUM` on the standard tables
+ - `p` — create primary key indexes on the standard tables
+ - `f` — create foreign key constraints between the standard tables
+- `-F, --fillfactor=NUM` — set fill factor
+- `-n, --no-vacuum` — do not run `VACUUM` during initialization
+- `-q, --quiet` — quiet logging (one message every 5 seconds)
+- `-s, --scale=NUM` — scaling factor
+- `--foreign-keys` — create foreign key constraints between tables
+- `--index-tablespace=TABLESPACE` — create indexes in the specified tablespace
+- `--partition-method=(range|hash)` — partition `pgbench_accounts` with this method (default: range)
+- `--partitions=NUM` — partition `pgbench_accounts` into `NUM` parts (default: 0)
+- `--tablespace=TABLESPACE` — create tables in the specified tablespace
+- `--unlogged-tables` — create tables as unlogged tables (Note: unlogged tables are not replicated)
+
+#### Options to select what to run:
+- `-b, --builtin=NAME[@W]` — add builtin script NAME weighted at W (default: 1). Use `-b list` to list available scripts.
+- `-f, --file=FILENAME[@W]` — add script `FILENAME` weighted at W (default: 1)
+- `-N, --skip-some-updates` — skip updates of `pgbench_tellers` and `pgbench_branches`. Same as `-b simple-update`
+- `-S, --select-only` — perform SELECT-only transactions. Same as `-b select-only`
+
+#### Benchmarking options:
+- `-c, --client=NUM` — number of concurrent database clients (default: 1)
+- `-C, --connect` — establish new connection for each transaction
+- `-D, --define=VARNAME=VALUE` — define variable for use by custom script
+- `-j, --jobs=NUM` — number of threads (default: 1)
+- `-l, --log` — write transaction times to log file
+- `-L, --latency-limit=NUM` — count transactions lasting more than NUM ms as late
+- `-m, --mode=regular|camo|failover` — mode in which pgbench should run (default: `regular`)
+- `-M, --protocol=simple|extended|prepared` — protocol for submitting queries (default: `simple`)
+- `-n, --no-vacuum` — do not run `VACUUM` before tests
+- `-o, --set-option=NAME=VALUE` — specify runtime SET option
+- `-P, --progress=NUM` — show thread progress report every NUM seconds
+- `-r, --report-per-command` — latencies, failures and retries per command
+- `-R, --rate=NUM` — target rate in transactions per second
+- `-s, --scale=NUM` — report this scale factor in output
+- `-t, --transactions=NUM` — number of transactions each client runs (default: 10)
+- `-T, --time=NUM` — duration of benchmark test in seconds
+- `-v, --vacuum-all` — vacuum all four standard tables before tests
+- `--aggregate-interval=NUM` — aggregate data over NUM seconds
+- `--failures-detailed` — report the failures grouped by basic types
+- `--log-prefix=PREFIX` — prefix for transaction time log file (default: `pgbench_log`)
+- `--max-tries=NUM` — max number of tries to run transaction (default: 1)
+- `--progress-timestamp` — use Unix epoch timestamps for progress
+- `--random-seed=SEED` — set random seed ("time", "rand", integer)
+- `--retry` — retry transactions on failover, used with "-m"
+- `--sampling-rate=NUM` — fraction of transactions to log (e.g., 0.01 for 1%)
+- `--show-script=NAME` — show builtin script code, then exit
+- `--verbose-errors` — print messages of all errors
+
+#### Common options:
+- `-d, --debug` — print debugging output
+- `-h, --host=HOSTNAME` — database server host or socket directory
+- `-p, --port=PORT` — database server port number
+- `-U, --username=USERNAME` — connect as specified database user
+- `-V, --version` — output version information, then exit
+- `-?, --help` — show help, then exit
+
diff --git a/product_docs/docs/pgd/5/rel_notes/index.mdx b/product_docs/docs/pgd/5/rel_notes/index.mdx
index 8ca36d6563f..c5eb7e32160 100644
--- a/product_docs/docs/pgd/5/rel_notes/index.mdx
+++ b/product_docs/docs/pgd/5/rel_notes/index.mdx
@@ -20,7 +20,7 @@ that introduced the feature.
| Release Date | EDB Postgres Distributed | BDR extension | PGD CLI | PGD Proxy |
| ------------- | ---------------------------- | ------------- | ------- | --------- |
-| 2023 Aug 4 | [5.2.0](pgd_5.2.0_rel_notes) | 5.2.0 | 5.2.0 | 5.2.0 |
-| 2023 May 16 | [5.1.0](pgd_5.1.0_rel_notes) | 5.1.0 | 5.1.0 | 5.1.0 |
-| 2023 Mar 21 | [5.0.1](pgd_5.0.1_rel_notes) | 5.0.0 | 5.0.1 | 5.0.1 |
-| 2023 Feb 21 | [5.0.0](pgd_5.0.0_rel_notes) | 5.0.0 | 5.0.0 | 5.0.0 |
+| 4 Aug 2023 | [5.2.0](pgd_5.2.0_rel_notes) | 5.2.0 | 5.2.0 | 5.2.0 |
+| 16 May 2023 | [5.1.0](pgd_5.1.0_rel_notes) | 5.1.0 | 5.1.0 | 5.1.0 |
+| 21 Mar 2023 | [5.0.1](pgd_5.0.1_rel_notes) | 5.0.0 | 5.0.1 | 5.0.1 |
+| 21 Feb 2023 | [5.0.0](pgd_5.0.0_rel_notes) | 5.0.0 | 5.0.0 | 5.0.0 |
diff --git a/product_docs/docs/pgd/5/testingandtuning.mdx b/product_docs/docs/pgd/5/testingandtuning.mdx
new file mode 100644
index 00000000000..06931bee105
--- /dev/null
+++ b/product_docs/docs/pgd/5/testingandtuning.mdx
@@ -0,0 +1,153 @@
+---
+title: Testing and Tuning PGD clusters
+navTitle: Testing and Tuning
+---
+
+You can test PGD applications using the following approaches:
+
+- [Trusted Postgres Architect](#trusted-postgres-architect)
+- [pgd_bench with CAMO/Failover options](#pgd_bench)
+
+
+### Trusted Postgres Architect
+
+[Trusted Postgres Architect](/tpa/latest) is the system used by EDB to
+deploy reference architectures, including those based on EDB Postgres Distributed.
+
+Trusted Postgres Architect includes test suites for each reference architecture.
+It also simplifies creating and managing a local collection of tests to run
+against a TPA cluster, using a syntax like the following:
+
+```
+tpaexec test mycluster mytest
+```
+
+We strongly recommend that developers write their own multi-node suite
+of Trusted Postgres Architect tests that verify the main expected properties
+of the application.
+
+### pgd_bench
+
+The Postgres benchmarking application
+[`pgbench`](https://www.postgresql.org/docs/current/pgbench.html) has been
+extended in PGD 5.0 in the form of a new application: `pgd_bench`.
+
+[`pgd_bench`](/pgd/latest/reference/testingandtuning#pgd_bench) is a regular command-line utility that's added to PostgreSQL's bin
+directory. The utility is based on the community PostgreSQL `pgbench` tool but
+supports benchmarking CAMO transactions and PGD-specific workloads.
+
+The functionality of `pgd_bench` is a superset of that of `pgbench`, but it
+requires the BDR extension to be installed in order to work properly.
+
+Key differences include:
+
+- Adjustments to the initialization (`-i` flag) with the standard
+ `pgbench` scenario to prevent global lock timeouts in certain cases
+- The `VACUUM` command in the standard scenario is executed on all nodes
+- `pgd_bench` releases are tied to the releases of the BDR extension
+  and are built against the corresponding PostgreSQL flavor (this is
+  reflected in the output of the `--version` flag)
+
+The current version allows users to run failover tests while using CAMO or
+regular PGD deployments.
+
+The following options were added:
+
+```
+-m, --mode=regular|camo|failover
+mode in which pgbench should run (default: regular)
+```
+
+- Use `-m camo` or `-m failover` to specify the mode for `pgd_bench`.
+  You can use the `-m failover` specification to test failover in
+  regular PGD deployments.
+
+```
+--retry
+retry transactions on failover
+```
+
+- Use `--retry` to specify whether to retry transactions when
+ failover happens with `-m failover` mode. This option is enabled by default
+ for `-m camo` mode.
+
+In addition to these options, you must specify the connection information about
+the peer node for failover in [DSN
+form](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+
+Here's an example in a CAMO environment:
+
+```sh
+ pgd_bench -m camo -p $node1_port -h $node1_host bdrdemo \
+ "host=$node2_host user=postgres port=$node2_port dbname=bdrdemo"
+```
+
+This command runs in CAMO mode. It connects to node1 and runs the tests. If the
+connection to node1 is lost, then pgd_bench connects to node2. It queries node2
+to get the status of in-flight transactions. Aborted and in-flight transactions
+are retried in CAMO mode.
+
+In failover mode, if you specify `--retry`, then in-flight transactions are
+retried. In this scenario there's no way to find the status of in-flight
+transactions.
+
+### Notes on pgd_bench usage
+
+- When using custom init scripts, it's important to understand the implications of the DDL commands.
+We generally recommend waiting for the secondary nodes to catch up on the data-load steps
+before proceeding with DDL operations such as `CREATE INDEX`. The latter acquire global locks that
+can't be acquired until the data load is complete and thus may time out.
+
+- No extra steps are taken to suppress client messages, such as `NOTICE` and `WARNING` messages emitted
+by PostgreSQL or any possible extensions, including the BDR extension. It's the user's
+responsibility to suppress them by setting appropriate variables (for example, `client_min_messages`,
+`bdr.camo_enable_client_warnings`).
+
+
+
+## Performance testing and tuning
+
+PGD allows you to issue write transactions onto multiple master nodes. Bringing
+those writes back together onto each node has a cost in performance.
+
+First, replaying changes from another node has a CPU cost, an I/O cost,
+and it generates WAL records. The resource use is usually less
+than in the original transaction since CPU overheads are lower as a result
+of not needing to reexecute SQL. In the case of UPDATE and DELETE
+transactions, there might be I/O costs on replay if data isn't cached.
+
+Second, replaying changes holds table-level and row-level locks that can produce
+contention against local workloads. The conflict-free replicated data types
+(CRDT) and column-level conflict detection (CLCD) features ensure you get the
+correct answers even for concurrent updates, but they don't remove the normal
+locking overheads. If you get locking contention, try to avoid conflicting
+updates, or keep transactions as short as possible. A heavily updated row in a
+larger transaction causes a bottleneck on performance for that transaction.
+Complex applications require some thought to maintain scalability.
+
+If you think you're having performance problems, develop performance tests using
+the benchmarking tools. pgd_bench allows you to write custom test scripts specific
+to your use case so you can understand the overheads of your SQL and measure the
+impact of concurrent execution.
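+
+For example, here's a minimal sketch of a custom script that repeatedly updates a single row. The table and column names are hypothetical, and you could run it with something like `pgd_bench -f update_hot_row.sql -c 8 -T 60`:
+
+```
+\set id random(1, 100000)
+BEGIN;
+UPDATE test_table SET counter = counter + 1 WHERE id = :id;
+END;
+```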
+
+If PGD is running slow, then we suggest the following:
+
+1. Write a custom test script for pgd_bench, as close as you can make it
+ to the production system's problem case.
+2. Run the script on one node to give you a baseline figure.
+3. Run the script on as many nodes as occurs in production, using the
+ same number of sessions in total as you did on one node. This technique
+ shows you the effect of moving to multiple nodes.
+4. Increase the number of sessions for these two tests so you can
+ plot the effect of increased contention on your application.
+5. Make sure your tests are long enough to account for replication delays.
+6. Ensure that replication delay isn't growing during your tests.
+
+Use all of the normal Postgres tuning features to improve the speed
+of critical parts of your application.
+
+
diff --git a/product_docs/docs/pgd/5/upgrades/upgrade_paths.mdx b/product_docs/docs/pgd/5/upgrades/upgrade_paths.mdx
index 0802df449da..d911ba899e3 100644
--- a/product_docs/docs/pgd/5/upgrades/upgrade_paths.mdx
+++ b/product_docs/docs/pgd/5/upgrades/upgrade_paths.mdx
@@ -2,53 +2,39 @@
title: Supported PGD upgrade paths
---
-
## Upgrading within version 5
-| 5.0.0 | 5.0.1 | 5.1.0 | Target PGD version |
-|-------|-------|-------|--------------------|
-| ✓ | ✓ | ✓ | 5.2.0 |
-| ✓ | ✓ | | 5.1.0 |
-| ✓ | | | 5.0.1 |
-
+You can upgrade from any version 5.x release to a later 5.x release.
## Upgrading from version 4 to version 5
-Upgrades from PGD 4 to PGD 5 are supported from version 4.3.0. For older versions, upgrade to 4.3.0 before upgrading to 5. See [Upgrading within 4](/pgd/4/upgrades/upgrade_paths/#upgrading-within-version-4) for more information. After upgrading to 4.3.0 or later, the following combinations are allowed.
-
-| 4.3.0 | 4.3.0-1 | 4.3.1 | 4.3.1-1 | Target PGD version |
-|-------|---------|-------|---------|--------------------|
-| ✓ | ✓ | ✓ | ✓ | 5.2.0 |
-| ✓ | ✓ | ✓ | | 5.1.0 |
-| ✓ | ✓ | | | 5.0.1 |
-| ✓ | | | | 5.0.0 |
-
-
-
-## Upgrading from version 3.7 to version 5
-
-Currently there are no direct upgrade paths from 3.7 to 5. You must first upgrade your cluster to 4.3.0 or later before upgrading to 5. See [Upgrading from version 3.7 to version 4](/pgd/4/upgrades/upgrade_paths/#upgrading-from-version-37-to-version-4) for more information.
+Upgrades from PGD 4 to PGD 5 are supported from version 4.3.0. For older
+versions, upgrade to 4.3.0 before upgrading to 5. See [Upgrading within
+4](/pgd/4/upgrades/upgrade_paths/#upgrading-within-version-4) for more
+information. Generally, we recommend you upgrade to the latest version 4
+release, before upgrading to the latest version 5 release. After upgrading to
+4.3.0 or later, the following upgrade paths are possible.
+| From version | To version |
+| ---- | -- |
+| 4.3.0 | 5.0.0 or later |
+| 4.3.1 | 5.1.0 or later |
+| 4.3.2 | 5.2.0 or later |
-
-
-
-
-
-
-
-
-
-
-
+At this time, TPA enables upgrades only from PGD 3.7 to 4 and from PGD 4 to 5.
+Customers moving from 3.7 to 5 who want automation need to use this
+two-step upgrade path and might have their proxy architecture changed
+multiple times, from HAProxy to HARP and finally to PGD Proxy.
+
+Significant development has been done to make upgrading directly from 3.7 to 5
+possible and easier. We've expanded the database versions supported with PGD 5
+and enabled the upgradeability of the BDR extension across multiple PGD
+versions. Previously, we only supported upgrades from the previous major
+version. Often, a database upgrade was also required because only one database
+major version was supported by both PGD versions.
+
+Development is currently ongoing to provide a direct upgrade path from 3.7 to 5
+using TPA by Q4. In the interim, customers may reach out to EDB Professional
+Services or Support for assistance and guidance.
\ No newline at end of file
diff --git a/product_docs/docs/pge/15/release_notes/index.mdx b/product_docs/docs/pge/15/release_notes/index.mdx
index bd99b69f9b0..82131a33b20 100644
--- a/product_docs/docs/pge/15/release_notes/index.mdx
+++ b/product_docs/docs/pge/15/release_notes/index.mdx
@@ -1,6 +1,7 @@
---
title: "Release notes"
navigation:
+ - rel_notes15.4
- rel_notes15.3
- rel_notes15.2
---
@@ -10,8 +11,9 @@ cover what was new in each release.
| Version | Release date |
| ------------------------ | ------------ |
-| [15.3](rel_notes15.3) | 2023 May 11 |
-| [15.2](rel_notes15.2) | 2023 Feb 14 |
+| [15.4](rel_notes15.4) | 21 Aug 2023 |
+| [15.3](rel_notes15.3) | 11 May 2023 |
+| [15.2](rel_notes15.2) | 14 Feb 2023 |
diff --git a/product_docs/docs/pge/15/release_notes/rel_notes15.4.mdx b/product_docs/docs/pge/15/release_notes/rel_notes15.4.mdx
new file mode 100644
index 00000000000..f26c12e7833
--- /dev/null
+++ b/product_docs/docs/pge/15/release_notes/rel_notes15.4.mdx
@@ -0,0 +1,14 @@
+---
+title: "EDB Postgres Extended Server version 15.4"
+navTitle: Version 15.4
+---
+
+New features, enhancements, bug fixes, and other changes in EDB Postgres Extended Server 15.4 include:
+
+| Type | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Upstream merge | Merged with community PostgreSQL 15.4. See the [PostgreSQL 15 Release Notes](https://www.postgresql.org/docs/15/release-15-4.html) for more information. |
+| Bug fix | Fixed a memory leak experienced when using EDB Postgres Distributed (PGD) with Transparent Data Encryption (TDE). [Support issue: #93936] |
+
+
+
diff --git a/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx b/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
index 24731fc47b7..1b0236f8d2f 100644
--- a/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
+++ b/product_docs/docs/pgpool/4/pgpool_rel_notes/index.mdx
@@ -18,8 +18,8 @@ The EDB Pgpool-II documentation describes the latest version of EDB Pgpool-II, i
| Version | Release Date | Upstream merges | |
| ------------------------------------------------------------------------------------------------------------------------------- | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- |
-| [4.4.2](442_rel_notes) | 2023 Feb 14 | Upstream [4.4.2](https://www.pgpool.net/docs/44/en/html/release-4-4-2.html) | |
-| [4.3.2](07_432_rel_notes), [4.2.9](429_rel_notes), [4.1.12](4112_rel_notes), [4.0.19](4019_rel_notes), [3.7.24](3724_rel_notes) | 2022 Jul 05 | Upstream [4.3.2](https://www.pgpool.net/docs/43/en/html/release-4-3-2.html#bug-fixes), [4.2.9](https://www.pgpool.net/docs/43/en/html/release-4-2-9.html#bug-fixes), [4.1.12](https://www.pgpool.net/docs/43/en/html/release-4-1-12.html#bug-fixes), [4.0.19](https://www.pgpool.net/docs/43/en/html/release-4-0-19.html#bug-fixes), [3.7.24](https://www.pgpool.net/docs/43/en/html/release-3-7-24.html#bug-fixes) | |
-| [4.3.0](08_430_rel_notes) | 2022 Feb 01 | Upstream [4.3.0](https://www.pgpool.net/docs/43/en/html/release-4-3-0.html#bug-fixes) | |
-| [4.2.6](09_426_rel_notes) | 2021 Dec 01 | Upstream [4.2.6](https://www.pgpool.net/docs/42/en/html/release-4-2-6.html#bug-fixes) | |
-| [4.2.5](10_425_rel_notes) | 2021 Jun 10 | Upstream [4.2.5](https://www.pgpool.net/docs/42/en/html/release-4-2-5.html) and [4.2.4](https://www.pgpool.net/docs/42/en/html/release-4-2-4.html) | |
\ No newline at end of file
+| [4.4.2](442_rel_notes) | 14 Feb 2023 | Upstream [4.4.2](https://www.pgpool.net/docs/44/en/html/release-4-4-2.html) | |
+| [4.3.2](07_432_rel_notes), [4.2.9](429_rel_notes), [4.1.12](4112_rel_notes), [4.0.19](4019_rel_notes), [3.7.24](3724_rel_notes) | 05 Jul 2022 | Upstream [4.3.2](https://www.pgpool.net/docs/43/en/html/release-4-3-2.html#bug-fixes), [4.2.9](https://www.pgpool.net/docs/43/en/html/release-4-2-9.html#bug-fixes), [4.1.12](https://www.pgpool.net/docs/43/en/html/release-4-1-12.html#bug-fixes), [4.0.19](https://www.pgpool.net/docs/43/en/html/release-4-0-19.html#bug-fixes), [3.7.24](https://www.pgpool.net/docs/43/en/html/release-3-7-24.html#bug-fixes) | |
+| [4.3.0](08_430_rel_notes) | 01 Feb 2022 | Upstream [4.3.0](https://www.pgpool.net/docs/43/en/html/release-4-3-0.html#bug-fixes) | |
+| [4.2.6](09_426_rel_notes) | 01 Dec 2021 | Upstream [4.2.6](https://www.pgpool.net/docs/42/en/html/release-4-2-6.html#bug-fixes) | |
+| [4.2.5](10_425_rel_notes) | 10 Jun 2021 | Upstream [4.2.5](https://www.pgpool.net/docs/42/en/html/release-4-2-5.html) and [4.2.4](https://www.pgpool.net/docs/42/en/html/release-4-2-4.html) | |
\ No newline at end of file
diff --git a/product_docs/docs/postgis/3.2/01_release_notes/index.mdx b/product_docs/docs/postgis/3.2/01_release_notes/index.mdx
index ba879151a01..769dc9c3e12 100644
--- a/product_docs/docs/postgis/3.2/01_release_notes/index.mdx
+++ b/product_docs/docs/postgis/3.2/01_release_notes/index.mdx
@@ -13,11 +13,11 @@ cover what was new in each release.
| Version | Release date |
| ------------------------ | ------------ |
-| [3.2.1](rel_notes321) | 2022 Aug 04 |
-| [3.2.0](rel_notes32) | 2022 Dec 01 |
-| [3.1.5](rel_notes315) | 2022 Aug 03 |
-| [3.1.4](rel_notes314) | 2021 Dec 01 |
-| [3.1.2](rel_notes312) | 2021 Jun 24|
+| [3.2.1](rel_notes321) | 04 Aug 2022 |
+| [3.2.0](rel_notes32) | 01 Dec 2022 |
+| [3.1.5](rel_notes315) | 03 Aug 2022 |
+| [3.1.4](rel_notes314) | 01 Dec 2021 |
+| [3.1.2](rel_notes312) | 24 Jun 2021 |
diff --git a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
index a847b4e6165..cf3533088ad 100644
--- a/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
+++ b/product_docs/docs/postgres_distributed_for_kubernetes/1/rel_notes/index.mdx
@@ -11,7 +11,7 @@ The EDB Postgres Distributed for Kubernetes documentation describes the major ve
| Version | Release date |
| -------------------------- | ------------ |
-| [0.6.0](0_6_rel_notes) | 2023 May 15 |
+| [0.6.0](0_6_rel_notes) | 15 May 2023 |
diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
index 6f00e5e9e10..523dbc4964d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
@@ -89,11 +89,12 @@ primary/standby architecture, using native streaming replication.
## Features unique to EDB Postgres for Kubernetes
-- [Long Term Support](#long-term-support) for 1.18.x
+- [Long Term Support](#long-term-support)
- Red Hat certified operator for OpenShift
-- Support on IBM Power
+- Support on IBM Power and z/Linux through partnership with IBM
+- [Oracle compatibility](https://www.enterprisedb.com/docs/epas/latest/fundamentals/epas_fundamentals/epas_compat_ora_dev_guide/) through EDB Postgres Advanced Server
+- [Transparent Data Encryption (TDE)](https://www.enterprisedb.com/docs/tde/latest/) through EDB Postgres Advanced Server
- EDB Postgres for Kubernetes Plugin
-- Oracle compatibility through EDB Postgres Advanced Sever
- Velero/OADP cold backup support
- Generic adapter for third-party Kubernetes backup tools
@@ -107,18 +108,20 @@ You need a valid license key to use EDB Postgres for Kubernetes in production.
### Long Term Support
-EDB is committed to declaring one version of EDB Postgres for Kubernetes per
-year as a Long Term Support version. This version will be supported and receive
-maintenance releases for an additional 12 months beyond the last release of
-CloudNativePG by the community for the same version. For example, the last
-version of 1.18 of CloudNativePG was released on June 12, 2023. This was
-declared a LTS version of EDB Postgres for Kubernetes and it will be supported
-for additional 12 months until June 12, 2024. Customers can expect that they
-will have at least 6 months to move between LTS versions. So they should
-expect the next LTS to be available by January 12, 2024 to allow at least 6
-months to migrate. While we encourage customers to regularly upgrade to the
-latest version of the operator to take advantage of new features, having LTS
-versions allows customers desiring additional stability to stay on the same
+EDB is committed to declaring a Long Term Support (LTS) version of EDB
+Postgres for Kubernetes annually (1.18 was our first). Each LTS version will
+receive maintenance releases and be supported for an additional 12 months beyond
+the last community release of CloudNativePG for the same version.
+
+For example, the last release of CloudNativePG 1.18 was on June 12, 2023.
+Because this was declared an LTS version of EDB Postgres for Kubernetes, it will be supported
+for an additional 12 months, until June 12, 2024.
+
+In addition, customers will always have at least 6 months to move between LTS versions. This
+means a new LTS version will be available by January 12, 2024 at the latest.
+
+While we encourage customers to regularly upgrade to the latest version of the operator to take
+advantage of new features, having LTS versions allows customers desiring additional stability to stay on the same
version for 12-18 months before upgrading.
## Licensing
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
index 2f1cace2e97..db070492918 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
@@ -862,7 +862,7 @@ it from the actual pod. This means that you will be using the `postgres` user.
```shell
kubectl cnp psql cluster-example
-psql (15.3 (Debian 15.3-1.pgdg110+1))
+psql (15.3)
Type "help" for help.
postgres=#
@@ -873,7 +873,7 @@ select to work against a replica by using the `--replica` option:
```shell
kubectl cnp psql --replica cluster-example
-psql (15.3 (Debian 15.3-1.pgdg110+1))
+psql (15.3)
Type "help" for help.
@@ -889,6 +889,16 @@ postgres=# \q
This command will start `kubectl exec`, and the `kubectl` executable must be
reachable in your `PATH` variable to correctly work.
+!!! Note
+When connecting to instances running on OpenShift, you must explicitly
+pass a username to the `psql` command, because of a [security measure built into
+OpenShift](https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids):
+
+```shell
+kubectl cnp psql cluster-example -- -U postgres
+```
+!!!
+
### Snapshotting a Postgres cluster
The `kubectl cnp snapshot` creates consistent snapshots of a Postgres
@@ -931,4 +941,4 @@ A specific `VolumeSnapshotClass` can be requested via the `-c` option:
```shell
kubectl cnp snapshot cluster-example -c longhorn
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
index 335c160cdfa..d490711144a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
@@ -63,58 +63,58 @@ The EDB Postgres for Kubernetes documentation describes the major version of EDB
| Version | Release date | Upstream merges |
| -------------------------- | ------------ | ------------------------------------------------------------------------------------------- |
-| [1.20.2](1_20_2_rel_notes) | 2023 Jul 27 | Upstream [1.20.2](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
-| [1.20.1](1_20_1_rel_notes) | 2023 Jun 13 | Upstream [1.20.1](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
-| [1.20.0](1_20_0_rel_notes) | 2023 Apr 27 | Upstream [1.20.0](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
-| [1.19.4](1_19_4_rel_notes) | 2023 Jul 27 | Upstream [1.19.4](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
-| [1.19.3](1_19_3_rel_notes) | 2023 Jun 13 | Upstream [1.19.3](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
-| [1.19.2](1_19_2_rel_notes) | 2023 Apr 27 | Upstream [1.19.2](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
-| [1.19.1](1_19_1_rel_notes) | 2023 Mar 20 | Upstream [1.19.1](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
-| [1.19.0](1_19_0_rel_notes) | 2023 Feb 14 | Upstream [1.19.0](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
-| [1.18.6](1_18_6_rel_notes) | 2023 Jul 27 | None |
-| [1.18.5](1_18_5_rel_notes) | 2023 Jun 13 | Upstream [1.18.5](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.18.4](1_18_4_rel_notes) | 2023 Apr 27 | Upstream [1.18.4](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.18.3](1_18_3_rel_notes) | 2023 Mar 20 | Upstream [1.18.3](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.18.2](1_18_2_rel_notes) | 2023 Feb 14 | Upstream [1.18.2](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.18.1](1_18_1_rel_notes) | 2022 Dec 21 | Upstream [1.18.1](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.18.0](1_18_0_rel_notes) | 2022 Nov 14 | Upstream [1.18.0](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
-| [1.17.5](1_17_5_rel_notes) | 2023 Mar 20 | Upstream [1.17.5](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.17.4](1_17_4_rel_notes) | 2023 Feb 14 | Upstream [1.17.4](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.17.3](1_17_3_rel_notes) | 2022 Dec 21 | Upstream [1.17.3](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.17.2](1_17_2_rel_notes) | 2022 Nov 14 | Upstream [1.17.2](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.17.1](1_17_1_rel_notes) | 2022 Oct 7 | Upstream [1.17.1](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.17.0](1_17_rel_notes) | 2022 Sep 6 | Upstream [1.17.0](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
-| [1.16.4](1_16_4_rel_notes) | 2022 Nov 14 | Upstream [1.16.4](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
-| [1.16.3](1_16_3_rel_notes) | 2022 Oct 7 | Upstream [1.16.3](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
-| [1.16.2](1_16_2_rel_notes) | 2022 Sep 6 | Upstream [1.16.2](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
-| [1.16.1](1_16_1_rel_notes) | 2022 Aug 12 | Upstream [1.16.1](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
-| [1.16.0](1_16_rel_notes) | 2022 Jul 07 | Upstream [1.16.0](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16) |
-| [1.15.5](1_15_5_rel_notes) | 2022 Oct 7 | Upstream [1.15.5](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.15.4](1_15_4_rel_notes) | 2022 Sep 6 | Upstream [1.15.4](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.15.3](1_15_3_rel_notes) | 2022 Aug 12 | Upstream [1.15.3](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.15.2](1_15_2_rel_notes) | 2022 Jul 07 | Upstream [1.15.2](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.15.1](1_15_1_rel_notes) | 2022 May 27 | Upstream [1.15.1](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.15.0](1_15_rel_notes) | 2022 Apr 21 | Upstream [1.15.0](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
-| [1.14.0](1_14_rel_notes) | 2022 Mar 25 | NA |
-| [1.13.0](1_13_rel_notes) | 2022 Feb 17 | NA |
-| [1.12.0](1_12_rel_notes) | 2022 Jan 11 | NA |
-| [1.11.0](1_11_rel_notes) | 2021 Dec 15 | NA |
-| [1.10.0](1_10_rel_notes) | 2021 Nov 11 | NA |
-| [1.9.2](1_9_2_rel_notes) | 2021 Oct 15 | NA |
-| [1.9.1](1_9_1_rel_notes) | 2021 Sep 30 | NA |
-| [1.9.0](1_9_rel_notes) | 2021 Sep 28 | NA |
-| [1.8.0](1_8_rel_notes) | 2021 Sep 13 | NA |
-| [1.7.1](1_7_1_rel_notes) | 2021 Aug 11 | NA |
-| [1.7.0](1_7_rel_notes) | 2021 Jul 28 | NA |
-| [1.6.0](1_6_rel_notes) | 2021 Jul 12 | NA |
-| [1.5.1](1_5_1_rel_notes) | 2021 Jun 11 | NA |
-| [1.5.0](1_5_rel_notes) | 2021 Jun 17 | NA |
-| [1.4.0](1_4_rel_notes) | 2021 May 18 | NA |
-| [1.3.0](1_3_rel_notes) | 2021 Apr 23 | NA |
-| [1.2.1](1_2_1_rel_notes) | 2021 Apr 06 | NA |
-| [1.2.0](1_2_rel_notes) | 2021 Mar 31 | NA |
-| [1.1.0](1_1_rel_notes) | 2021 Mar 03 | NA |
-| [1.0.0](1_0_rel_notes) | 2021 Feb 04 | NA |
+| [1.20.2](1_20_2_rel_notes) | 27 Jul 2023 | Upstream [1.20.2](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
+| [1.20.1](1_20_1_rel_notes) | 13 Jun 2023 | Upstream [1.20.1](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
+| [1.20.0](1_20_0_rel_notes) | 27 Apr 2023 | Upstream [1.20.0](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
+| [1.19.4](1_19_4_rel_notes) | 27 Jul 2023 | Upstream [1.19.4](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.19.3](1_19_3_rel_notes) | 13 Jun 2023 | Upstream [1.19.3](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.19.2](1_19_2_rel_notes) | 27 Apr 2023 | Upstream [1.19.2](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.19.1](1_19_1_rel_notes) | 20 Mar 2023 | Upstream [1.19.1](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.19.0](1_19_0_rel_notes) | 14 Feb 2023 | Upstream [1.19.0](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.18.6](1_18_6_rel_notes) | 27 Jul 2023 | None |
+| [1.18.5](1_18_5_rel_notes) | 13 Jun 2023 | Upstream [1.18.5](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.18.4](1_18_4_rel_notes) | 27 Apr 2023 | Upstream [1.18.4](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.18.3](1_18_3_rel_notes) | 20 Mar 2023 | Upstream [1.18.3](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.18.2](1_18_2_rel_notes) | 14 Feb 2023 | Upstream [1.18.2](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.18.1](1_18_1_rel_notes) | 21 Dec 2022 | Upstream [1.18.1](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.18.0](1_18_0_rel_notes) | 14 Nov 2022 | Upstream [1.18.0](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
+| [1.17.5](1_17_5_rel_notes) | 20 Mar 2023 | Upstream [1.17.5](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.17.4](1_17_4_rel_notes) | 14 Feb 2023 | Upstream [1.17.4](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.17.3](1_17_3_rel_notes) | 21 Dec 2022 | Upstream [1.17.3](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.17.2](1_17_2_rel_notes) | 14 Nov 2022 | Upstream [1.17.2](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.17.1](1_17_1_rel_notes) | 07 Oct 2022 | Upstream [1.17.1](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.17.0](1_17_rel_notes) | 06 Sep 2022 | Upstream [1.17.0](https://cloudnative-pg.io/documentation/1.17/release_notes/v1.17/) |
+| [1.16.4](1_16_4_rel_notes) | 14 Nov 2022 | Upstream [1.16.4](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
+| [1.16.3](1_16_3_rel_notes) | 07 Oct 2022 | Upstream [1.16.3](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
+| [1.16.2](1_16_2_rel_notes) | 06 Sep 2022 | Upstream [1.16.2](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
+| [1.16.1](1_16_1_rel_notes) | 12 Aug 2022 | Upstream [1.16.1](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16/) |
+| [1.16.0](1_16_rel_notes) | 07 Jul 2022 | Upstream [1.16.0](https://cloudnative-pg.io/documentation/1.16/release_notes/v1.16) |
+| [1.15.5](1_15_5_rel_notes) | 07 Oct 2022 | Upstream [1.15.5](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.15.4](1_15_4_rel_notes) | 06 Sep 2022 | Upstream [1.15.4](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.15.3](1_15_3_rel_notes) | 12 Aug 2022 | Upstream [1.15.3](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.15.2](1_15_2_rel_notes) | 07 Jul 2022 | Upstream [1.15.2](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.15.1](1_15_1_rel_notes) | 27 May 2022 | Upstream [1.15.1](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.15.0](1_15_rel_notes) | 21 Apr 2022 | Upstream [1.15.0](https://cloudnative-pg.io/documentation/1.15/release_notes/v1.15) |
+| [1.14.0](1_14_rel_notes) | 25 Mar 2022 | NA |
+| [1.13.0](1_13_rel_notes) | 17 Feb 2022 | NA |
+| [1.12.0](1_12_rel_notes) | 11 Jan 2022 | NA |
+| [1.11.0](1_11_rel_notes) | 15 Dec 2021 | NA |
+| [1.10.0](1_10_rel_notes) | 11 Nov 2021 | NA |
+| [1.9.2](1_9_2_rel_notes) | 15 Oct 2021 | NA |
+| [1.9.1](1_9_1_rel_notes) | 30 Sep 2021 | NA |
+| [1.9.0](1_9_rel_notes) | 28 Sep 2021 | NA |
+| [1.8.0](1_8_rel_notes) | 13 Sep 2021 | NA |
+| [1.7.1](1_7_1_rel_notes) | 11 Aug 2021 | NA |
+| [1.7.0](1_7_rel_notes) | 28 Jul 2021 | NA |
+| [1.6.0](1_6_rel_notes) | 12 Jul 2021 | NA |
+| [1.5.1](1_5_1_rel_notes) | 11 Jun 2021 | NA |
+| [1.5.0](1_5_rel_notes) | 17 Jun 2021 | NA |
+| [1.4.0](1_4_rel_notes) | 18 May 2021 | NA |
+| [1.3.0](1_3_rel_notes) | 23 Apr 2021 | NA |
+| [1.2.1](1_2_1_rel_notes) | 06 Apr 2021 | NA |
+| [1.2.0](1_2_rel_notes) | 31 Mar 2021 | NA |
+| [1.1.0](1_1_rel_notes) | 03 Mar 2021 | NA |
+| [1.0.0](1_0_rel_notes) | 04 Feb 2021 | NA |
diff --git a/product_docs/docs/tpa/23/INSTALL.mdx b/product_docs/docs/tpa/23/INSTALL.mdx
index 02ffb7e25ee..6398a34f496 100644
--- a/product_docs/docs/tpa/23/INSTALL.mdx
+++ b/product_docs/docs/tpa/23/INSTALL.mdx
@@ -5,14 +5,15 @@ originalFilePath: INSTALL.md
---
-To use TPA, you need to install tpaexec and run the `tpaexec setup`
-command. This document explains how to install TPA packages. See
-[Distribution support](reference/distributions/) for information on what
-platforms are supported.
+To use TPA, you need to install it from packages or source and then run
+the `tpaexec setup` command. This document explains how to install TPA
+packages. If you have an EDB subscription plan, and therefore have
+access to the EDB repositories, you should follow these instructions. To
+install TPA from source, refer to
+[Installing TPA from Source](reference/INSTALL-repo/).
-TPA packages are available to prospects (for a 60 day trial), EDB
-customers with a valid Extreme HA subscription, or by prior arrangement.
-Please contact your account manager to request access.
+See [Distribution support](reference/distributions/) for information
+on what platforms are supported.
!!! Info
@@ -25,19 +26,21 @@ Please contact your account manager to request access.
Login to [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads)
to obtain your token. Then execute the following command, substituting
-your token for ``.
+your token for `<your-token>` and replacing `<your-plan>` with
+one of the following, according to the EDB plan you are subscribed to:
+`enterprise`, `standard`, `community360`, `postgres_distributed`.
#### Add repository and install TPA on Debian or Ubuntu
```bash
-curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed/setup.deb.sh' | sudo -E bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/<your-plan>/setup.deb.sh' | sudo -E bash
sudo apt-get install tpaexec
```
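+
+For example, if you are subscribed to the `enterprise` plan, the
+commands might look like the following (the token shown is a
+placeholder, not a real value):
+
+```bash
+curl -1sLf 'https://downloads.enterprisedb.com/XXXXXXXXXXXXXXXX/enterprise/setup.deb.sh' | sudo -E bash
+sudo apt-get install tpaexec
+```
+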
#### Add repository and install TPA on RHEL, Rocky, AlmaLinux or Oracle Linux
```bash
-curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed/setup.rpm.sh' | sudo -E bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/<your-plan>/setup.rpm.sh' | sudo -E bash
sudo yum install tpaexec
```
@@ -58,7 +61,8 @@ More detailed explanations of each step are given below.
## Where to install TPA
As long as you are using a supported platform, TPA can be installed and
-run from your workstation. This is fine for learning, local testing or demonstration purposes. TPA supports [deploying to Docker containers](platform-docker/)
+run from your workstation. This is fine for learning, local testing or
+demonstration purposes. TPA supports [deploying to Docker containers](platform-docker/)
should you wish to perform a complete deployment on your own workstation.
For production use, we recommend running TPA on a dedicated, persistent
@@ -75,19 +79,21 @@ provides it. The preferred source for repositories is EDB Repos 2.0.
Login to [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads)
to obtain your token. Then execute the following command, substituting
-your token for ``.
+your token for `<your-token>` and replacing `<your-plan>` with
+one of the following, according to the EDB plan you are subscribed to:
+`enterprise`, `standard`, `community360`, `postgres_distributed`.
#### Add repository on Debian or Ubuntu
```bash
-curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed/setup.deb.sh' | sudo -E bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/<your-plan>/setup.deb.sh' | sudo -E bash
```
#### Add repository on RHEL, Rocky, AlmaLinux or Oracle Linux
```bash
-curl -1sLf 'https://downloads.enterprisedb.com//postgres_distributed/setup.rpm.sh' | sudo -E bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/<your-plan>/setup.rpm.sh' | sudo -E bash
```
Alternatively, you may obtain TPA from the legacy 2ndQuadrant
diff --git a/product_docs/docs/tpa/23/architecture-M1.mdx b/product_docs/docs/tpa/23/architecture-M1.mdx
index ffa6345b1c4..ca8b39ddccf 100644
--- a/product_docs/docs/tpa/23/architecture-M1.mdx
+++ b/product_docs/docs/tpa/23/architecture-M1.mdx
@@ -6,15 +6,12 @@ originalFilePath: architecture-M1.md
A Postgres cluster with a primary and a streaming replica, one Barman
server, and any number of additional replicas cascaded from the first
-one. This architecture is suitable for testing, demonstrating and
-learning. We plan to release a production primary/standby architecture
-for TPA in the near future.
-
-In default configuration this architecture uses open source software
-only. To use subscription-only EDB software with this architecture
-requires credentials for EDB Repos 1.0. If you choose EDB Advanced
-Server (EPAS) you will also require credentials for the legacy
-2ndQuadrant repos.
+one. This architecture is suitable for production and is also suited to
+testing, demonstrating and learning, thanks to its simplicity and its
+ability to be configured with no proprietary components.
+
+If you select subscription-only EDB software with this architecture,
+it will be sourced from EDB Repos 2.0 and you will need to provide a token.
See [How TPA uses 2ndQuadrant and EDB repositories](reference/2q_and_edb_repositories/)
for more detail on this topic.
@@ -32,6 +29,26 @@ additionally configured as a witness. This ensures that the
number of nodes is always odd, which is convenient when
enabling automatic failover.
+## Application and backup failover
+
+The M1 architecture implements failover management in that it ensures
+that a replica will be promoted to take the place of the primary should
+the primary become unavailable. However, it *does not provide any
+automatic facility to reroute application traffic to the new primary*.
+If you require automatic failover of application traffic, you will need
+to configure this in the application itself (for example, using
+multi-host connections, as sketched below) or by using an appropriate
+proxy or load balancer together with the facilities offered by your
+selected failover manager.
+
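+As a sketch of the multi-host approach (the host names, user and
+database below are placeholders, not values created by TPA), a
+libpq-style connection string can list every Postgres node and ask for
+the writable one:
+
+```bash
+psql 'postgresql://app_user@node1,node2,node3:5432/appdb?target_session_attrs=read-write'
+```
+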
+The above is also true of the connection between the backup node and
+the primary created by TPA. The backup will not be automatically
+adjusted to target the new primary in the event of failover; instead,
+it will remain connected to the original primary. If you are performing
+a manual failover and wish to connect the backup to the new primary,
+you may simply re-run `tpaexec deploy`, as shown below. If you wish to
+change the backup source automatically, you should implement this using
+your selected failover manager, as noted above.
+
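+For example, after a manual failover you can re-point the backup at the
+new primary by re-running the deployment against your existing cluster
+directory (the path below assumes the configuration example shown in
+the next section):
+
+```bash
+tpaexec deploy ~/clusters/m1
+```
+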
## Cluster configuration
### Overview of configuration options
@@ -44,7 +61,8 @@ tpaexec configure ~/clusters/m1 \
--architecture M1 \
--platform aws --region eu-west-1 --instance-type t3.micro \
--distribution Debian \
- --postgresql 14
+ --postgresql 14 \
+ --failover-manager repmgr
```
You can list all available options using the help command.
@@ -59,25 +77,22 @@ More detail on the options is provided in the following section.
#### Mandatory Options
-| Parameter | Description |
-| ----------------------------------------------------- | ----------------------------------------------------------------------------------------- |
-| `--architecture` (`-a`) | Must be set to `M1`. |
-| Postgres flavour and version (e.g. `--postgresql 15`) | A valid [flavour and version specifier](tpaexec-configure/#postgres-flavour-and-version). |
+| Parameter | Description |
+| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `--architecture` (`-a`) | Must be set to `M1`. |
+| Postgres flavour and version (e.g. `--postgresql 15`) | A valid [flavour and version specifier](tpaexec-configure/#postgres-flavour-and-version). |
+| One of: * `--failover-manager {efm, repmgr, patroni}` * `--enable-efm` * `--enable-repmgr` * `--enable-patroni` | Select the failover manager from [`efm`](reference/efm/), [`repmgr`](reference/repmgr/) and [`patroni`](reference/patroni/). |
#### Additional Options
-| Parameter | Description | Behaviour if omitted |
-| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
-| `--platform` | One of `aws`, `docker`, `bare`. | Defaults to `aws`. |
-| `--num-cascaded-replicas` | The number of cascaded replicas from the first replica. | Defaults to 1. |
-| `--failover-manager` | Select the failover manager from `efm`, `repmgr` and `patroni`. | TPA will select EFM as the failover manager for EPAS, and repmgr for all other flavours. |
-| `--enable-efm` | Configure Failover Manager as the cluster failover manager. | See `--failover-manager` above. |
-| `--enable-repmgr` | Configure Replication Manager as the cluster failover manager. | See `--failover-manager` above. |
-| `--enable-patroni` | Configure Patroni as the cluster failover manager. | See `--failover-manager` above. |
-| `--enable-haproxy` | 2 additional nodes will be added as a load balancer layer. Only supported with Patroni as the failover manager. | HAproxy nodes will not be added to the cluster. |
-| `--patroni-dcs` | Select the Distributed Configuration Store backend for patroni. Only option is `etcd` at this time. Only supported with Patroni as the failover manager. | Defaults to `etcd`. |
+| Parameter | Description | Behaviour if omitted |
+| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
+| `--platform` | One of `aws`, `docker`, `bare`. | Defaults to `aws`. |
+| `--num-cascaded-replicas` | The number of cascaded replicas from the first replica. | Defaults to 1. |
+| `--enable-haproxy` | 2 additional nodes will be added as a load balancer layer. Only supported with Patroni as the failover manager. | HAproxy nodes will not be added to the cluster. |
+| `--patroni-dcs` | Select the Distributed Configuration Store backend for patroni. Only option is `etcd` at this time. Only supported with Patroni as the failover manager. | Defaults to `etcd`. |
diff --git a/product_docs/docs/tpa/23/index.mdx b/product_docs/docs/tpa/23/index.mdx
index 01355e1f8eb..f7dfb75ecfb 100644
--- a/product_docs/docs/tpa/23/index.mdx
+++ b/product_docs/docs/tpa/23/index.mdx
@@ -95,7 +95,8 @@ TPA operates in four distinct stages to bring up a Postgres cluster:
```bash
# 1. Configuration: decide what kind of cluster you want
[tpa]$ tpaexec configure clustername --architecture M1 --platform aws \
- --postgresql 14
+ --postgresql 14 \
+ --failover-manager repmgr
# 2. Provisioning: create the servers needed to host the cluster
[tpa]$ tpaexec provision clustername
@@ -221,7 +222,33 @@ designed to make life simpler, but there is no hidden magic or anything
standing in the way between you and the database. You can do everything
on a TPA cluster that you could do on any other Postgres installation.
-## Getting started
+## Versioning in TPA
+
+TPA previously used a date-based versioning scheme whereby the major
+version was derived from the year. From version 23 we have moved to a
+derivative of semantic versioning. For historical reasons, we are not
+using the full three-part semantic version number. Instead, TPA uses a
+two-part `major.minor` format. The minor version is incremented on every
+release; the major version is incremented only where required to comply
+with the backward compatibility principle below.
+
+### Backwards compatibility
+
+A key development principle of TPA is to maintain backwards
+compatibility, so there is no reason for users to need anything other
+than the latest version of TPA. We define backwards compatibility as
+follows:
+
+- A config.yml created with TPA X.a will be valid with TPA X.b where
+ b>=a
+- The cluster created from that config.yml will be maintainable and
+ re-deployable with TPA X.b
+
+Therefore, a new major version implies a break in backward
+compatibility. As such, we aim to avoid releasing major versions and
+will only do so in exceptional circumstances.
+
+## Getting started
Follow the [TPA installation instructions](INSTALL/) for your
system, then [configure your first cluster](tpaexec-configure/).
diff --git a/product_docs/docs/tpa/23/reference/2q_and_edb_repositories.mdx b/product_docs/docs/tpa/23/reference/2q_and_edb_repositories.mdx
index 19815ae4f4e..dd191b340ed 100644
--- a/product_docs/docs/tpa/23/reference/2q_and_edb_repositories.mdx
+++ b/product_docs/docs/tpa/23/reference/2q_and_edb_repositories.mdx
@@ -67,7 +67,11 @@ If the PGD-Always-ON architecture is selected, repositories will be
selected from EDB Repos 2.0 and all software will be sourced
from these repositories.
-For M1 and BDR-Always-ON architectures, the default source is
+If the M1 architecture is selected and no proprietary EDB software is
+chosen, all packages will be sourced from PGDG. If any proprietary EDB
+software is selected, all packages will be sourced from EDB Repos 2.0.
+
+For the BDR-Always-ON architecture, the default source is
2ndQuadrant, and the necessary repositories will be added from this
source. In addition, the PGDG repositories will be used for community
packages such as PostgreSQL and etcd as required.
diff --git a/product_docs/docs/tpa/23/reference/INSTALL-repo.mdx b/product_docs/docs/tpa/23/reference/INSTALL-repo.mdx
index a6aa71d9b15..6d762c6d8a7 100644
--- a/product_docs/docs/tpa/23/reference/INSTALL-repo.mdx
+++ b/product_docs/docs/tpa/23/reference/INSTALL-repo.mdx
@@ -7,15 +7,15 @@ originalFilePath: INSTALL-repo.md
This document explains how to use TPA from a copy of the source code
repository.
-Please [install TPA from packages](../INSTALL/) if you can; install
-from source only if no packages are available for your system (e.g., on
-MacOS X), or if you are collaborating with the TPA developers to
-test unreleased code.
+!!! Note
+
+ EDB customers must [install TPA from packages](../INSTALL/) in
+ order to receive EDB support for the software.
To run TPA from source, you must install all of the dependencies
(e.g., Python 3.6+) that the packages would handle for you, or download
the source and [run TPA in a Docker container](INSTALL-docker/).
-(Either way will work fine on Linux and MacOS X.)
+(Either way will work fine on Linux and macOS.)
## Quickstart
@@ -25,8 +25,8 @@ something other than `sudo` to run these commands as root, if you
prefer.)
```bash
-# Debian (python3.7) or Ubuntu (python3.6)
-$ sudo apt-get install python3.7 python3-pip python3-venv \
+# Debian or Ubuntu
+$ sudo apt-get install python3 python3-pip python3-venv \
git openvpn patch
# RedHat, Rocky or AlmaLinux (python3 for RHEL7, python36 for RHEL8)
diff --git a/product_docs/docs/tpa/23/reference/patroni.mdx b/product_docs/docs/tpa/23/reference/patroni.mdx
index 5c664de68a3..8e88c4b1097 100644
--- a/product_docs/docs/tpa/23/reference/patroni.mdx
+++ b/product_docs/docs/tpa/23/reference/patroni.mdx
@@ -18,6 +18,15 @@ cluster_vars:
failover_manager: patroni
```
+If deploying to RedHat, you must also add the `PGDG` repository to your
+yum repository list in `config.yml`:
+
+```yaml
+cluster_vars:
+ yum_repository_list:
+ - PGDG
+```
+
TPA `configure` will add 3 etcd nodes and 2 haproxy nodes. Etcd is used
for the Distributed Configuration Store (DCS). Patroni supports other
DCS backends, but they are not currently supported by EDB or TPA.
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
index b0dbfa12247..241fe346bf2 100644
--- a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
+++ b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
@@ -61,3 +61,21 @@ cluster-dir/
You can use this in the cluster as is or copy it to a target control
node. See [recommendations for installing to an air-gapped environment](air-gapped/). A [local-repo](local-repo/) will be detected and used
automatically by TPA.
+
+## Cleaning up failed downloader container
+
+If there is an error during the download process, the command leaves
+the downloader container running to help with debugging. For instance,
+you may want to log in to the failed downloader container to inspect
+logs or networking. The downloader container is typically named
+`$cluster_name-downloader`, unless that name exceeds the 64-character
+limit for container names. You can check the exact name by running
+`docker ps` to list the running containers and looking for a container
+name that matches your cluster name. In most cases you can log in to the
+running container by executing `docker exec -it $cluster_name-downloader /bin/bash`.
+After the inspection, you can clean up the leftover container by running
+the `download-packages` command with `--tags cleanup`. For example:
+
+```shell
+tpaexec download-packages cluster-dir --docker-image tpa/redhat:8 --tags cleanup
+```
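+
+Putting the inspection and cleanup steps together, a session might look
+something like this (the container name assumes the default
+`$cluster_name-downloader` pattern described above):
+
+```shell
+docker ps                                          # find the downloader container
+docker exec -it cluster-dir-downloader /bin/bash   # inspect logs, networking, and so on
+tpaexec download-packages cluster-dir --docker-image tpa/redhat:8 --tags cleanup
+```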
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-info.mdx b/product_docs/docs/tpa/23/reference/tpaexec-info.mdx
new file mode 100644
index 00000000000..8ef67a153d4
--- /dev/null
+++ b/product_docs/docs/tpa/23/reference/tpaexec-info.mdx
@@ -0,0 +1,48 @@
+---
+title: tpaexec info
+originalFilePath: tpaexec-info.md
+
+---
+
+You can use the `info` command to output information about the TPA installation.
+Providing this information is valuable for troubleshooting.
+
+## Usage
+
+- Run `tpaexec info`
+
+### Subcommands
+
+- `tpaexec info version`
+
+ Displays current TPA version
+
+- `tpaexec info platforms`
+
+ Displays available deployment platforms
+
+- `tpaexec info architectures`
+
+ Displays available deployment architectures
+
+- `tpaexec info platforms/<platform-name>`
+
+ Displays information about a particular platform
+
+- `tpaexec info architectures/<architecture-name>`
+
+ Displays information about a particular architecture
+
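+For instance, to display the details of the M1 architecture (one of the
+names listed by `tpaexec info architectures`), you might run:
+
+```bash
+tpaexec info architectures/M1
+```
+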
+## Example Output
+
+The `tpaexec info` command outputs the following:
+
+```bash
+# TPAexec 23.18.18
+tpaexec=./tpaexec
+TPA_DIR=/opt/EDB/TPA
+PYTHON=/opt/EDB/TPA/tpa-venv/bin/python3 (v3.7.3, venv)
+TPA_VENV=/opt/EDB/TPA/tpa-venv
+ANSIBLE=/opt/EDB/TPA/tpa-venv/bin/ansible (v2.9.27)
+Validation: e05e5302cd357b8ddbb042b7591bf66dfa283213ccbe5073b2cff3c783be1310 [OK]
+```
diff --git a/product_docs/docs/tpa/23/rel_notes/index.mdx b/product_docs/docs/tpa/23/rel_notes/index.mdx
index 6c170e0878d..a0aad80b43e 100644
--- a/product_docs/docs/tpa/23/rel_notes/index.mdx
+++ b/product_docs/docs/tpa/23/rel_notes/index.mdx
@@ -2,6 +2,8 @@
title: Trusted Postgres Architect release notes
navTitle: "Release notes"
navigation:
+ - tpa_23.22_rel_notes
+ - tpa_23.21_rel_notes
- tpa_23.20_rel_notes
- tpa_23.19_rel_notes
- tpa_23.18_rel_notes
@@ -18,7 +20,9 @@ The Trusted Postgres Architect documentation describes the latest version of Tru
| Version | Release date |
| ---------------------------- | ------------ |
-| [23.20](tpa_23.20_rel_notes) | 1 Aug 2023 |
+| [23.22](tpa_23.22_rel_notes) | 06 Sep 2023 |
+| [23.21](tpa_23.21_rel_notes) | 05 Sep 2023 |
+| [23.20](tpa_23.20_rel_notes) | 01 Aug 2023 |
| [23.19](tpa_23.19_rel_notes) | 12 Jul 2023 |
| [23.18](tpa_23.18_rel_notes) | 23 May 2023 |
| [23.17](tpa_23.17_rel_notes) | 10 May 2023 |
diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.21_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.21_rel_notes.mdx
new file mode 100644
index 00000000000..e35f65e0a93
--- /dev/null
+++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.21_rel_notes.mdx
@@ -0,0 +1,19 @@
+---
+title: Trusted Postgres Architect 23.21 release notes
+navTitle: "Version 23.21"
+---
+
+Released: 5 Sep 2023
+
+
+New features, enhancements, bug fixes, and other changes in Trusted Postgres Architect 23.21 include the following:
+
+| Type | Description |
+| ---- |------------ |
+| Change | The default M1 configuration now uses EDB Repos 2.0 if any EDB software is selected, otherwise PGDG is used. This only affects new clusters. |
+| Change | You must now choose a failover manager explicitly when running `tpaexec configure` with the M1 architecture. |
+| Bug fix | Fixed an issue with creation of PGD subscriber-only nodes whereby TPA incorrectly required 'subscriber-only' to be set on the replica instead of the upstream instance. |
+| Bug fix | TPA will now skip inapplicable tasks when deploying to containers even if you are using the 'bare' platform option (previously these were skipped only if 'docker' was selected). |
+| Bug fix | Fixed an issue with permissions on `/etc/edb` whereby if you added the pgd-proxy role to a data node in a deployed PGD5 cluster, pgd-proxy would fail to start because it did not have permissions to open pgd-proxy-config.yml. |
+| Bug fix | Fixed an issue whereby `/var/log/postgres` could end up with inappropriate permissions (0600) if a strict umask was set. |
+| Bug fix | Fixed an issue whereby repeating `tpaexec deploy` on a Barman instance correctly registered with PEM would lose the PEM Agent Barman configuration. |
diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.22_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.22_rel_notes.mdx
new file mode 100644
index 00000000000..218d509361e
--- /dev/null
+++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.22_rel_notes.mdx
@@ -0,0 +1,13 @@
+---
+title: Trusted Postgres Architect 23.22 release notes
+navTitle: "Version 23.22"
+---
+
+Released: 6 Sep 2023
+
+
+New features, enhancements, bug fixes, and other changes in Trusted Postgres Architect 23.22 include the following:
+
+| Type | Description |
+| ---- |------------ |
+| Change | TPA is now an open source project! You can clone the source under the GPLv3 license from [GitHub](https://github.com/EnterpriseDB/tpa). |
diff --git a/product_docs/docs/tpa/23/tpaexec-configure.mdx b/product_docs/docs/tpa/23/tpaexec-configure.mdx
index 34ee0098b17..d1e72dd042d 100644
--- a/product_docs/docs/tpa/23/tpaexec-configure.mdx
+++ b/product_docs/docs/tpa/23/tpaexec-configure.mdx
@@ -13,7 +13,8 @@ cycle.
```bash
[tpa]$ tpaexec configure ~/clusters/speedy --architecture M1 \
- --postgresql 14
+ --postgresql 14 \
+ --failover-manager repmgr
```
This command will create a directory named `~/clusters/speedy` and
@@ -416,7 +417,8 @@ Let's see what happens when we run the following command:
--platform aws --region us-east-1 --network 10.33.0.0/16 \
--instance-type t2.medium --root-volume-size 32 \
--postgres-volume-size 64 --barman-volume-size 128 \
- --postgresql 14
+ --postgresql 14 \
+ --failover-manager repmgr
[tpa]$
```
diff --git a/scripts/source/pglogical2.js b/scripts/source/pglogical2.js
index f3596fdc620..6d522df7d1d 100644
--- a/scripts/source/pglogical2.js
+++ b/scripts/source/pglogical2.js
@@ -2,23 +2,22 @@
// purpose:
// Import and convert the pglogical2 docs from https://raw.githubusercontent.com/2ndQuadrant/pglogical/REL2_x_STABLE/docs/README.md, rendering them in /advocacy_docs/supported-open-source/pglogical2/
//
-const path = require("path");
-const fs = require("fs/promises");
-const https = require("https");
-const { read, write } = require("to-vfile");
-const remarkParse = require("remark-parse");
-const mdx = require("remark-mdx");
-const unified = require("unified");
-const remarkFrontmatter = require("remark-frontmatter");
-const remarkStringify = require("remark-stringify");
-const admonitions = require("remark-admonitions");
-const yaml = require("js-yaml");
-const visit = require("unist-util-visit");
-const visitAncestors = require("unist-util-visit-parents");
-const mdast2string = require("mdast-util-to-string");
-const { exec } = require("child_process");
-const isAbsoluteUrl = require("is-absolute-url");
-const slugger = require("github-slugger");
+import path from "path";
+import fs from "fs/promises";
+import https from "https";
+import pkg from 'to-vfile';
+const {write, read} = pkg;
+import remarkParse from "remark-parse";
+import mdx from "remark-mdx";
+import unified from "unified";
+import remarkFrontmatter from "remark-frontmatter";
+import remarkStringify from "remark-stringify";
+import admonitions from "remark-admonitions";
+import yaml from "js-yaml";
+import visit from "unist-util-visit";
+import visitAncestors from "unist-util-visit-parents";
+import mdast2string from "mdast-util-to-string";
+import slugger from "github-slugger";
const outputFiles = [];
const source = new URL(
@@ -26,9 +25,13 @@ const source = new URL(
);
const originalSource =
"https://github.com/2ndQuadrant/pglogical/blob/REL2_x_STABLE/docs/README.md?plain=1";
-const destination = path.resolve(
+const docsRoot = path.resolve(
process.argv[1],
- "../../../advocacy_docs/supported-open-source/pglogical2/",
+ "../../../",
+);
+const destination = path.resolve(
+ docsRoot,
+ "advocacy_docs/supported-open-source/pglogical2/",
);
(async () => {
@@ -104,7 +107,7 @@ function pglogicalTransformer() {
metadata: {
title: title,
product: "pglogical 2",
- generatedBy: `${process.argv[1]} - re-run to regenerate from originalFilePath`,
+ generatedBy: `${path.relative(docsRoot, process.argv[1])} - re-run to regenerate from originalFilePath`,
},
data: {
type: "root",
diff --git a/src/components/feedback-dropdown.js b/src/components/feedback-dropdown.js
new file mode 100644
index 00000000000..0d664121705
--- /dev/null
+++ b/src/components/feedback-dropdown.js
@@ -0,0 +1,70 @@
+import React, { useEffect, useState } from "react";
+import { Dropdown, DropdownButton } from "react-bootstrap";
+import { useStaticQuery, graphql } from "gatsby";
+import Icon, { iconNames } from "./icon";
+
+export const FeedbackDropdown = ({ githubIssuesLink }) => {
+ const data = useStaticQuery(graphql`
+ {
+ edbGit {
+ docsRepoUrl
+ branch
+ sha
+ }
+ }
+ `);
+
+ // add the last commit SHA to paths dynamically to minimize page changes
+ const [url, setUrl] = useState();
+ useEffect(() => {
+ if (githubIssuesLink)
+ setUrl(
+ githubIssuesLink.replace(
+ encodeURIComponent(
+ `${data.edbGit.docsRepoUrl}/commits/${data.edbGit.branch}/`,
+ ),
+ encodeURIComponent(
+ `${data.edbGit.docsRepoUrl}/commits/${data.edbGit.sha}/`,
+ ),
+ ),
+ );
+ }, [
+ githubIssuesLink,
+ data.edbGit.docsRepoUrl,
+ data.edbGit.branch,
+ data.edbGit.sha,
+ ]);
+
+ return (
+
+ }
+ >
+
+ Report a problem
+
+
+ Give product feedback
+
+
+ );
+};
diff --git a/src/components/footer.js b/src/components/footer.js
index c020ff322fb..23e2f0368ea 100644
--- a/src/components/footer.js
+++ b/src/components/footer.js
@@ -1,26 +1,54 @@
-import React from "react";
+import React, { useEffect, useState } from "react";
import { Link } from "./";
+import { useStaticQuery, graphql } from "gatsby";
const TimestampLink = ({ timestamp, githubFileLink }) => {
+ const data = useStaticQuery(graphql`
+ {
+ edbGit {
+ docsRepoUrl
+ branch
+ sha
+ }
+ }
+ `);
+
+ // add the last commit SHA to paths dynamically to minimize page changes
+ const [url, setUrl] = useState(githubFileLink);
+ useEffect(() => {
+ if (githubFileLink)
+ setUrl(
+ githubFileLink.replace(
+ `${data.edbGit.docsRepoUrl}/commits/${data.edbGit.branch}/`,
+ `${data.edbGit.docsRepoUrl}/commits/${data.edbGit.sha}/`,
+ ),
+ );
+ }, [
+ githubFileLink,
+ data.edbGit.docsRepoUrl,
+ data.edbGit.branch,
+ data.edbGit.sha,
+ ]);
+
if (timestamp) {
return (
<>
·
>
);
diff --git a/src/components/layout.js b/src/components/layout.js
index 6f2cee0e9c3..541df4a1b64 100644
--- a/src/components/layout.js
+++ b/src/components/layout.js
@@ -53,6 +53,7 @@ const Layout = ({
to={href}
pageUrl={meta.path}
pageIsIndex={meta.isIndexPage}
+ productVersions={meta.productVersions}
{...rest}
/>
),
@@ -95,7 +96,7 @@ const Layout = ({
Archive,
AuthenticatedContentPlaceholder,
}),
- [katacodaPanelData, meta.path, meta.isIndexPage],
+ [katacodaPanelData, meta.path, meta.isIndexPage, meta.productVersions],
);
return (
diff --git a/src/components/left-nav.js b/src/components/left-nav.js
index 4de191470a0..b94ac7be931 100644
--- a/src/components/left-nav.js
+++ b/src/components/left-nav.js
@@ -87,7 +87,7 @@ const LeftNav = ({
{navTree.items.map((node) => (
diff --git a/src/components/link.js b/src/components/link.js
index 8b48064cba6..c1d071d7270 100644
--- a/src/components/link.js
+++ b/src/components/link.js
@@ -35,7 +35,7 @@ const hasNonMarkdownExtension = (url) => {
);
};
-const rewriteUrl = (url, pageUrl, pageIsIndex, pathPrefix) => {
+const rewriteUrl = (url, pageUrl, pageIsIndex, productVersions, pathPrefix) => {
if (!pageUrl) return forceTrailingSlash(url);
// consistent behavior while authoring: base path for relative links
@@ -57,10 +57,23 @@ const rewriteUrl = (url, pageUrl, pageIsIndex, pathPrefix) => {
let resultHref = result.href.replace(/^loc:/, "");
resultHref = stripPathPrefix(resultHref, pathPrefix);
resultHref = stripMarkdownExtension(resultHref);
+
+ // if this looks like a versioned product link that points at the latest version, rewrite to the "latest" path
+ // this avoids depending on redirects (which won't play well with client-side nav)
+ const splitPath = resultHref.split("/");
+ if (
+ productVersions &&
+ productVersions[splitPath[1]] &&
+ productVersions[splitPath[1]][0] === splitPath[2]
+ ) {
+ splitPath[2] = "latest";
+ resultHref = splitPath.join("/");
+ }
+
return forceTrailingSlash(resultHref);
};
-const Link = ({ to, pageUrl, pageIsIndex, ...rest }) => {
+const Link = ({ to, pageUrl, pageIsIndex, productVersions, ...rest }) => {
const pathPrefix = usePathPrefix();
if (
@@ -74,7 +87,13 @@ const Link = ({ to, pageUrl, pageIsIndex, ...rest }) => {
);
} else {
- const outputUrl = rewriteUrl(to, pageUrl, pageIsIndex, pathPrefix);
+ const outputUrl = rewriteUrl(
+ to,
+ pageUrl,
+ pageIsIndex,
+ productVersions,
+ pathPrefix,
+ );
return ;
}
};
diff --git a/src/components/prev-next.js b/src/components/prev-next.js
index ba4fc5dcfcb..02fb7270674 100644
--- a/src/components/prev-next.js
+++ b/src/components/prev-next.js
@@ -1,7 +1,7 @@
import React from "react";
import { Link } from "./";
-const PrevNext = ({ prevNext, path, depth, depthLimit = 3 }) => {
+const PrevNext = ({ prevNext, depth, depthLimit = 3 }) => {
let prevLink = prevNext.prev;
let nextLink = prevNext.next;
if (depth <= depthLimit) prevLink = null;
diff --git a/src/constants/gatsby-utils.js b/src/constants/gatsby-utils.js
index 115ecb32255..66817d6cdb9 100644
--- a/src/constants/gatsby-utils.js
+++ b/src/constants/gatsby-utils.js
@@ -2,6 +2,9 @@ const fs = require("fs");
const asyncFs = require("fs/promises");
const path = require("path");
+const isGHBuild = !!process.env.GITHUB_HEAD_REF;
+const ghBranch = process.env.GITHUB_HEAD_REF || process.env.GITHUB_REF;
+
const sortVersionArray = (versions) => {
return versions.sort((a, b) =>
b.localeCompare(a, undefined, { numeric: true }),
@@ -31,9 +34,6 @@ const removeTrailingSlash = (url) => {
return url;
};
-const isPathAnIndexPage = (filePath) =>
- filePath.endsWith("/index.mdx") || filePath === "index.mdx";
-
const removeNullEntries = (obj) => {
if (!obj) return obj;
for (const [key, value] of Object.entries(obj)) {
@@ -255,7 +255,7 @@ const findPrevNextNavNodes = (navTree, currNode) => {
};
const preprocessPathsAndRedirects = (nodes, productVersions) => {
- const validPaths = new Set();
+ const validPaths = new Map();
for (let node of nodes) {
const nodePath = node.fields?.path;
if (!nodePath) continue;
@@ -264,20 +264,26 @@ const preprocessPathsAndRedirects = (nodes, productVersions) => {
node.fields.docType === "doc" &&
productVersions[node.fields.product][0] === node.fields.version;
const nodePathLatest = isLatest && replacePathVersion(nodePath);
+ const addPath = (url) => {
+ let value = validPaths.get(url);
+ if (!value) validPaths.set(url, (value = []));
+ value.push({
+ urlpath: nodePathLatest || nodePath,
+ filepath: node.fileAbsolutePath,
+ });
+ };
- validPaths.add(nodePath);
- if (isLatest) validPaths.add(nodePathLatest);
+ addPath(nodePath);
+ if (isLatest) {
+ addPath(nodePathLatest);
+ // from here on, the "latest" path *is* the canonical path
+ node.fields.path = nodePathLatest;
+ }
const redirects = node.frontmatter?.redirects;
if (!redirects || !redirects.length) continue;
const newRedirects = new Set();
- const addNewRedirect = (redirect) => {
- if (validPaths.has(redirect))
- console.warn(`Redirect ${redirect} for page ${nodePath} matches the path of a page or redirect already added and will be ignored!
-::warning file=${node.fileAbsolutePath},title=Overlapping redirect::Redirect matches another redirect or page path, ${redirect}`);
- newRedirects.add(redirect);
- };
for (let redirect of redirects) {
if (!redirect) continue;
@@ -306,35 +312,55 @@ const preprocessPathsAndRedirects = (nodes, productVersions) => {
path.sep,
);
- if (fromPath !== nodePath) addNewRedirect(fromPath);
+ if (fromPath !== nodePath) newRedirects.add(fromPath);
}
- for (let redirect of newRedirects) validPaths.add(redirect);
+ for (let redirect of newRedirects) addPath(redirect);
- node.frontmatter.redirects = [...newRedirects];
+ node.frontmatter.redirects = [...newRedirects.keys()];
}
return validPaths;
};
-const configureRedirects = (
- toPath,
- redirects,
- actions,
- isLatest,
- pathVersions,
-) => {
+const configureRedirects = (productVersions, node, validPaths, actions) => {
+ const toPath = node.fields.path;
+ const redirects = node.frontmatter.redirects || [];
+ const versions =
+ node.fields.docType === "doc"
+ ? productVersions[node.fields.product] || []
+ : [];
+
+ // all versions for this path.
+ // Null entries for versions that don't exist. Will try to match redirects to avoid this, but won't follow redirect chains
+ // Canonical version is the first non-null in the list, e.g. pathVersions.filter((p) => !!p)[0]
+ const allPaths = [node.fields.path, ...(redirects || [])];
+ const pathVersions = versions.map((v, i) => {
+ const versionPaths = allPaths.map((p) => replacePathVersion(p, v));
+ const match = versionPaths.find((vp) => validPaths.has(vp));
+ if (!match) return null;
+ const sources = validPaths.get(match);
+    // this is a problematic situation: multiple sources (pages, redirects) exist for this version
+ // the first one will usually "win" - unless one is a page, in which case that will win.
+ // we'll warn about this later on
+ return (
+ sources.find((p) => p.urlpath === match)?.urlpath || sources[0].urlpath
+ );
+ });
+
+ const splitToPath = toPath.split(path.sep);
+ const isLatest = splitToPath[2] === "latest";
const lastVersionPath = pathVersions.find((p) => !!p);
const isLastVersion = toPath === lastVersionPath;
+
// latest version should always redirect to .../latest/...
if (isLatest) {
actions.createRedirect({
- fromPath: toPath,
- toPath: replacePathVersion(toPath),
- redirectInBrowser: true,
+ fromPath: replacePathVersion(toPath, versions[0]),
+ toPath: toPath,
+ redirectInBrowser: false,
isPermanent: false,
force: true,
});
- toPath = replacePathVersion(toPath);
}
// if this path is a dead-end (it does not exist in any newer versions
// of the product, and also does not have a matching redirect in any newer versions)
@@ -344,14 +370,11 @@ const configureRedirects = (
actions.createRedirect({
fromPath: replacePathVersion(toPath),
toPath,
- redirectInBrowser: true,
+ redirectInBrowser: false,
isPermanent: false,
});
}
- if (!redirects) return;
-
- const splitToPath = toPath.split(path.sep);
for (let fromPath of redirects) {
if (!fromPath) continue;
if (fromPath !== toPath) {
@@ -361,7 +384,7 @@ const configureRedirects = (
actions.createRedirect({
fromPath,
toPath,
- redirectInBrowser: true,
+ redirectInBrowser: false,
isPermanent,
});
}
@@ -401,16 +424,80 @@ const configureRedirects = (
// /epas/latest/B -> /epas/latest/C
const toIsLatest = isLatest || isLastVersion;
if (toIsLatest) {
- fromPath = replacePathVersion(fromPath);
- if (fromPath !== toPath)
+ const fromPathLatest = replacePathVersion(fromPath);
+ if (fromPathLatest !== fromPath && fromPathLatest !== toPath) {
+ let value = validPaths.get(fromPathLatest);
+ if (!value) validPaths.set(fromPathLatest, (value = []));
+ value.push({
+ urlpath: toPath,
+ filepath: node.fileAbsolutePath,
+ });
actions.createRedirect({
- fromPath,
+ fromPath: fromPathLatest,
toPath,
- redirectInBrowser: true,
+ redirectInBrowser: false,
isPermanent: false,
});
+ }
+ }
+ }
+
+ return pathVersions;
+};
+
+const reportRedirectCollisions = (validPaths, reporter) => {
+ let collisionCount = 0,
+ sourceCount = 0;
+ for (const [urlpath, sources] of validPaths) {
+ if (sources.length <= 1) continue;
+
+ collisionCount += 1;
+ sourceCount += sources.length;
+
+ for (const source of sources) {
+ if (source.urlpath === urlpath) continue;
+ if (isGHBuild) {
+ let list = sources
+ .filter((s) => s !== source)
+ .map((existing) => {
+ const existingIsRedirect = existing.urlpath !== urlpath;
+ return ` - ${
+ existingIsRedirect ? "redirect" : "page"
+ } at https://github.com/EnterpriseDB/docs/blob/${ghBranch}/${path.relative(
+ process.cwd(),
+ existing.filepath,
+ )}`;
+ })
+ .join("\n");
+ reporter.warn(`
+::warning file=${
+ source.filepath
+ },title=Overlapping redirect found in::Redirect ${urlpath} also matches ${(
+ "\n" + list
+ )
+ .replace(/%/g, "%25")
+ .replace(/\r/g, "%0D")
+ .replace(/\n/g, "%0A")}`);
+ } else {
+ let list = sources
+ .filter((s) => s !== source)
+ .map((existing) => {
+ const existingIsRedirect = existing.urlpath !== urlpath;
+ return ` - ${
+ existingIsRedirect ? "redirect" : "page"
+ } at ${path.relative(process.cwd(), existing.filepath)}`;
+ })
+ .join("\n");
+ reporter.warn(`Redirect ${urlpath} for page ${source.filepath} matches the path of
+${list}`);
+ break; // reduce noise: only report once for each collision on non-CI builds
+ }
}
}
+
+ reporter.info(
+ `redirects: ${collisionCount} collisions across ${sourceCount} locations`,
+ );
};
const convertLegacyDocsPathToLatest = (fromPath) => {
@@ -534,7 +621,6 @@ module.exports = {
replacePathVersion,
filePathToDocType,
removeTrailingSlash,
- isPathAnIndexPage,
removeNullEntries,
pathToDepth,
mdxNodesToTree,
@@ -546,6 +632,7 @@ module.exports = {
findPrevNextNavNodes,
preprocessPathsAndRedirects,
configureRedirects,
+ reportRedirectCollisions,
configureLegacyRedirects,
makeFileNodePublic,
readFile,
diff --git a/src/constants/utils.js b/src/constants/utils.js
index b32c0f2e949..bef256c446e 100644
--- a/src/constants/utils.js
+++ b/src/constants/utils.js
@@ -28,3 +28,6 @@ export const capitalize = (s) => {
export const getBaseUrl = (path, depth) => {
return path.split("/").slice(0, depth).join("/");
};
+
+export const isPathAnIndexPage = (filePath) =>
+ filePath.endsWith("/index.mdx") || filePath === "index.mdx";
diff --git a/src/pages/index.js b/src/pages/index.js
index 9ae45ffbb86..89f663535ac 100644
--- a/src/pages/index.js
+++ b/src/pages/index.js
@@ -95,26 +95,23 @@ const Page = () => (
-
- EDB Postgres Distributed's ready reference
+
+ Find the EPAS content you're looking for
- Use the new reference section in EDB Postgres Distributed to
- quickly look up views, catalogs, functions, and variables.
- It's a new view of the documentation designed to centralize
- essential information and speed up your development.
+ Whether you are an application programmer trying to debug your
+ programs or a DBA configuring your database, the new structure
+ of the EDB Postgres Advanced Server documentation makes
+ finding relevant content a snap.
-
+
Find out more →
diff --git a/src/templates/doc.js b/src/templates/doc.js
index 6853d2c3d17..02b69e07b44 100644
--- a/src/templates/doc.js
+++ b/src/templates/doc.js
@@ -1,6 +1,7 @@
import React from "react";
-import { Container, Row, Col, Dropdown, DropdownButton } from "react-bootstrap";
+import { Container, Row, Col } from "react-bootstrap";
import { graphql, Link } from "gatsby";
+import { isPathAnIndexPage } from "../constants/utils";
import { MDXRenderer } from "gatsby-plugin-mdx";
import {
CardDecks,
@@ -15,7 +16,7 @@ import {
TableOfContents,
} from "../components";
import { products } from "../constants/products";
-import Icon, { iconNames } from "../components/icon";
+import { FeedbackDropdown } from "../components/feedback-dropdown";
export const query = graphql`
query ($nodeId: String!) {
@@ -27,6 +28,11 @@ export const query = graphql`
}
body
tableOfContents
+ fileAbsolutePath
+ }
+ edbGit {
+ docsRepoUrl
+ branch
}
}
`;
@@ -181,56 +187,19 @@ const Section = ({ section }) => (