diff --git a/advocacy_docs/pg_extensions/index.mdx b/advocacy_docs/pg_extensions/index.mdx
index ca921194ac6..5d0ee44dbcc 100644
--- a/advocacy_docs/pg_extensions/index.mdx
+++ b/advocacy_docs/pg_extensions/index.mdx
@@ -96,7 +96,6 @@ Categories of extensions:
pglogical 2.x | | ✓ | ✓ | – | – | – | – | – | – |
pgsnmpd (11-13) | | ✓ | – | ✓ | – | – | – | – | – |
pgvector | | ✓ | ✓ | ✓ | – | – | ✓ | ✓ | ✓ |
-pljava (11) | | ✓ | – | ✓ | – | – | – | – | – |
plperl | | ✓ | ✓ | ✓ | – | – | ✓ | ✓ | ✓ |
plperlu | Yes | ✓ | ✓ | ✓ | – | – | ✓ | ✓ | ✓ |
bool_plperl | | ✓ | ✓ | ✓ | – | – | ✓ | ✓ | ✓ |
@@ -134,7 +133,7 @@ Categories of extensions:
refdata | | ✓ | ✓ | ✓ | – | – | – | ✓ | ✓ |
EDB Postgres Advanced Server extensions |
dbms_job | | – | – | ✓ | – | – | – | – | – |
-dbms_scheduler | | – | – | ✓ | – | – | – | – | Q4 2023 |
+dbms_scheduler | | – | – | ✓ | – | – | – | – | ✓ |
edb_cloneschema | | – | – | ✓ | – | – | – | – | – |
edb_dblink_libpq | | – | – | ✓ | – | ✓ | – | – | ✓ |
edb_dblink_oci | Yes | – | – | ✓ | – | ✓ | – | – | ✓ |
diff --git a/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx b/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
index 51c5804f7fc..07ae66281b2 100644
--- a/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
+++ b/product_docs/docs/biganimal/release/using_cluster/01_postgres_access/index.mdx
@@ -26,10 +26,15 @@ psql -W "postgres://edb_admin@xxxxxxxxx.xxxxx.biganimal.io:5432/edb_admin?sslmod
So that we can effectively manage the cloud resources and ensure users are protected against security threats, BigAnimal provides a special administrative role, pg_ba_admin. The edb_admin user is a member of the pg_ba_admin role. The pg_ba_admin role has privileges similar to a Postgres superuser. Like the edb_admin user, the pg_ba_admin role shouldn't be used for day-to-day application operations and access to the role must be controlled carefully. See [pg_ba_admin role](pg_ba_admin) for details.
### superuser
-When using your own cloud account, you can grant the edb_admin role superuser privileges for a cluster. See [Superuser access](/biganimal/latest/getting_started/creating_a_cluster/#superuser-access). If you grant superuser privileges, you must take care to limit the number of connections used by superusers to avoid degraded service or compromising availability.
+
+Superuser access in BigAnimal is available only when users are in control of their own infrastructure. When using your own cloud account, you can grant the edb_admin role superuser privileges for a cluster. See [Superuser access](/biganimal/latest/getting_started/creating_a_cluster/#superuser-access). If you grant superuser privileges, you must take care to limit the number of connections used by superusers to avoid degraded service or compromising availability.
!!! note
Superuser privileges allow you to make Postgres configuration changes using `ALTER SYSTEM` queries. We recommend that you don't do this because it might lead to an unpredictable or unrecoverable state of the cluster. In addition, `ALTER SYSTEM` changes aren't replicated across the cluster.
+
+For BigAnimal-hosted and distributed high-availability clusters, there is no superuser access option. Use the edb_admin role for most superuser-level activities; unsafe activities aren't available to the edb_admin role.
+
+Distributed high-availability clusters also have a bdr_superuser role. This is *not* a general superuser, but a specific user/role that has privileges and access to all the bdr schemas and functions. For more information, see [bdr_superuser](/pgd/latest/security/roles/).
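+
+For example, a user with sufficient privileges (such as edb_admin) can give another role PGD administration rights by granting it membership in bdr_superuser. A minimal sketch, assuming an illustrative `app_dba` role and `bdrdb` database:
+
+```shell
+psql "postgres://edb_admin@xxxxxxxxx.xxxxx.biganimal.io:5432/bdrdb?sslmode=require" -c "GRANT bdr_superuser TO app_dba;"
+```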
See the [PostgreSQL documentation on superusers](https://www.postgresql.org/docs/current/role-attributes.html) for best practices.
diff --git a/product_docs/docs/eprs/7/installing/windows.mdx b/product_docs/docs/eprs/7/installing/windows.mdx
index 34a021cfa52..0885654ffb9 100644
--- a/product_docs/docs/eprs/7/installing/windows.mdx
+++ b/product_docs/docs/eprs/7/installing/windows.mdx
@@ -17,9 +17,9 @@ EDB provides a graphical interactive installer for Windows. You can access it tw
## Prerequisites
-- You must have Java Runtime Environment (JRE) version 1.8 or later installed on the hosts where you intend to install any Replication Server component (Replication Console, publication server, or subscription server). You can use Any Java product, such as Oracle Java or OpenJDK. Follow the directions for your host operating system to install the Java runtime environment.
+- You must have Java Runtime Environment (JRE) version 1.8 or later installed on the hosts where you intend to install any Replication Server component (Replication Console, publication server, or subscription server). You can use any Java product, such as Oracle Java or OpenJDK. Follow the directions for your host operating system to install the Java runtime environment.
-- Be sure the system environment variable, `JAVA_HOME`, is set to the JRE installation directory of the JRE version you want to use with Replication Server. Please ensure that 64-bit version of JRE is installed and `JAVA_HOME` is set before installing Replication Server.
+- Be sure the system environment variable, `JAVA_HOME`, is set to the JRE installation directory of the JRE version you want to use with Replication Server. Make sure that the 64-bit version of JRE is installed and `JAVA_HOME` is set before installing Replication Server.
!!! Note
    Replication Server doesn't support 32-bit JRE installations.
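+
+For example, on Windows you can set `JAVA_HOME` system-wide from an elevated command prompt before running the installer. This is a sketch; the JRE path is an assumption and depends on your installation:
+
+```shell
+setx /M JAVA_HOME "C:\Program Files\Java\jre1.8.0_361"
+```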
diff --git a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.3_rel_notes.mdx b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.3_rel_notes.mdx
index 7f45cba187b..6064cc6d61c 100644
--- a/product_docs/docs/pgd/4/rel_notes/pgd_4.3.3_rel_notes.mdx
+++ b/product_docs/docs/pgd/4/rel_notes/pgd_4.3.3_rel_notes.mdx
@@ -10,7 +10,7 @@ Released: 14 Nov 2023
EDB Postgres Distributed version 4.3.3 is a patch release of EDB Postgres Distributed 4, which includes bug fixes for issues identified in previous versions.
!!! Note
-This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.7.0 and later.
+This version is required for EDB Postgres Advanced Server versions 12.14.18, 13.10.14, 14.10.0 and later.
!!!
| Component | Version | Type | Description |
diff --git a/product_docs/docs/pgd/5/postgres-configuration.mdx b/product_docs/docs/pgd/5/postgres-configuration.mdx
index 679e295a44c..7b4904db158 100644
--- a/product_docs/docs/pgd/5/postgres-configuration.mdx
+++ b/product_docs/docs/pgd/5/postgres-configuration.mdx
@@ -16,7 +16,7 @@ To run correctly, PGD requires these Postgres settings:
- `wal_level` — Must be set to `logical`, since PGD relies on logical decoding.
- `shared_preload_libraries` — Must include `bdr` to enable the extension. Most other
- extensions can appear before of after the bdr entry in the comma-separated list. One exception
+ extensions can appear before or after the `bdr` entry in the comma-separated list. One exception
to that is `pgaudit`, which must appear in the list before `bdr`. Also, don't include
`pglogical` in this list.
- `track_commit_timestamp` — Must be set to `on` for conflict resolution to
diff --git a/product_docs/docs/tpa/23/INSTALL-repo.mdx b/product_docs/docs/tpa/23/INSTALL-repo.mdx
index b250a6d7a4a..c9cb0d7646b 100644
--- a/product_docs/docs/tpa/23/INSTALL-repo.mdx
+++ b/product_docs/docs/tpa/23/INSTALL-repo.mdx
@@ -13,18 +13,18 @@ repository.
order to receive EDB support for the software.
To run TPA from source, you must install all of the dependencies
-(e.g., Python 3.6+) that the packages would handle for you, or download
+(e.g., Python 3.9+) that the packages would handle for you, or download
the source and [run TPA in a Docker container](reference/INSTALL-docker/).
(Either way will work fine on Linux and macOS.)
## Quickstart
-First, you must install the various dependencies Python 3, Python
+First, you must install the various dependencies: Python 3, Python
venv, git, openvpn and patch. Installing from EDB repositories would
-would install these automatically along with the TPA
-packages.
+install these automatically along with the TPA
+packages.
-Before you install TPA, you must install the required packages:
+Before you install TPA, you must install the required packages:
- **Debian/Ubuntu**
`sudo apt-get install python3 python3-pip python3-venv git openvpn patch`
- **Redhat, Rocky or AlmaLinux (RHEL7)**
`sudo yum install python3 python3-pip epel-release git openvpn patch`
@@ -64,9 +64,9 @@ You now have tpaexec installed.
## Dependencies
-### Python 3.6+
+### Python 3.9+
-TPA requires Python 3.6 or later, available on most
+TPA requires Python 3.9 or later, available on most
modern distributions. If you don't have it, you can use
[pyenv](https://github.com/pyenv/pyenv) to install any version of Python
you like without affecting the system packages.
diff --git a/product_docs/docs/tpa/23/INSTALL.mdx b/product_docs/docs/tpa/23/INSTALL.mdx
index 624376e7c64..2e262af72be 100644
--- a/product_docs/docs/tpa/23/INSTALL.mdx
+++ b/product_docs/docs/tpa/23/INSTALL.mdx
@@ -117,7 +117,7 @@ sudo yum install tpaexec
```
This will install TPA into `/opt/EDB/TPA`. It will also
-ensure that other required packages (e.g., Python 3.6 or later) are
+ensure that other required packages (e.g., Python 3.9 or later) are
installed.
We mention `sudo` here only to indicate which commands need root
diff --git a/product_docs/docs/tpa/23/reference/INSTALL-docker.mdx b/product_docs/docs/tpa/23/reference/INSTALL-docker.mdx
index c96498b79ba..c6ff69ce762 100644
--- a/product_docs/docs/tpa/23/reference/INSTALL-docker.mdx
+++ b/product_docs/docs/tpa/23/reference/INSTALL-docker.mdx
@@ -7,7 +7,7 @@ originalFilePath: INSTALL-docker.md
If you are using a system for which there are no [TPA
packages](../INSTALL/) available, and it's difficult to run TPA after
[installing from source](../INSTALL-repo/) (for example, because it's not
-easy to obtain a working Python 3.6+ interpreter), your last resort may
+easy to obtain a working Python 3.9+ interpreter), your last resort may
be to build a Docker image and run TPA inside a Docker container.
Please note that you do not need to run TPA in a Docker container in
diff --git a/product_docs/docs/tpa/23/reference/artifacts.mdx b/product_docs/docs/tpa/23/reference/artifacts.mdx
index 7408e5f3233..da0d942eb06 100644
--- a/product_docs/docs/tpa/23/reference/artifacts.mdx
+++ b/product_docs/docs/tpa/23/reference/artifacts.mdx
@@ -14,13 +14,13 @@ cluster_vars:
state: directory
owner: root
group: root
- mode: 0755
+ mode: "0755"
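+ # quoted so YAML reads the mode as a string, not an octal integer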
- type: file
src: /host/path/to/file
dest: /target/path/to/file
owner: root
group: root
- mode: 0644
+ mode: "0644"
- type: archive
src: example.tar.gz
dest: /some/target/path
diff --git a/product_docs/docs/tpa/23/reference/bdr.mdx b/product_docs/docs/tpa/23/reference/bdr.mdx
index 75dc29dbd51..a9980ca1609 100644
--- a/product_docs/docs/tpa/23/reference/bdr.mdx
+++ b/product_docs/docs/tpa/23/reference/bdr.mdx
@@ -144,7 +144,7 @@ is mentioned in `bdr_node_groups`), it will join that group instead of
### bdr_commit_scopes
This is an optional list of
-[commit scopes](https://www.enterprisedb.com/docs/pgd/latest/bdr/group-commit/)
+[commit scopes](https://www.enterprisedb.com/docs/pgd/latest/durability/group-commit/)
that must exist in the PGD database (available for PGD 4.1 and above).
```yaml
diff --git a/product_docs/docs/tpa/23/reference/distributions.mdx b/product_docs/docs/tpa/23/reference/distributions.mdx
index b4442d25e18..c4baa483295 100644
--- a/product_docs/docs/tpa/23/reference/distributions.mdx
+++ b/product_docs/docs/tpa/23/reference/distributions.mdx
@@ -28,6 +28,12 @@ TPA and target systems on which TPA deploys the Postgres cluster.
- Ubuntu 18.04/bionic is a legacy distribution
- Ubuntu 16.04/xenial is a legacy distribution
+## Oracle Linux
+
+- Oracle Linux 9.x is fully supported (docker only)
+- Oracle Linux 8.x is fully supported (docker only)
+- Oracle Linux 7.x is fully supported (docker only)
+
## RedHat x86
- RHEL/Rocky/AlmaLinux/Oracle Linux 9.x is fully supported (python3 only)
diff --git a/product_docs/docs/tpa/23/reference/reconciling-local-changes.mdx b/product_docs/docs/tpa/23/reference/reconciling-local-changes.mdx
index 73c9b6a63ea..af4b549860a 100644
--- a/product_docs/docs/tpa/23/reference/reconciling-local-changes.mdx
+++ b/product_docs/docs/tpa/23/reference/reconciling-local-changes.mdx
@@ -87,7 +87,7 @@ undone unless they are:
1. Made in the `conf.d/9999-override.conf` file reserved for manual
edits;
2. Made using `ALTER SYSTEM` SQL; or
-3. Made [natively in TPA](#postgres_conf_settings) by adding
+3. Made [natively in TPA](postgresql.conf/#postgres_conf_settings) by adding
`postgres_conf_settings`.
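+
+For example, option 3 might look like this in config.yml (a minimal sketch; the setting and value are illustrative):
+
+```yaml
+cluster_vars:
+  postgres_conf_settings:
+    effective_cache_size: 4GB
+```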
Other than the fact that option 3 is self-documenting and portable,
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-deprovision.mdx b/product_docs/docs/tpa/23/reference/tpaexec-deprovision.mdx
index cfb4cc88efe..c0fcb31be58 100644
--- a/product_docs/docs/tpa/23/reference/tpaexec-deprovision.mdx
+++ b/product_docs/docs/tpa/23/reference/tpaexec-deprovision.mdx
@@ -1,5 +1,5 @@
---
-title: 'tpaexec deprovision'
+title: tpaexec deprovision
originalFilePath: tpaexec-deprovision.md
---
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
index 241fe346bf2..d95fabc9195 100644
--- a/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
+++ b/product_docs/docs/tpa/23/reference/tpaexec-download-packages.mdx
@@ -16,6 +16,14 @@ files for the target distribution package manager, so can be used
exclusively to build clusters. At this time package managers Apt and YUM
are supported.
+!!! Note
+ The download-packages feature requires Docker to be installed
+ on the TPA host. This is because the downloader operates by creating a
+ container of the target operating system and using that system's package
+ manager to resolve dependencies and download all necessary packages. The
+ required Docker setup for download-packages is the same as that for
+ [using Docker as a deployment platform](#platform-docker).
+
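+Before using the command, you can check that Docker is available on the TPA host:
+
+```shell
+docker info
+```
+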
## Usage
An existing cluster configuration needs to exist which can be achieved
diff --git a/product_docs/docs/tpa/23/reference/tpaexec-reconfigure.mdx b/product_docs/docs/tpa/23/reference/tpaexec-reconfigure.mdx
index beca8ffb623..130fef4d6a0 100644
--- a/product_docs/docs/tpa/23/reference/tpaexec-reconfigure.mdx
+++ b/product_docs/docs/tpa/23/reference/tpaexec-reconfigure.mdx
@@ -5,16 +5,17 @@ originalFilePath: tpaexec-reconfigure.md
---
The `tpaexec reconfigure` command reads config.yml and generates a
-revised version of it that changes the cluster from one architecture to
-another. [tpaexec upgrade](../tpaexec-upgrade/) may then be invoked to
-make the required changes on the instances that make up the cluster.
+revised version of it that changes the cluster in various ways according
+to its arguments.
## Arguments
As with other tpaexec commands, the cluster directory must always be
given.
-The following arguments control the contents of the new config.yml:
+## Changing a cluster's architecture
+
+The following arguments enable the cluster's architecture to be changed:
- `--architecture` (required)
The new architecture for the cluster. At present the only supported
@@ -30,6 +31,21 @@ The following arguments control the contents of the new config.yml:
unnecessary to specify this; `tpaexec configure` will choose a suitable
repository based on the postgres flavour in use in the cluster.
+After changing the architecture, run [tpaexec
+upgrade](../tpaexec-upgrade/) to make the required changes to the
+cluster.
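+
+For example, to convert a cluster to PGD-Always-ON (a sketch; the cluster directory and the `--pgd-proxy-routing` choice are illustrative):
+
+```shell
+tpaexec reconfigure ~/clusters/speedy \
+  --architecture PGD-Always-ON --pgd-proxy-routing local
+tpaexec upgrade ~/clusters/speedy
+```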
+
+## Changing a cluster from 2ndQuadrant to EDB repositories
+
+The `--replace-2q-repositories` argument removes any 2ndQuadrant
+repositories the cluster uses and adds EDB repositories as required to
+replace them.
+
+After reconfiguring with this argument, run [tpaexec
+deploy](../tpaexec-deploy/) to make the required changes to the cluster.
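+
+Again as a sketch, assuming a cluster directory `~/clusters/speedy`:
+
+```shell
+tpaexec reconfigure ~/clusters/speedy --replace-2q-repositories
+tpaexec deploy ~/clusters/speedy
+```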
+
+## Output format
+
The following options control the form of the output:
- `--describe`
diff --git a/product_docs/docs/tpa/23/reference/volumes.mdx b/product_docs/docs/tpa/23/reference/volumes.mdx
index 8527672f8de..037be4d25a8 100644
--- a/product_docs/docs/tpa/23/reference/volumes.mdx
+++ b/product_docs/docs/tpa/23/reference/volumes.mdx
@@ -289,7 +289,7 @@ instances:
readahead: 65536
owner: root
group: root
- mode: 0755
+ mode: "0755"
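+ # quoting keeps the mode a string (unquoted 0755 would parse as an octal number)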
```
You can specify the `fstype` (default: ext4), `fsopts` to be passed to
diff --git a/product_docs/docs/tpa/23/reference/yum_repositories.mdx b/product_docs/docs/tpa/23/reference/yum_repositories.mdx
index 9a1e6b8683f..459fead6f85 100644
--- a/product_docs/docs/tpa/23/reference/yum_repositories.mdx
+++ b/product_docs/docs/tpa/23/reference/yum_repositories.mdx
@@ -58,7 +58,7 @@ file yourself:
dest: /etc/yum.repos.d/example.repo
owner: root
group: root
- mode: 0644
+ mode: "0644"
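+ # quoted to keep the mode a string rather than an octal number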
content: |
[example]
name=Example repo
diff --git a/product_docs/docs/tpa/23/rel_notes/index.mdx b/product_docs/docs/tpa/23/rel_notes/index.mdx
index 1c0411b9854..b0afc87a0ab 100644
--- a/product_docs/docs/tpa/23/rel_notes/index.mdx
+++ b/product_docs/docs/tpa/23/rel_notes/index.mdx
@@ -2,6 +2,7 @@
title: Trusted Postgres Architect release notes
navTitle: "Release notes"
navigation:
+ - tpa_23.27_rel_notes
- tpa_23.26_rel_notes
- tpa_23.25_rel_notes
- tpa_23.24_rel_notes
@@ -24,6 +25,7 @@ The Trusted Postgres Architect documentation describes the latest version of Tru
| Version | Release date |
| ---------------------------- | ------------ |
+| [23.27](tpa_23.27_rel_notes) | 19 Dec 2023 |
| [23.26](tpa_23.26_rel_notes) | 30 Nov 2023 |
| [23.25](tpa_23.25_rel_notes) | 14 Nov 2023 |
| [23.24](tpa_23.24_rel_notes) | 17 Oct 2023 |
diff --git a/product_docs/docs/tpa/23/rel_notes/tpa_23.27_rel_notes.mdx b/product_docs/docs/tpa/23/rel_notes/tpa_23.27_rel_notes.mdx
new file mode 100644
index 00000000000..8f36f2bf4ba
--- /dev/null
+++ b/product_docs/docs/tpa/23/rel_notes/tpa_23.27_rel_notes.mdx
@@ -0,0 +1,38 @@
+---
+title: Trusted Postgres Architect 23.27 release notes
+navTitle: "Version 23.27"
+---
+
+Released: 19 Dec 2023
+
+!!! Note Migration to EDB repositories
+This release of TPA lays the groundwork for the decommissioning of the legacy 2ndQuadrant repositories.
+Existing configurations that use the legacy repositories will continue to function until they are decommissioned, but a warning will be displayed.
+To update an existing configuration to use EDB Repos 2.0, you may use `tpaexec reconfigure --replace-2q-repositories`.
+!!!
+
+!!! Note Python interpreter
+TPA now runs using a Python interpreter provided by the `edb-python-39` package, which will be automatically installed as a dependency of the `tpaexec` package.
+This allows us to keep TPA updated with security patches on older systems where the Python version is no longer widely supported.
+This is a completely standard build of Python 3.9. If you prefer, you may run TPA using another interpreter.
+We recommend 3.9; versions older than 3.9 or newer than 3.11 aren't supported.
+!!!
+
+New features, enhancements, bug fixes, and other changes in Trusted Postgres Architect 23.27 include the following:
+
+| Type | Description |
+| ---- |------------ |
+| Enhancement | TPA now supports Oracle Linux 7, 8 and 9 on Docker. |
+| Change | TPA now requires Python 3.9-3.11 and depends on the package `edb-python-39` to provide a suitable interpreter. |
+| Change | TPA will no longer configure any 2ndQuadrant repositories by default; instead, it will select suitable repositories from EDB Repos 2.0. |
+| Change | TPA now provides a new `--replace-2q-repositories` argument to `tpaexec reconfigure` that will remove 2ndQuadrant repositories from an existing config.yml and add suitable EDB repositories for the cluster's postgres flavour and BDR version. |
+| Change | TPA now sets file system permissions explicitly on more objects. |
+| Change | A new variable `disable_repository_checks` can be set to true in config.yml to bypass the usual check for EDB repositories when deploying the PGD-Always-ON architecture. |
+| Change | TPA will now generate a `primary_slot_name` on the primary node as well, to be used in case of switchover, ensuring that the former primary has a physical slot on the new primary. |
+| Change | When running `tpaexec reconfigure` to prepare for a major PGD upgrade, TPA will now ensure that the `commit_scope` for CAMO-enabled partners is generated using existing configuration options from older BDR versions. It also chooses better defaults. |
+| Bug fix | Fixed an issue whereby postgres variables were rejected by Patroni due to validation rules. |
+| Bug fix | Fixed an issue whereby a user could not set a single `barman_client_dsn_attributes` with `sslmode=verify-full`. |
+| Bug fix | TPA will now assign a lower default `maintenance_work_mem` to avoid out-of-memory errors. |
+
diff --git a/tools/user/import/extensions/exsp.js b/tools/user/import/extensions/exsp.js
index 986dbed5627..45f699ab439 100644
--- a/tools/user/import/extensions/exsp.js
+++ b/tools/user/import/extensions/exsp.js
@@ -1,313 +1,455 @@
-import { promises as fs } from 'fs';
-import { join } from 'path';
-import { env, exit as _exit, argv as _argv } from 'process';
-import { authenticate } from '@google-cloud/local-auth';
-import { google } from 'googleapis';
-import parseArgs from 'minimist';
+import { promises as fs } from "fs";
+import { join } from "path";
+import { env, exit as _exit, argv as _argv } from "process";
+import { authenticate } from "@google-cloud/local-auth";
+import { google } from "googleapis";
+import parseArgs from "minimist";
// If modifying these scopes, delete token.json.
-const SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly'];
+const SCOPES = ["https://www.googleapis.com/auth/spreadsheets.readonly"];
const EDBDOCS_PATH = join(env.HOME, ".edbdocs", "extensions");
-const TOKEN_PATH = join(EDBDOCS_PATH, 'token.json');
-const CREDENTIALS_PATH = join(EDBDOCS_PATH, 'credentials.json');
+const TOKEN_PATH = join(EDBDOCS_PATH, "token.json");
+const CREDENTIALS_PATH = join(EDBDOCS_PATH, "credentials.json");
async function loadSavedCredentialsIfExist() {
- try {
- const content = await fs.readFile(TOKEN_PATH);
- const credentials = JSON.parse(content);
- return google.auth.fromJSON(credentials);
- } catch (err) {
- return null;
- }
+ try {
+ const content = await fs.readFile(TOKEN_PATH);
+ const credentials = JSON.parse(content);
+ return google.auth.fromJSON(credentials);
+ } catch (err) {
+ return null;
+ }
}
async function saveCredentials(client) {
- const content = await fs.readFile(CREDENTIALS_PATH);
- const keys = JSON.parse(content);
- const key = keys.installed || keys.web;
- const payload = JSON.stringify({
- type: 'authorized_user',
- client_id: key.client_id,
- client_secret: key.client_secret,
- refresh_token: client.credentials.refresh_token,
- });
- await fs.writeFile(TOKEN_PATH, payload);
+ const content = await fs.readFile(CREDENTIALS_PATH);
+ const keys = JSON.parse(content);
+ const key = keys.installed || keys.web;
+ const payload = JSON.stringify({
+ type: "authorized_user",
+ client_id: key.client_id,
+ client_secret: key.client_secret,
+ refresh_token: client.credentials.refresh_token,
+ });
+ await fs.writeFile(TOKEN_PATH, payload);
}
async function authorize() {
- const secretsExist = await fs.access(EDBDOCS_PATH)
- .then(() => true).catch(() => false);
-
-
- if (!secretsExist) {
- console.log(`${EDBDOCS_PATH} does not exist. Please create this directory and add the appropriate credentials.json`);
- _exit(1);
- }
-
- let client = await loadSavedCredentialsIfExist();
- if (client) {
- return client;
- }
- client = await authenticate({
- scopes: SCOPES,
- keyfilePath: CREDENTIALS_PATH,
- });
- if (client.credentials) {
- await saveCredentials(client);
- }
+ const secretsExist = await fs
+ .access(EDBDOCS_PATH)
+ .then(() => true)
+ .catch(() => false);
+
+ if (!secretsExist) {
+ console.log(
+ `${EDBDOCS_PATH} does not exist. Please create this directory and add the appropriate credentials.json`,
+ );
+ _exit(1);
+ }
+
+ let client = await loadSavedCredentialsIfExist();
+ if (client) {
return client;
+ }
+ client = await authenticate({
+ scopes: SCOPES,
+ keyfilePath: CREDENTIALS_PATH,
+ });
+ if (client.credentials) {
+ await saveCredentials(client);
+ }
+ return client;
}
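+
+// processRow is a small state machine driven by currentState.rowState:
+// state 0 skips the spreadsheet's header row; state 1 expects a section
+// heading (or finishes on a blank row or "Notes : "); states 2 and 3 emit
+// the table's heading rows; states 4 and 5 emit data and sub-heading rows
+// until a blank row closes the table and control returns to state 1.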
function processRow(currentState, row, nextRow) {
- switch (currentState.rowState) {
- case 0:
- currentState.rowState = 1
- return;
- case 1:
- if (row.length == 0 || row[0] == "Notes : ") {
- // We're done
- currentState.done = true;
- return;
- }
-
- currentState.output.push(`\n\n## ${row[0]}\n\n`);
- currentState.output.push(`\n`)
- currentState.rowState = 2
- return;
- case 2:
- currentState.output.push(``)
- currentState.output.push(``)
- currentState.output.push(` | `)
- currentState.output.push(` | `)
- currentState.output.push(`${row[6]} | `)
- currentState.output.push(`${row[9]} | `)
- currentState.output.push(`${row[11]} | `)
- currentState.output.push(`
\n`)
- currentState.rowState = 3
- return false;
- case 3:
- currentState.output.push(``)
- currentState.output.push(composeHeading(true, true, false, true, row[0], false));
- currentState.output.push(composeHeading(true, true, true, true, row[5], true));
- currentState.output.push(composeHeading(true, false, true, true, row[6], true));
- currentState.output.push(composeHeading(false, false, true, true, row[7], true));
- currentState.output.push(composeHeading(false, true, true, true, row[8], true));
- currentState.output.push(composeHeading(true, false, true, true, row[9], true));
- currentState.output.push(composeHeading(false, true, true, true, row[10], true));
- currentState.output.push(composeHeading(true, false, true, true, row[11], true));
- currentState.output.push(composeHeading(false, false, true, true, row[12], true));
- currentState.output.push(composeHeading(false, true, true, true, row[13], true));
-
- currentState.output.push(`
`)
- currentState.rowState = 4
- return;
- case 4:
- if (row.length == 1) {
- currentState.output.push(composeHeadingRow(row))
- currentState.output.push(``)
- currentState.output.push(``)
- } else {
- currentState.output.push(``)
- currentState.output.push(``)
- currentState.output.push(composeRow(row,nextRow.length==0,currentState));
- }
- currentState.rowState = 5
- return;
- case 5:
- if (row.length == 0) {
- currentState.output.push(`
\n`)
- currentState.rowState = 1
- return;
- }
- if (row.length == 1) {
- currentState.output.push(composeHeadingRow(row,currentState));
- } else {
- currentState.output.push(composeRow(row, nextRow.length == 0,currentState));
- }
- currentState.rowState = 5
- return;
- }
-
- console.err("Fell out of state machine")
- currentState.done = true;
- return;
+ switch (currentState.rowState) {
+ case 0:
+ currentState.rowState = 1;
+ return;
+ case 1:
+ if (row.length == 0 || row[0] == "Notes : ") {
+ // We're done
+ currentState.done = true;
+ return;
+ }
+
+ currentState.output.push(`\n\n## ${row[0]}\n\n`);
+ currentState.output.push(
+ `\n`,
+ );
+ currentState.rowState = 2;
+ return;
+ case 2:
+ currentState.output.push(``);
+ currentState.output.push(``);
+ currentState.output.push(
+ ` | `,
+ );
+ currentState.output.push(
+ ` | `,
+ );
+ currentState.output.push(
+ `${row[6]} | `,
+ );
+ currentState.output.push(
+ `${row[9]} | `,
+ );
+ currentState.output.push(
+ `${row[11]} | `,
+ );
+ currentState.output.push(`
\n`);
+ currentState.rowState = 3;
+ return false;
+ case 3:
+ currentState.output.push(``);
+ currentState.output.push(
+ composeHeading(true, true, false, true, row[0], false),
+ );
+ currentState.output.push(
+ composeHeading(true, true, true, true, row[5], true),
+ );
+ currentState.output.push(
+ composeHeading(true, false, true, true, row[6], true),
+ );
+ currentState.output.push(
+ composeHeading(false, false, true, true, row[7], true),
+ );
+ currentState.output.push(
+ composeHeading(false, true, true, true, row[8], true),
+ );
+ currentState.output.push(
+ composeHeading(true, false, true, true, row[9], true),
+ );
+ currentState.output.push(
+ composeHeading(false, true, true, true, row[10], true),
+ );
+ currentState.output.push(
+ composeHeading(true, false, true, true, row[11], true),
+ );
+ currentState.output.push(
+ composeHeading(false, false, true, true, row[12], true),
+ );
+ currentState.output.push(
+ composeHeading(false, true, true, true, row[13], true),
+ );
+
+ currentState.output.push(`
`);
+ currentState.rowState = 4;
+ return;
+ case 4:
+ if (row.length == 1) {
+ currentState.output.push(composeHeadingRow(row));
+ currentState.output.push(``);
+ currentState.output.push(``);
+ } else {
+ currentState.output.push(``);
+ currentState.output.push(``);
+ currentState.output.push(
+ composeRow(row, nextRow.length == 0, currentState),
+ );
+ }
+ currentState.rowState = 5;
+ return;
+ case 5:
+ if (row.length == 0) {
+ currentState.output.push(`
\n`);
+ currentState.rowState = 1;
+ return;
+ }
+ if (row.length == 1) {
+ currentState.output.push(composeHeadingRow(row, currentState));
+ } else {
+ currentState.output.push(
+ composeRow(row, nextRow.length == 0, currentState),
+ );
+ }
+ currentState.rowState = 5;
+ return;
+ }
+
+ console.error("Fell out of state machine");
+ currentState.done = true;
+ return;
}
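+
+// composeRow renders one spreadsheet row as a table row in the generated
+// page. Leading spaces in the name column encode nesting depth and drive
+// the productToURL lookup key; per-offering cells map TRUE, FALSE, PREVIEW,
+// "Qn 20xx", and "n/a" to ✓, –, Preview, the quarter label, and – respectively.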
function composeRow(row, lastRow, currentState) {
- let output = []
- output.push("")
- let fullName = row[0];
- let trimmedName = fullName.trim();
- let spaceDiff=0;
- while(spaceDiff<fullName.length && fullName[spaceDiff]==" ") { spaceDiff++; }
+ let output = [];
+ output.push("");
+ let fullName = row[0];
+ let trimmedName = fullName.trim();
+ let spaceDiff = 0;
+ while (spaceDiff < fullName.length && fullName[spaceDiff] == " ") {
+ spaceDiff++;
+ }
+ if (spaceDiff == fullName.length) {
+ console.error("All spaces name found.");
+ process.exit(1);
+ }
+ let lookupName = trimmedName.replaceAll(" ", "_");
+ if (spaceDiff <= 1) {
+ // Root element, update state
+ currentState.lastRoot = lookupName;
+ } else {
+ lookupName = currentState.lastRoot + "." + trimmedName.replace(" ", "_");
+ }
+
+ let url = productToURL[lookupName];
+
+ if (url == undefined) {
+ currentState.unmapped.push(lookupName);
+ }
+
+ if (url == "undefined") {
+ // This lets you not set a URL and not get nagged at
+ url = undefined;
+ }
+
+ if (spaceDiff > 3) {
+ trimmedName = " ".repeat(spaceDiff) + trimmedName;
+ }
+
+ output.push(
+ composeCell(true, false, false, true, trimmedName, lastRow, false, url),
+ );
+ output.push(composeCell(true, true, false, true, row[5], lastRow, true));
+ for (let i = 6; i < 14; i++) {
+ if (row[i] == "TRUE") {
+ output.push(
+ composeCell(
+ i == 6 || i == 9 || i == 11,
+ i == 13,
+ true,
+ true,
+ `✓`,
+ lastRow,
+ true,
+ ),
+ );
+ } else if (row[i] == "FALSE") {
+ output.push(
+ composeCell(
+ i == 6 || i == 9 || i == 11,
+ i == 13,
+ true,
+ true,
+ `–`,
+ lastRow,
+ true,
+ ),
+ );
+ } else if (row[i] == "PREVIEW") {
+ output.push(
+ composeCell(
+ i == 6 || i == 9 || i == 11,
+ i == 13,
+ false,
+ true,
+ `Preview`,
+ lastRow,
+ true,
+ ),
+ );
+ } else if (row[i].match(/Q[1-4] 20[0-9][0-9]/gm)) {
+ output.push(
+ composeCell(
+ i == 6 || i == 9 || i == 11,
+ i == 13,
+ false,
+ true,
+ row[i],
+ lastRow,
+ true,
+ ),
+ );
+ } else if (row[i] == "n/a") {
+ /* Hide n/a from spreadsheet as - (n/a is internal status only) */
+ output.push(
+ composeCell(
+ i == 6 || i == 9 || i == 11,
+ i == 13,
+ true,
+ true,
+ `–`,
+ lastRow,
+ true,
+ ),
+ );
} else {
- lookupName=currentState.lastRoot+"."+trimmedName.replace(" ","_");
+ console.log(`Unhandled value ${row[i]}`);
}
+ }
+ output.push("
\n");
- let url=productToURL[lookupName];
-
- if(url==undefined) {
- currentState.unmapped.push(lookupName);
- }
-
- if(url=="undefined") { // This lets you not set a URL and not get nagged at
- url=undefined;
- }
-
- if(spaceDiff>3) {
- trimmedName = " ".repeat(spaceDiff) + trimmedName
- }
-
- output.push(composeCell(true, false, false, true, trimmedName, lastRow, false,url));
- output.push(composeCell(true, true, false, true, row[5], lastRow, true));
- for (let i = 6; i < 14; i++) {
- if (row[i] == "TRUE") {
- output.push(composeCell(i == 6 || i == 9 || i == 11, i == 13, true, true, `✓`, lastRow, true));
- } else if(row[i] == "FALSE") {
- output.push(composeCell(i == 6 || i == 9 || i == 11, i == 13, true, true, `–`, lastRow, true));
- } else if(row[i] == "PREVIEW") {
- output.push(composeCell(i == 6 || i == 9 || i == 11, i == 13, false, true, `Preview`, lastRow, true));
- } else if(row[i].match(/Q[1-4] 20[0-9][0-9]/gm)) {
- output.push(composeCell(i == 6 || i == 9 || i == 11, i == 13, false, true, row[i], lastRow, true));
- } else if(row[i]=="n/a") { /* Hide n/a from spreadsheet as - (n/a is internal status only) */
- output.push(composeCell(i == 6 || i == 9 || i == 11, i == 13, true, true, `–`, lastRow, true));
- }else {
- console.log(`Unhandled value ${row[i]}`)
- }
- }
- output.push("\n")
-
- return output.join("");
+ return output.join("");
}
function composeHeadingRow(row) {
- return `${row[0]} |
\n`;
+ return `${row[0]} |
\n`;
}
-function composeCell(left, right, bold, middleAlign, value, lastRow, centered,url) {
- var cellValue=value;
-
- if(url!=undefined) {
- cellValue=`${value}`;
- }
-
- return `${cellValue} | `;
-
+function composeCell(
+ left,
+ right,
+ bold,
+ middleAlign,
+ value,
+ lastRow,
+ centered,
+ url,
+) {
+ var cellValue = value;
+
+ if (url != undefined) {
+ cellValue = `${value}`;
+ }
+
+ return `${cellValue} | `;
}
function composeHeading(left, right, bold, middleAlign, value, alwaysSplit) {
- let splitValue = value.split(" ");
- let displayValue = value;
- if (alwaysSplit || splitValue.length > 2) {
- displayValue = splitValue.join("
");
- }
-
- return `${displayValue} | `;
+ let splitValue = value.split(" ");
+ let displayValue = value;
+ if (alwaysSplit || splitValue.length > 2) {
+ displayValue = splitValue.join("
");
+ }
+
+ return `${displayValue} | `;
}
-function _composeCell(left, right, bold, middleAlign, top, bottom, centered, bottomAlign) {
-
- let options = [];
-
- if (bold) {
- options.push("font-weight: bold;");
- }
- if (left) {
- options.push("border-left: solid 1px;");
- } else {
- options.push("border-left: none;");
- }
- if (right) {
- options.push("border-right: solid 1px;")
- } else {
- options.push("border-right: none;")
- }
- if (middleAlign) {
- options.push("middle-align: middle;")
- }
-
- if (top) {
- options.push("border-top: solid 1px;")
- } else {
- options.push("border-top: none;")
- }
-
- if (bottom) {
- options.push("border-bottom: solid 1px;")
- } else {
- options.push("border-bottom: none;")
- }
-
- if (centered) {
- options.push("text-align: center;");
- }
-
- if (bottomAlign) {
- options.push("vertical-align: bottom;");
- }
-
- options.push("padding-left: 3px; padding-right: 3px;padding-top: 2px; padding-bottom: 2px; ")
-
- return options.join(" ");
+function _composeCell(
+ left,
+ right,
+ bold,
+ middleAlign,
+ top,
+ bottom,
+ centered,
+ bottomAlign,
+) {
+ let options = [];
+
+ if (bold) {
+ options.push("font-weight: bold;");
+ }
+ if (left) {
+ options.push("border-left: solid 1px;");
+ } else {
+ options.push("border-left: none;");
+ }
+ if (right) {
+ options.push("border-right: solid 1px;");
+ } else {
+ options.push("border-right: none;");
+ }
+ if (middleAlign) {
+ options.push("middle-align: middle;");
+ }
+
+ if (top) {
+ options.push("border-top: solid 1px;");
+ } else {
+ options.push("border-top: none;");
+ }
+
+ if (bottom) {
+ options.push("border-bottom: solid 1px;");
+ } else {
+ options.push("border-bottom: none;");
+ }
+
+ if (centered) {
+ options.push("text-align: center;");
+ }
+
+ if (bottomAlign) {
+ options.push("vertical-align: bottom;");
+ }
+
+ options.push(
+ "padding-left: 3px; padding-right: 3px;padding-top: 2px; padding-bottom: 2px; ",
+ );
+
+ return options.join(" ");
}
-var productToURL={};
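+// Maps extension lookup names to documentation URLs; populated from extensionrefs.json in processExtensions().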
+var productToURL = {};
async function processExtensions(auth) {
- var argv = parseArgs(_argv.slice(2));
-
- if (argv.source == undefined) {
- console.log("Need --source");
- _exit(1);
+ var argv = parseArgs(_argv.slice(2));
+
+ if (argv.source == undefined) {
+ console.log("Need --source");
+ _exit(1);
+ }
+
+ const templateFile = join(argv.source, "index.mdx.in");
+ const templateFileContent = await fs.readFile(templateFile);
+
+ const extensionsFile = join(argv.source, "extensionrefs.json");
+ const extensionsContent = await fs.readFile(extensionsFile);
+
+ productToURL = JSON.parse(extensionsContent);
+
+ const sheets = google.sheets({ version: "v4", auth });
+
+ // const res = await sheets.spreadsheets.values.get({
+ // spreadsheetId: "1GXzzVYT6CULGgGcyp0VtBfOtbxuWxkOU2pRYW42W4pM",
+ // range: "Extensions by Offering"
+ // });
+ const res = await sheets.spreadsheets.values.get({
+ spreadsheetId: "1UjwikOZhid9PgFd6JPF72XpA-QCmy3uuWnvHRak563U",
+ range: "Extensions by Offering",
+ });
+ const rows = res.data.values;
+ if (!rows || rows.length === 0) {
+ console.log("No data found.");
+ return;
+ }
+
+ var currentState = {
+ done: false,
+ output: [templateFileContent],
+ rowState: 0,
+ lastRoot: "",
+ unmapped: [],
+ };
+
+ for (var i = 0; i < rows.length; i++) {
+ processRow(currentState, rows[i], rows[i + 1]);
+ if (currentState.done) {
+ break;
}
+ }
- const templateFile = join(argv.source, "index.mdx.in");
- const templateFileContent = await fs.readFile(templateFile);
-
- const extensionsFile= join(argv.source, "extensionrefs.json");
- const extensionsContent = await fs.readFile(extensionsFile);
+ const outputFile = join(argv.source, "index.mdx");
- productToURL=JSON.parse(extensionsContent);
+ await fs.writeFile(outputFile, currentState.output.join(""));
- const sheets = google.sheets({ version: 'v4', auth });
-
- const res = await sheets.spreadsheets.values.get({
- spreadsheetId: "1GXzzVYT6CULGgGcyp0VtBfOtbxuWxkOU2pRYW42W4pM",
- range: "Extensions by Offering"
+ if (currentState.unmapped.length > 0) {
+ console.log("Unmapped products - add to extensionrefs.json");
+ currentState.unmapped.forEach((element) => {
+ console.log(`"${element}":"https:",`);
});
- const rows = res.data.values;
- if (!rows || rows.length === 0) {
- console.log('No data found.');
- return;
- }
-
-
- var currentState = { done: false, output: [templateFileContent], rowState: 0, lastRoot:"", unmapped: [] };
-
- for (var i = 0; i < rows.length; i++) {
- processRow(currentState, rows[i], rows[i + 1])
- if (currentState.done) { break; }
- };
-
- const outputFile = join(argv.source, "index.mdx");
-
- await fs.writeFile(outputFile,currentState.output.join(""));
-
- if (currentState.unmapped.length>0) {
- console.log("Unmapped products - add to extensionrefs.json");
- currentState.unmapped.forEach(element => {
- console.log(`"${element}":"https:",`);
- });
- }
-
+ }
}
-authorize().then(processExtensions).catch(console.error);
\ No newline at end of file
+authorize().then(processExtensions).catch(console.error);