From a3ec499e8792dcb4cf438e5d4cc94f2022bea6d0 Mon Sep 17 00:00:00 2001 From: mrdrivingduck Date: Mon, 18 Mar 2024 17:19:30 +0800 Subject: [PATCH 1/5] docs: polar_stat_env --- docs/.vuepress/configs/navbar/en.ts | 26 ++++------------- docs/.vuepress/configs/navbar/zh.ts | 16 +++-------- docs/.vuepress/configs/sidebar/en.ts | 1 + docs/.vuepress/configs/sidebar/zh.ts | 1 + docs/contributing/trouble-issuing.md | 38 +++++++++++++++++++++++++ docs/zh/contributing/trouble-issuing.md | 38 +++++++++++++++++++++++++ 6 files changed, 88 insertions(+), 32 deletions(-) create mode 100644 docs/contributing/trouble-issuing.md create mode 100644 docs/zh/contributing/trouble-issuing.md diff --git a/docs/.vuepress/configs/navbar/en.ts b/docs/.vuepress/configs/navbar/en.ts index 7cb3aebdfa1..990a928d18f 100644 --- a/docs/.vuepress/configs/navbar/en.ts +++ b/docs/.vuepress/configs/navbar/en.ts @@ -84,32 +84,18 @@ export const en: NavbarConfig = [ text: "Dev", link: "/development/", children: [ - { - text: "Development on Docker", - link: "/development/dev-on-docker.html", - }, - { - text: "Customize Development Environment", - link: "/development/customize-dev-env.html", - }, + "/development/dev-on-docker.html", + "/development/customize-dev-env.html", ], }, { text: "Contributing", link: "/contributing/", children: [ - { - text: "Contributing Docs", - link: "/contributing/contributing-polardb-docs.html", - }, - { - text: "Contributing Code", - link: "/contributing/contributing-polardb-kernel.html", - }, - { - text: "Coding Style", - link: "/contributing/coding-style.html", - }, + "/contributing/contributing-polardb-docs.html", + "/contributing/contributing-polardb-kernel.html", + "/contributing/coding-style.html", + "/contributing/trouble-issuing.html", ], }, ]; diff --git a/docs/.vuepress/configs/navbar/zh.ts b/docs/.vuepress/configs/navbar/zh.ts index 922bb422568..da8f85c0ece 100644 --- a/docs/.vuepress/configs/navbar/zh.ts +++ b/docs/.vuepress/configs/navbar/zh.ts @@ -121,18 +121,10 @@ export const zh: NavbarConfig = [ text: "参与社区", link: "/zh/contributing/", children: [ - { - text: "贡献文档", - link: "/zh/contributing/contributing-polardb-docs.html", - }, - { - text: "贡献代码", - link: "/zh/contributing/contributing-polardb-kernel.html", - }, - { - text: "编码风格", - link: "/zh/contributing/coding-style.html", - }, + "/zh/contributing/contributing-polardb-docs.html", + "/zh/contributing/contributing-polardb-kernel.html", + "/zh/contributing/coding-style.html", + "/zh/contributing/trouble-issuing.md", ], }, ]; diff --git a/docs/.vuepress/configs/sidebar/en.ts b/docs/.vuepress/configs/sidebar/en.ts index 3fbecf8463b..d19810b4dcd 100644 --- a/docs/.vuepress/configs/sidebar/en.ts +++ b/docs/.vuepress/configs/sidebar/en.ts @@ -102,6 +102,7 @@ export const en: SidebarConfig = { "/contributing/contributing-polardb-kernel.md", "/contributing/contributing-polardb-docs.md", "/contributing/coding-style.md", + "/contributing/trouble-issuing.md", ], }, ], diff --git a/docs/.vuepress/configs/sidebar/zh.ts b/docs/.vuepress/configs/sidebar/zh.ts index 9edd2610577..a4b286f0514 100644 --- a/docs/.vuepress/configs/sidebar/zh.ts +++ b/docs/.vuepress/configs/sidebar/zh.ts @@ -175,6 +175,7 @@ export const zh: SidebarConfig = { "/zh/contributing/contributing-polardb-kernel.md", "/zh/contributing/contributing-polardb-docs.md", "/zh/contributing/coding-style.md", + "/zh/contributing/trouble-issuing.md", ], }, ], diff --git a/docs/contributing/trouble-issuing.md b/docs/contributing/trouble-issuing.md new file mode 100644 index 
00000000000..8702de87384 --- /dev/null +++ b/docs/contributing/trouble-issuing.md @@ -0,0 +1,38 @@ +# 问题报告 + +如果在运行 PolarDB for PostgreSQL 的过程中出现问题,请提供数据库的日志与机器的配置信息以方便定位问题。 + +通过 `polar_stat_env` 插件可以轻松获取数据库所在主机的硬件配置: + +```sql:no-line-numbers +=> CREATE EXTENSION polar_stat_env; +=> SELECT polar_stat_env(); + polar_stat_env +-------------------------------------------------------------------- + { + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8369B CPU @ 2.70GHz",+ + "CPU Cores": "8", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "4", + + "NUMA Nodes": "1", + + "L1d cache": "192 KiB (4 instances)", + + "L1i cache": "128 KiB (4 instances)", + + "L2 cache": "5 MiB (4 instances)", + + "L3 cache": "48 MiB (1 instance)" + + }, + + "Memory": { + + "Memory Total (GB)": "14", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "0" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "100", + + "Min Free KBytes(KB)": "67584" + + } + + } +(1 row) +``` diff --git a/docs/zh/contributing/trouble-issuing.md b/docs/zh/contributing/trouble-issuing.md new file mode 100644 index 00000000000..8702de87384 --- /dev/null +++ b/docs/zh/contributing/trouble-issuing.md @@ -0,0 +1,38 @@ +# 问题报告 + +如果在运行 PolarDB for PostgreSQL 的过程中出现问题,请提供数据库的日志与机器的配置信息以方便定位问题。 + +通过 `polar_stat_env` 插件可以轻松获取数据库所在主机的硬件配置: + +```sql:no-line-numbers +=> CREATE EXTENSION polar_stat_env; +=> SELECT polar_stat_env(); + polar_stat_env +-------------------------------------------------------------------- + { + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8369B CPU @ 2.70GHz",+ + "CPU Cores": "8", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "4", + + "NUMA Nodes": "1", + + "L1d cache": "192 KiB (4 instances)", + + "L1i cache": "128 KiB (4 instances)", + + "L2 cache": "5 MiB (4 instances)", + + "L3 cache": "48 MiB (1 instance)" + + }, + + "Memory": { + + "Memory Total (GB)": "14", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "0" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "100", + + "Min Free KBytes(KB)": "67584" + + } + + } +(1 row) +``` From 20dcd42dcdb3d94346c6673acd5548858334a1f3 Mon Sep 17 00:00:00 2001 From: mrdrivingduck Date: Mon, 25 Mar 2024 22:13:40 +0800 Subject: [PATCH 2/5] docs: modify README.md --- README-CN.md | 24 ++++++++++++++---------- README.md | 8 ++++++-- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/README-CN.md b/README-CN.md index ffdce5c1b29..cad64001995 100644 --- a/README-CN.md +++ b/README-CN.md @@ -29,14 +29,14 @@ PolarDB for PostgreSQL(下文简称为 PolarDB)是一款阿里云自主研发的云原生数据库产品,100% 兼容 PostgreSQL,采用基于 Shared-Storage 的存储计算分离架构,具有极致弹性、毫秒级延迟、HTAP 的能力。 1. 极致弹性:存储与计算能力均可独立地横向扩展。 - - 当计算能力不够时,可以单独扩展计算集群,数据无需复制。 - - 当存储容量或 I/O 不够时,可以单独扩展存储集群,而不中断业务。 + - 当计算能力不够时,可以单独扩展计算集群,数据无需复制 + - 当存储容量或 I/O 不够时,可以单独扩展存储集群,而不中断业务 2. 毫秒级延迟: - - WAL 日志存储在共享存储上,RW 到所有 RO 之间仅复制 WAL 的元数据。 - - 独创的 _LogIndex_ 技术,实现了 Lazy 回放和 Parallel 回放,理论上最大程度地缩小了 RW 和 RO 节点间的延迟。 + - WAL 日志存储在共享存储上,RW 到所有 RO 之间仅复制 WAL 日志的元数据 + - 独创的 _LogIndex_ 技术,实现了 Lazy 回放和 Parallel 回放,最大程度地缩小了 RW 和 RO 节点间的延迟 3. 
HTAP 能力:基于 Shared-Storage 的分布式并行执行框架,加速在 OLTP 场景下的 OLAP 查询。一套 OLTP 型的数据,可支持 2 套计算引擎: - - 单机执行引擎:处理高并发的 TP 型负载。 - - 分布式执行引擎:处理大查询的 AP 型负载。 + - 单机执行引擎:处理高并发的 TP 型负载 + - 分布式执行引擎:处理大查询的 AP 型负载 PolarDB 还支持时空、GIS、图像、向量、搜索、图谱等多模创新特性,应对企业对数据处理日新月异的需求。 @@ -46,13 +46,13 @@ PolarDB 还支持时空、GIS、图像、向量、搜索、图谱等多模创新 ## 产品架构 -PolarDB 采用了基于 Shared-Storage 的存储计算分离架构。数据库由传统的 Share-Nothing 架构,转变成了 Shared-Storage 架构。由原来的 N 份计算 + N 份存储,转变成了 N 份计算 + 1 份存储。虽然共享存储上数据是一份,但是数据在各节点内存中的状态是不同的,需要通过内存状态的同步来维护数据的一致性;同时主节点在刷脏时也需要做协调,避免只读节点读取到超前的 **“未来页面”**,也要避免只读节点读取到过时的没有在内存中被正确回放的 **“过去页面”**。为了解决该问题,PolarDB 创造性地设计了 _LogIndex_ 数据结构来维护页面的回放历史,该结构能够实现主节点与只读节点之间的同步。 +PolarDB for PostgreSQL 采用了基于 Shared-Storage 的存储计算分离架构。数据库由传统的 Share-Nothing 架构,转变成了 Shared-Storage 架构。由原来的 N 份计算 + N 份存储,转变成了 N 份计算 + 1 份存储。虽然共享存储上数据是一份,但是数据在各节点内存中的状态是不同的,需要通过内存状态的同步来维护数据的一致性;同时主节点在刷脏时也需要做协调,避免只读节点读取到超前的 **“未来页面”**,也要避免只读节点读取到过时的没有在内存中被正确回放的 **“过去页面”**。为了解决该问题,PolarDB 创造性地设计了 _LogIndex_ 数据结构来维护页面的回放历史,该结构能够实现主节点与只读节点之间的同步。 在存储计算分离后,I/O 单路延迟变大的同时,I/O 的吞吐也变大了。在处理分析型查询时,仅使用单个只读节点无法发挥出存储侧的大 I/O 带宽优势,也无法利用其他只读节点的 CPU、内存和 I/O 资源。为了解决该问题,PolarDB 研发了基于 Shared-Storage 的并行执行引擎,能够在 SQL 级别上弹性利用任意数目的 CPU 来加速分析查询,支持 HTAP 的混合负载场景。 详情请查阅 [产品架构](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/theory/arch-overview.html)。 -## 快速入门 +## 快速部署 如果您已安装 Docker,那么可以从 DockerHub 上拉取 PolarDB for PostgreSQL 的 本地存储实例镜像,创建、运行并进入容器,然后直接使用 PolarDB 实例: @@ -69,7 +69,11 @@ postgres=# SELECT version(); (1 row) ``` -对于更多进阶部署方式,请移步在线文档中的 [进阶部署](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/deploying/deploy.html)。在部署前,我们建议您先了解一下 PolarDB for PostgreSQL 的 [架构简介](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/deploying/introduction.html)。 +对于更多进阶部署方式,请移步在线文档中的 [进阶部署](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/deploying/deploy.html)。在部署前,了解 PolarDB for PostgreSQL 的 [架构简介](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/deploying/introduction.html) 能够深化对每个步骤的理解。 + +## 开发 + +参考 [开发指南](https://apsaradb.github.io/PolarDB-for-PostgreSQL/zh/development/dev-on-docker.html) 进行源码编译和开发。 ## 文档 @@ -79,7 +83,7 @@ postgres=# SELECT version(); ## 参与贡献 -我们诚挚欢迎社区参与 PolarDB 的贡献,无论是代码还是文档。 +我们诚挚欢迎社区参与 PolarDB for PostgreSQL 的贡献,无论是代码还是文档。 以下是贡献者列表(由 [contrib.rocks](https://contrib.rocks) 支持): diff --git a/README.md b/README.md index 8de04444637..c22b98222cf 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ The `POLARDB_11_STABLE` is the stable branch based on PostgreSQL 11.9, which sup ## Architecture and Roadmap -PolarDB uses a shared-storage-based architecture in which computing is decoupled from storage. The conventional shared-nothing architecture is changed to the shared-storage architecture. N copies of data in the compute cluster and N copies of data in the storage cluster are changed to N copies of data in the compute cluster and one copy of data in the storage cluster. The shared storage stores one copy of data, but the data states in memory are different. The WAL logs must be synchronized from the primary node to read-only nodes to ensure data consistency. In addition, when the primary node flushes dirty pages, it must be controlled to prevent the read-only nodes from reading future pages. Meanwhile, the read-only nodes must be prevented from reading the outdated pages that are not correctly replayed in memory. To resolve this issue, PolarDB provides the index structure _LogIndex_ to maintain the page replay history. LogIndex can be used to synchronize data from the primary node to read-only nodes. 
+PolarDB for PostgreSQL uses a shared-storage-based architecture in which computing is decoupled from storage. The conventional shared-nothing architecture is changed to the shared-storage architecture. N copies of data in the compute cluster and N copies of data in the storage cluster are changed to N copies of data in the compute cluster and one copy of data in the storage cluster. The shared storage stores one copy of data, but the data states in memory are different. The WAL logs must be synchronized from the primary node to read-only nodes to ensure data consistency. In addition, when the primary node flushes dirty pages, it must be controlled to prevent the read-only nodes from reading future pages. Meanwhile, the read-only nodes must be prevented from reading the outdated pages that are not correctly replayed in memory. To resolve this issue, PolarDB provides the index structure _LogIndex_ to maintain the page replay history. LogIndex can be used to synchronize data from the primary node to read-only nodes.
 
 After computing is decoupled from storage, the I/O latency and throughput increase. When a single read-only node is used to process analytical queries, the CPUs, memory, and I/O of other read-only nodes and the large storage I/O bandwidth cannot be fully utilized. To resolve this issue, PolarDB provides the shared-storage-based MPP engine. The engine can use CPUs to accelerate analytical queries at SQL level and support a mix of OLAP workloads and OLTP workloads for HTAP.
 
@@ -71,6 +71,10 @@ postgres=# SELECT version();
 
 For more advanced deployment methods, please refer to [Advanced Deployment](https://apsaradb.github.io/PolarDB-for-PostgreSQL/deploying/deploy.html). Before your deployment, we recommend figuring out the [architecture](https://apsaradb.github.io/PolarDB-for-PostgreSQL/deploying/introduction.html) of PolarDB for PostgreSQL.
 
+## Development
+
+Please refer to [Development Guide](https://apsaradb.github.io/PolarDB-for-PostgreSQL/development/dev-on-docker.html) to compile and develop PolarDB for PostgreSQL.
+
 ## Documentation
 
 Please refer to [Online Documentation Website](https://apsaradb.github.io/PolarDB-for-PostgreSQL/) to see the whole documentation.
 
@@ -79,7 +83,7 @@ If you want to explore or develop documentation locally, see [Document Contribut
 
 ## Contributing
 
-You are welcome to make contributions to PolarDB, no matter code or documentation.
+You are welcome to make contributions to PolarDB for PostgreSQL, no matter code or documentation.
 
 Here are the contributors:

From d52e8258f4cfd4b6fc50d36d7149e9131a6581f3 Mon Sep 17 00:00:00 2001
From: mrdrivingduck
Date: Mon, 25 Mar 2024 22:17:54 +0800
Subject: [PATCH 3/5] feat: remove show hook of polar_px_scan_unit_size

---
 src/backend/utils/misc/guc_px.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/backend/utils/misc/guc_px.c b/src/backend/utils/misc/guc_px.c
index e77a58e063e..f39c110d6e4 100644
--- a/src/backend/utils/misc/guc_px.c
+++ b/src/backend/utils/misc/guc_px.c
@@ -2491,7 +2491,7 @@ struct config_int ConfigureNamesInt_px[] =
 	},
 	&px_scan_unit_size,
 	512, 1, 1024,
-	px_check_scan_unit_size, NULL, px_show_scan_unit_size
+	px_check_scan_unit_size, NULL, NULL
 	},
 
 	{
@@ -2783,15 +2783,6 @@ px_check_scan_unit_size(int *newval, void **extra, GucSource source)
 	return true;
 }
 
-static const char*
-px_show_scan_unit_size(void)
-{
-	static char nbuf[120];
-	snprintf(nbuf, sizeof(nbuf), "scan_unit_size: %d, scan_unit_bit: %d",
-			 px_scan_unit_size, px_scan_unit_bit);
-	return nbuf;
-}
-
 static bool
 px_check_ignore_function(char **newval, void **extra, GucSource source)
 {
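A note on the hunk above: the three trailing entries of a `config_int` record are the check, assign, and show hooks, so this change clears only the show hook while keeping `px_check_scan_unit_size`. With the show hook set to `NULL`, `SHOW` and `current_setting()` fall back to printing the stored integer (for example `512`) instead of the composite `scan_unit_size: ..., scan_unit_bit: ...` string produced by the deleted function. Below is a minimal, hypothetical sketch of the same pattern using the extension-facing GUC API rather than the in-core `ConfigureNamesInt_px` table; the `demo.scan_unit_size` name, the `_PG_init()` wiring, and the `PGC_USERSET` context are illustrative assumptions, not part of the PolarDB patch:

```c
#include "postgres.h"

#include "fmgr.h"
#include "utils/guc.h"

PG_MODULE_MAGIC;

/* Illustrative stand-in for px_scan_unit_size; not the kernel's variable. */
static int	demo_scan_unit_size = 512;

void		_PG_init(void);

void
_PG_init(void)
{
	/*
	 * The last three arguments are the check, assign, and show hooks.  With
	 * the show hook left NULL (as the in-core entry now does), SHOW and
	 * current_setting() simply print the stored integer, e.g. "512".
	 */
	DefineCustomIntVariable("demo.scan_unit_size",
							"Scan unit size, in blocks.",
							NULL,
							&demo_scan_unit_size,
							512, 1, 1024,
							PGC_USERSET,
							0,
							NULL,	/* check_hook */
							NULL,	/* assign_hook */
							NULL);	/* show_hook */
}
```

Leaving the show hook `NULL` is the common case; a custom hook is only worth keeping when the displayed text must differ from the raw value, which is exactly what `px_show_scan_unit_size()` used to provide.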
From 4216920577066c626b44b16b9c04dedcb78caca1 Mon Sep 17 00:00:00 2001
From: mrdrivingduck
Date: Thu, 28 Mar 2024 23:27:42 +0800
Subject: [PATCH 4/5] fix: bgworker registration check

---
 src/backend/storage/ipc/polar_procpool.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/backend/storage/ipc/polar_procpool.c b/src/backend/storage/ipc/polar_procpool.c
index cb6223a99ae..bf92ac0931e 100644
--- a/src/backend/storage/ipc/polar_procpool.c
+++ b/src/backend/storage/ipc/polar_procpool.c
@@ -1006,9 +1006,11 @@ polar_reg_sub_task(polar_task_sched_ctl_t *ctl, uint32 i)
 	StrNCpy(worker.bgw_type, ctl->sched->name, BGW_MAXLEN);
 	worker.bgw_main_arg = (Datum)(ctl->sched);
 
-	RegisterDynamicBackgroundWorker(&worker, &handle);
-
-	Assert(handle != NULL);
+	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
+		ereport(PANIC,
+				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+				 errmsg("registering dynamic bgworker failed"),
+				 errhint("Consider increasing the configuration parameter \"max_worker_processes\".")));
 
 	ctl->sub_proc[i].handle = handle;
 	ctl->sub_proc[i].proc = NULL;
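For the fix above: `RegisterDynamicBackgroundWorker()` returns `false` when every `max_worker_processes` slot is already in use, so checking the return value (instead of asserting on the handle) is what turns slot exhaustion into a reportable error. The sketch below shows the same pattern as it might appear in an extension; the `demo_*` names, the `ERROR` level (the patch itself uses `PANIC`), and the optional `WaitForBackgroundWorkerStartup()` call are assumptions for illustration, not code from this repository:

```c
#include "postgres.h"

#include "miscadmin.h"
#include "postmaster/bgworker.h"

/*
 * Hypothetical helper: start one dynamic background worker and report an
 * error if no bgworker slot is available, mirroring the check that
 * polar_reg_sub_task() now performs.
 */
static void
demo_start_worker(const char *name)
{
	BackgroundWorker worker;
	BackgroundWorkerHandle *handle;
	BgwHandleStatus status;
	pid_t		pid;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_name, BGW_MAXLEN, "%s", name);
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "demo_module");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "demo_worker_main");
	worker.bgw_notify_pid = MyProcPid;	/* needed for the wait below */

	/* false means every max_worker_processes slot is already taken */
	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
				 errmsg("could not register background worker \"%s\"", name),
				 errhint("Consider increasing the configuration parameter \"max_worker_processes\".")));

	/* optional: block until the postmaster has actually launched it */
	status = WaitForBackgroundWorkerStartup(handle, &pid);
	if (status != BGWH_STARTED)
		ereport(ERROR,
				(errmsg("background worker \"%s\" failed to start", name)));
}
```

Waiting for startup is optional but common (PostgreSQL's `worker_spi` sample does the same); callers that only need the handle, as `polar_reg_sub_task()` does, can stop after the registration check.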
From 243bc6964ca4cfdefe8dd9c00e2d3bd2a0771dbb Mon Sep 17 00:00:00 2001
From: mrdrivingduck
Date: Thu, 4 Apr 2024 23:47:35 +0800
Subject: [PATCH 5/5] docs: support pnpm and remove yarn support

---
 .github/workflows/docs-format-check.yml   |   30 +-
 .github/workflows/docs.yml                |   30 +-
 docs/.vuepress/components/ArticleInfo.vue |   10 +-
 docs/.vuepress/config.ts                  |    7 +-
 package.json                              |   17 +-
 pnpm-lock.yaml                            | 2739 +++++++++++++++++
 yarn.lock                                 | 3356 ---------------------
 7 files changed, 2784 insertions(+), 3405 deletions(-)
 create mode 100644 pnpm-lock.yaml
 delete mode 100644 yarn.lock

diff --git a/.github/workflows/docs-format-check.yml b/.github/workflows/docs-format-check.yml
index 08cc79eabcb..7ff95084169 100644
--- a/.github/workflows/docs-format-check.yml
+++ b/.github/workflows/docs-format-check.yml
@@ -21,27 +21,21 @@ jobs:
           # fetch all commits to get last updated time or other git log info
           fetch-depth: 0
 
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v3
+        with:
+          # choose pnpm version to use
+          version: 8
+          # install deps with pnpm
+          run_install: true
+
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
           # choose node.js version to use
-          node-version: "16"
-
-      # cache node_modules
-      - name: Cache dependencies
-        uses: actions/cache@v4
-        id: yarn-cache
-        with:
-          path: |
-            **/node_modules
-          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-yarn-
-
-      # install dependencies if the cache did not hit
-      - name: Install dependencies
-        if: steps.yarn-cache.outputs.cache-hit != 'true'
-        run: yarn --frozen-lockfile
+          node-version: 20
+          # cache deps for pnpm
+          cache: pnpm
 
       - name: Prettier check
-        run: npx prettier --check docs/
+        run: pnpm prettier:check
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 71852c36cb5..066e29ce5ac 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -21,31 +21,25 @@ jobs:
           # fetch all commits to get last updated time or other git log info
           fetch-depth: 0
 
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v3
+        with:
+          # choose pnpm version to use
+          version: 8
+          # install deps with pnpm
+          run_install: true
+
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
          # choose node.js version to use
-          node-version: "16"
-
-      # cache node_modules
-      - name: Cache dependencies
-        uses: actions/cache@v4
-        id: yarn-cache
-        with:
-          path: |
-            **/node_modules
-          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-yarn-
-
-      # install dependencies if the cache did not hit
-      - name: Install dependencies
-        if: steps.yarn-cache.outputs.cache-hit != 'true'
-        run: yarn --frozen-lockfile
+          node-version: 20
+          # cache deps for pnpm
+          cache: pnpm
 
       # run build script
       - name: Build VuePress site
-        run: yarn docs:build
+        run: pnpm docs:build
 
       # please check out the docs of the workflow for more details
       # @see https://github.com/crazy-max/ghaction-github-pages
diff --git a/docs/.vuepress/components/ArticleInfo.vue b/docs/.vuepress/components/ArticleInfo.vue
index e4ab2fad7b0..ece15e5b3c0 100644
--- a/docs/.vuepress/components/ArticleInfo.vue
+++ b/docs/.vuepress/components/ArticleInfo.vue
@@ -11,7 +11,7 @@ const { frontmatter } = toRefs(props);