diff --git a/.github/actions/setup-kind/action.yml b/.github/actions/setup-kind/action.yml index 8d312e9e6..9648f2d2f 100644 --- a/.github/actions/setup-kind/action.yml +++ b/.github/actions/setup-kind/action.yml @@ -12,9 +12,9 @@ runs: steps: - id: helm name: Set up Helm - uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 with: - version: v3.6.2 + version: v3.16.2 - id: kubectl name: Install kubectl diff --git a/.github/actions/verify-pooler-ready/action.yml b/.github/actions/verify-pooler-ready/action.yml deleted file mode 100644 index 38a7f493b..000000000 --- a/.github/actions/verify-pooler-ready/action.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Verifies that a CNPG cluster has a certain amount of ready instances -description: Verifies that a CNPG cluster has a certain amount of ready instances -inputs: - pooler-name: - description: The name of the pooler to verify - required: true - default: database-cluster - ready-instances: - description: The amount of ready instances to wait for - required: true - default: "3" - -runs: - using: composite - steps: - - name: Wait for the pooler to become ready - shell: bash - run: | - ITER=0 - while true; do - if [[ $ITER -ge 300 ]]; then - echo "Pooler not ready" - exit 1 - fi - READY_INSTANCES=$(kubectl get deployments.apps ${INPUT_POOLER_NAME} -o jsonpath='{.status.readyReplicas}') - if [[ "$READY_INSTANCES" == ${INPUT_READY_INSTANCES} ]]; then - echo "Pooler up and running" - break - fi - sleep 1 - (( ++ITER )) - done diff --git a/.github/minio.yaml b/.github/minio.yaml new file mode 100644 index 000000000..6bcb77acc --- /dev/null +++ b/.github/minio.yaml @@ -0,0 +1,9 @@ +tenant: + pools: + - servers: 1 + name: pool0 + volumesPerServer: 1 + size: 1Gi + buckets: + - name: mybucket + region: local diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9cf1e3624..2b4bec257 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -5,24 +5,27 @@ on: branches: - '**' - '!gh-pages' + pull_request: + branches-ignore: + - 'gh-pages' jobs: linter: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 - name: Set up Helm - uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 with: - version: v3.4.0 + version: v3.16.2 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.7 + python-version: 3.12 - name: Set up chart-testing uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1 diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 448808a59..aec68975c 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -13,10 +13,10 @@ permissions: jobs: create-pull-request: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Create Pull Request id: create-pr env: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 
e15206600..4d9a58568 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -12,10 +12,10 @@ permissions: jobs: release: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 # important for fetching all history to run comparison against @@ -35,7 +35,7 @@ jobs: - name: Set up Helm uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 with: - version: v3.14.1 + version: v3.16.2 - name: Add chart dependencies run: | @@ -58,14 +58,14 @@ jobs: run: shred --remove=wipesync /tmp/keyring.gpg /tmp/passphrase-file.txt - name: Login to GitHub Container Registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Install sigstore/cosign - uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0 + uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0 - name: Push charts to GHCR env: diff --git a/.github/workflows/tests-cluster-chainsaw.yaml b/.github/workflows/tests-cluster-chainsaw.yaml new file mode 100644 index 000000000..2d619b24a --- /dev/null +++ b/.github/workflows/tests-cluster-chainsaw.yaml @@ -0,0 +1,54 @@ +name: tests-cluster-chainsaw + +on: + pull_request: + branches-ignore: + - 'gh-pages' + +jobs: + test-cluster-standalone: + runs-on: ubuntu-24.04 + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: Install Cosign + uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0 + + - name: Setup kind + uses: ./.github/actions/setup-kind + + - name: Deploy the operator + uses: ./.github/actions/deploy-operator + + - name: Install Prometheus CRDs + run: | + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm install prometheus-crds prometheus-community/prometheus-operator-crds + + - name: Install Chainsaw + uses: kyverno/action-install-chainsaw@d311eacde764f806c9658574ff64c9c3b21f8397 # v0.2.11 + with: + verify: true + + - name: Setup MinIO + run: | + helm repo add minio-operator https://operator.min.io + helm upgrade \ + --install \ + --namespace minio-system \ + --create-namespace \ + --wait \ + operator minio-operator/operator + helm upgrade \ + --install \ + --namespace minio \ + --create-namespace \ + --wait \ + --values ./.github/minio.yaml \ + tenant minio-operator/tenant + + - name: Run Kyverno/Chainsaw + run: chainsaw test diff --git a/.github/workflows/tests-cluster-standalone.yml b/.github/workflows/tests-cluster-standalone.yml deleted file mode 100644 index 70b4c6f83..000000000 --- a/.github/workflows/tests-cluster-standalone.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: tests-cluster-standalone - -on: - pull_request: - branches-ignore: - - 'gh-pages' - -jobs: - test-cluster-standalone: - runs-on: ubuntu-22.04 - steps: - - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - with: - fetch-depth: 0 - - - name: Setup kind - uses: ./.github/actions/setup-kind - - - name: Deploy the operator - uses: ./.github/actions/deploy-operator - - - name: Deploy a standalone cluster - run: | - helm upgrade --install 
\ - --values charts/cluster/examples/basic.yaml \ - --namespace database \ - --create-namespace \ - --wait \ - database ./charts/cluster - - - name: Verify that the cluster is ready - uses: ./.github/actions/verify-cluster-ready - with: - cluster-name: database-cluster - ready-instances: 1 - - test-cluster-pgbouncer: - runs-on: ubuntu-22.04 - steps: - - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - with: - fetch-depth: 0 - - - name: Setup kind - uses: ./.github/actions/setup-kind - - - name: Deploy the operator - uses: ./.github/actions/deploy-operator - - - name: Deploy a standalone cluster - run: | - helm upgrade --install \ - --values charts/cluster/examples/pgbouncer.yaml \ - --namespace database \ - --create-namespace \ - --wait \ - database ./charts/cluster - - - name: Verify that the cluster is ready - uses: ./.github/actions/verify-cluster-ready - with: - cluster-name: database-cluster - ready-instances: 1 - - - name: Verify that the pooler is ready - uses: ./.github/actions/verify-pooler-ready - with: - pooler-name: database-cluster-pooler-rw - ready-instances: 1 diff --git a/.github/workflows/tests-operator.yml b/.github/workflows/tests-operator.yml index 87474c18d..ec7052ed5 100644 --- a/.github/workflows/tests-operator.yml +++ b/.github/workflows/tests-operator.yml @@ -7,10 +7,10 @@ on: jobs: deploy_operator: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0c8321019..ae6f50c48 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ Before reporting a new issue, please make sure you have: ## How to contribute -You can contribute to cnp-bench by submitting a new pull request. +You can contribute to charts by submitting a new pull request. Please: diff --git a/README.md b/README.md index 67eccbd93..dda335965 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,13 @@ # CloudNativePG Helm Charts +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] +[![GitHub License](https://img.shields.io/github/license/cloudnative-pg/charts)][license] + + +[![GitHub Release](https://img.shields.io/github/v/release/cloudnative-pg/charts?filter=cloudnative-pg-*)](https://github.com/cloudnative-pg/charts/tree/main/charts/cloudnative-pg) +[![GitHub Release](https://img.shields.io/github/v/release/cloudnative-pg/charts?filter=cluster-*)](https://github.com/cloudnative-pg/charts/tree/main/charts/cluster) + + ## Operator chart Helm chart to install the @@ -40,3 +48,6 @@ Please read the [code of conduct](CODE-OF-CONDUCT.md) and the ## Copyright Helm charts for CloudNativePG are distributed under [Apache License 2.0](LICENSE). 
+ +[stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg +[license]: https://github.com/cloudnative-pg/charts?tab=Apache-2.0-1-ov-file diff --git a/charts/cloudnative-pg/Chart.yaml b/charts/cloudnative-pg/Chart.yaml index 0a8c12a07..b34e3817a 100644 --- a/charts/cloudnative-pg/Chart.yaml +++ b/charts/cloudnative-pg/Chart.yaml @@ -18,12 +18,12 @@ name: cloudnative-pg description: CloudNativePG Operator Helm Chart icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg type: application -version: "0.21.2" +version: "0.22.1" # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning, they should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.23.1" +appVersion: "1.24.1" sources: - https://github.com/cloudnative-pg/charts keywords: diff --git a/charts/cloudnative-pg/README.md b/charts/cloudnative-pg/README.md index d636f614c..f78461cfc 100644 --- a/charts/cloudnative-pg/README.md +++ b/charts/cloudnative-pg/README.md @@ -1,6 +1,6 @@ # cloudnative-pg -![Version: 0.21.2](https://img.shields.io/badge/Version-0.21.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.23.1](https://img.shields.io/badge/AppVersion-1.23.1-informational?style=flat-square) +![Version: 0.22.1](https://img.shields.io/badge/Version-0.22.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.24.1](https://img.shields.io/badge/AppVersion-1.24.1-informational?style=flat-square) CloudNativePG Operator Helm Chart @@ -27,6 +27,7 @@ CloudNativePG Operator Helm Chart | Key | Type | Default | Description | |-----|------|---------|-------------| | additionalArgs | list | `[]` | Additinal arguments to be added to the operator's args list. | +| additionalEnv | list | `[]` | Array containing extra environment variables which can be templated. For example: - name: RELEASE_NAME value: "{{ .Release.Name }}" - name: MY_VAR value: "mySpecialKey" | | affinity | object | `{}` | Affinity for the operator to be installed. | | commonAnnotations | object | `{}` | Annotations to be added to all other resources. | | config | object | `{"create":true,"data":{},"name":"cnpg-controller-manager-config","secret":false}` | Operator configuration. | @@ -36,7 +37,9 @@ CloudNativePG Operator Helm Chart | config.secret | bool | `false` | Specifies whether it should be stored in a secret, instead of a configmap. | | containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | Container Security Context. | | crds.create | bool | `true` | Specifies whether the CRDs should be created when installing the chart. | +| dnsPolicy | string | `""` | | | fullnameOverride | string | `""` | | +| hostNetwork | bool | `false` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/cloudnative-pg/cloudnative-pg"` | | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| @@ -50,8 +53,10 @@ CloudNativePG Operator Helm Chart | monitoring.grafanaDashboard.sidecarLabelValue | string | `"1"` | Label value that ConfigMaps should have to be loaded as dashboards. DEPRECATED: Use labels instead. | | monitoring.podMonitorAdditionalLabels | object | `{}` | Additional labels for the podMonitor | | monitoring.podMonitorEnabled | bool | `false` | Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. | +| monitoring.podMonitorMetricRelabelings | list | `[]` | Metrics relabel configurations to apply to samples before ingestion. | +| monitoring.podMonitorRelabelings | list | `[]` | Relabel configurations to apply to samples before scraping. | | monitoringQueriesConfigMap.name | string | `"cnpg-default-monitoring"` | The name of the default monitoring configmap. | -| monitoringQueriesConfigMap.queries | string | `"backends:\n query: |\n SELECT sa.datname\n , sa.usename\n , sa.application_name\n , states.state\n , COALESCE(sa.count, 0) AS total\n , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds\n FROM ( VALUES ('active')\n , ('idle')\n , ('idle in transaction')\n , ('idle in transaction (aborted)')\n , ('fastpath function call')\n , ('disabled')\n ) AS states(state)\n LEFT JOIN (\n SELECT datname\n , state\n , usename\n , COALESCE(application_name, '') AS application_name\n , COUNT(*)\n , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs\n FROM pg_catalog.pg_stat_activity\n GROUP BY datname, state, usename, application_name\n ) sa ON states.state = sa.state\n WHERE sa.usename IS NOT NULL\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - usename:\n usage: \"LABEL\"\n description: \"Name of the user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - state:\n usage: \"LABEL\"\n description: \"State of the backend\"\n - total:\n usage: \"GAUGE\"\n description: \"Number of backends\"\n - max_tx_duration_seconds:\n usage: \"GAUGE\"\n description: \"Maximum duration of a transaction in seconds\"\n\nbackends_waiting:\n query: |\n SELECT count(*) AS total\n FROM pg_catalog.pg_locks blocked_locks\n JOIN pg_catalog.pg_locks blocking_locks\n ON blocking_locks.locktype = blocked_locks.locktype\n AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database\n AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation\n AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page\n AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple\n AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid\n AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid\n AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid\n AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid\n AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid\n AND blocking_locks.pid != blocked_locks.pid\n JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid\n WHERE NOT blocked_locks.granted\n metrics:\n - total:\n usage: \"GAUGE\"\n description: \"Total number of backends that are currently waiting on other queries\"\n\npg_database:\n query: |\n SELECT datname\n , pg_catalog.pg_database_size(datname) AS size_bytes\n , pg_catalog.age(datfrozenxid) AS xid_age\n , pg_catalog.mxid_age(datminmxid) AS mxid_age\n FROM pg_catalog.pg_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - 
size_bytes:\n usage: \"GAUGE\"\n description: \"Disk space used by the database\"\n - xid_age:\n usage: \"GAUGE\"\n description: \"Number of transactions from the frozen XID to the current one\"\n - mxid_age:\n usage: \"GAUGE\"\n description: \"Number of multiple transactions (Multixact) from the frozen XID to the current one\"\n\npg_postmaster:\n query: |\n SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time\n FROM pg_catalog.pg_postmaster_start_time()\n metrics:\n - start_time:\n usage: \"GAUGE\"\n description: \"Time at which postgres started (based on epoch)\"\n\npg_replication:\n query: \"SELECT CASE WHEN (\n NOT pg_catalog.pg_is_in_recovery()\n OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())\n THEN 0\n ELSE GREATEST (0,\n EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))\n END AS lag,\n pg_catalog.pg_is_in_recovery() AS in_recovery,\n EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,\n (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas\"\n metrics:\n - lag:\n usage: \"GAUGE\"\n description: \"Replication lag behind primary in seconds\"\n - in_recovery:\n usage: \"GAUGE\"\n description: \"Whether the instance is in recovery\"\n - is_wal_receiver_up:\n usage: \"GAUGE\"\n description: \"Whether the instance wal_receiver is up\"\n - streaming_replicas:\n usage: \"GAUGE\"\n description: \"Number of streaming replicas connected to the instance\"\n\npg_replication_slots:\n query: |\n SELECT slot_name,\n slot_type,\n database,\n active,\n (CASE pg_catalog.pg_is_in_recovery()\n WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)\n ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)\n END) as pg_wal_lsn_diff\n FROM pg_catalog.pg_replication_slots\n WHERE NOT temporary\n metrics:\n - slot_name:\n usage: \"LABEL\"\n description: \"Name of the replication slot\"\n - slot_type:\n usage: \"LABEL\"\n description: \"Type of the replication slot\"\n - database:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - active:\n usage: \"GAUGE\"\n description: \"Flag indicating whether the slot is active\"\n - pg_wal_lsn_diff:\n usage: \"GAUGE\"\n description: \"Replication lag in bytes\"\n\npg_stat_archiver:\n query: |\n SELECT archived_count\n , failed_count\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure\n , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time\n , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_archiver\n metrics:\n - archived_count:\n usage: \"COUNTER\"\n description: \"Number of WAL files that have been successfully archived\"\n - failed_count:\n usage: \"COUNTER\"\n description: \"Number of failed attempts for archiving WAL files\"\n - seconds_since_last_archival:\n usage: \"GAUGE\"\n description: \"Seconds since the last successful archival operation\"\n - seconds_since_last_failure:\n usage: 
\"GAUGE\"\n description: \"Seconds since the last failed archival operation\"\n - last_archived_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving succeeded\"\n - last_failed_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving failed\"\n - last_archived_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Archived WAL start LSN\"\n - last_failed_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Last failed WAL LSN\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_bgwriter:\n runonserver: \"<17.0.0\"\n query: |\n SELECT checkpoints_timed\n , checkpoints_req\n , checkpoint_write_time\n , checkpoint_sync_time\n , buffers_checkpoint\n , buffers_clean\n , maxwritten_clean\n , buffers_backend\n , buffers_backend_fsync\n , buffers_alloc\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been performed\"\n - checkpoint_write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds\"\n - checkpoint_sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds\"\n - buffers_checkpoint:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints\"\n - buffers_clean:\n usage: \"COUNTER\"\n description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_backend:\n usage: \"COUNTER\"\n description: \"Number of buffers written directly by a backend\"\n - buffers_backend_fsync:\n usage: \"COUNTER\"\n description: \"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n\npg_stat_database:\n query: |\n SELECT datname\n , xact_commit\n , xact_rollback\n , blks_read\n , blks_hit\n , tup_returned\n , tup_fetched\n , tup_inserted\n , tup_updated\n , tup_deleted\n , conflicts\n , temp_files\n , temp_bytes\n , deadlocks\n , blk_read_time\n , blk_write_time\n FROM pg_catalog.pg_stat_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of this database\"\n - xact_commit:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been committed\"\n - xact_rollback:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been rolled back\"\n - blks_read:\n usage: \"COUNTER\"\n description: \"Number of disk blocks read in this database\"\n - blks_hit:\n usage: \"COUNTER\"\n description: \"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)\"\n - tup_returned:\n usage: \"COUNTER\"\n description: \"Number of rows returned by queries in this database\"\n - tup_fetched:\n usage: \"COUNTER\"\n description: \"Number of rows fetched by queries in 
this database\"\n - tup_inserted:\n usage: \"COUNTER\"\n description: \"Number of rows inserted by queries in this database\"\n - tup_updated:\n usage: \"COUNTER\"\n description: \"Number of rows updated by queries in this database\"\n - tup_deleted:\n usage: \"COUNTER\"\n description: \"Number of rows deleted by queries in this database\"\n - conflicts:\n usage: \"COUNTER\"\n description: \"Number of queries canceled due to conflicts with recovery in this database\"\n - temp_files:\n usage: \"COUNTER\"\n description: \"Number of temporary files created by queries in this database\"\n - temp_bytes:\n usage: \"COUNTER\"\n description: \"Total amount of data written to temporary files by queries in this database\"\n - deadlocks:\n usage: \"COUNTER\"\n description: \"Number of deadlocks detected in this database\"\n - blk_read_time:\n usage: \"COUNTER\"\n description: \"Time spent reading data file blocks by backends in this database, in milliseconds\"\n - blk_write_time:\n usage: \"COUNTER\"\n description: \"Time spent writing data file blocks by backends in this database, in milliseconds\"\n\npg_stat_replication:\n primary: true\n query: |\n SELECT usename\n , COALESCE(application_name, '') AS application_name\n , COALESCE(client_addr::text, '') AS client_addr\n , COALESCE(client_port::text, '') AS client_port\n , EXTRACT(EPOCH FROM backend_start) AS backend_start\n , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes\n , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes\n , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds\n FROM pg_catalog.pg_stat_replication\n metrics:\n - usename:\n usage: \"LABEL\"\n description: \"Name of the replication user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - client_addr:\n usage: \"LABEL\"\n description: \"Client IP address\"\n - client_port:\n usage: \"LABEL\"\n description: \"Client TCP port\"\n - backend_start:\n usage: \"COUNTER\"\n description: \"Time when this process was started\"\n - backend_xmin_age:\n usage: \"COUNTER\"\n description: \"The age of this standby's xmin horizon\"\n - sent_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location sent on this connection\"\n - write_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location written to disk by this standby server\"\n - flush_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location flushed to disk by this standby server\"\n - replay_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location replayed into the database on this standby server\"\n - write_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it\"\n - flush_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving 
notification that this standby server has written and flushed it\"\n - replay_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it\"\n\npg_settings:\n query: |\n SELECT name,\n CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting\n FROM pg_catalog.pg_settings\n WHERE vartype IN ('integer', 'real', 'bool')\n ORDER BY 1\n metrics:\n - name:\n usage: \"LABEL\"\n description: \"Name of the setting\"\n - setting:\n usage: \"GAUGE\"\n description: \"Setting value\"\n"` | A string representation of a YAML defining monitoring queries. | +| monitoringQueriesConfigMap.queries | string | `"backends:\n query: |\n SELECT sa.datname\n , sa.usename\n , sa.application_name\n , states.state\n , COALESCE(sa.count, 0) AS total\n , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds\n FROM ( VALUES ('active')\n , ('idle')\n , ('idle in transaction')\n , ('idle in transaction (aborted)')\n , ('fastpath function call')\n , ('disabled')\n ) AS states(state)\n LEFT JOIN (\n SELECT datname\n , state\n , usename\n , COALESCE(application_name, '') AS application_name\n , COUNT(*)\n , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs\n FROM pg_catalog.pg_stat_activity\n GROUP BY datname, state, usename, application_name\n ) sa ON states.state = sa.state\n WHERE sa.usename IS NOT NULL\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - usename:\n usage: \"LABEL\"\n description: \"Name of the user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - state:\n usage: \"LABEL\"\n description: \"State of the backend\"\n - total:\n usage: \"GAUGE\"\n description: \"Number of backends\"\n - max_tx_duration_seconds:\n usage: \"GAUGE\"\n description: \"Maximum duration of a transaction in seconds\"\n\nbackends_waiting:\n query: |\n SELECT count(*) AS total\n FROM pg_catalog.pg_locks blocked_locks\n JOIN pg_catalog.pg_locks blocking_locks\n ON blocking_locks.locktype = blocked_locks.locktype\n AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database\n AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation\n AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page\n AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple\n AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid\n AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid\n AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid\n AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid\n AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid\n AND blocking_locks.pid != blocked_locks.pid\n JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid\n WHERE NOT blocked_locks.granted\n metrics:\n - total:\n usage: \"GAUGE\"\n description: \"Total number of backends that are currently waiting on other queries\"\n\npg_database:\n query: |\n SELECT datname\n , pg_catalog.pg_database_size(datname) AS size_bytes\n , pg_catalog.age(datfrozenxid) AS xid_age\n , pg_catalog.mxid_age(datminmxid) AS mxid_age\n FROM pg_catalog.pg_database\n WHERE datallowconn\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - size_bytes:\n usage: \"GAUGE\"\n description: \"Disk space used by the database\"\n - xid_age:\n usage: 
\"GAUGE\"\n description: \"Number of transactions from the frozen XID to the current one\"\n - mxid_age:\n usage: \"GAUGE\"\n description: \"Number of multiple transactions (Multixact) from the frozen XID to the current one\"\n\npg_postmaster:\n query: |\n SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time\n FROM pg_catalog.pg_postmaster_start_time()\n metrics:\n - start_time:\n usage: \"GAUGE\"\n description: \"Time at which postgres started (based on epoch)\"\n\npg_replication:\n query: \"SELECT CASE WHEN (\n NOT pg_catalog.pg_is_in_recovery()\n OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())\n THEN 0\n ELSE GREATEST (0,\n EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))\n END AS lag,\n pg_catalog.pg_is_in_recovery() AS in_recovery,\n EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,\n (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas\"\n metrics:\n - lag:\n usage: \"GAUGE\"\n description: \"Replication lag behind primary in seconds\"\n - in_recovery:\n usage: \"GAUGE\"\n description: \"Whether the instance is in recovery\"\n - is_wal_receiver_up:\n usage: \"GAUGE\"\n description: \"Whether the instance wal_receiver is up\"\n - streaming_replicas:\n usage: \"GAUGE\"\n description: \"Number of streaming replicas connected to the instance\"\n\npg_replication_slots:\n query: |\n SELECT slot_name,\n slot_type,\n database,\n active,\n (CASE pg_catalog.pg_is_in_recovery()\n WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)\n ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)\n END) as pg_wal_lsn_diff\n FROM pg_catalog.pg_replication_slots\n WHERE NOT temporary\n metrics:\n - slot_name:\n usage: \"LABEL\"\n description: \"Name of the replication slot\"\n - slot_type:\n usage: \"LABEL\"\n description: \"Type of the replication slot\"\n - database:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - active:\n usage: \"GAUGE\"\n description: \"Flag indicating whether the slot is active\"\n - pg_wal_lsn_diff:\n usage: \"GAUGE\"\n description: \"Replication lag in bytes\"\n\npg_stat_archiver:\n query: |\n SELECT archived_count\n , failed_count\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure\n , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time\n , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_archiver\n metrics:\n - archived_count:\n usage: \"COUNTER\"\n description: \"Number of WAL files that have been successfully archived\"\n - failed_count:\n usage: \"COUNTER\"\n description: \"Number of failed attempts for archiving WAL files\"\n - seconds_since_last_archival:\n usage: \"GAUGE\"\n description: \"Seconds since the last successful archival operation\"\n - seconds_since_last_failure:\n usage: \"GAUGE\"\n description: \"Seconds since the last failed archival operation\"\n - last_archived_time:\n 
usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving succeeded\"\n - last_failed_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving failed\"\n - last_archived_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Archived WAL start LSN\"\n - last_failed_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Last failed WAL LSN\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_bgwriter:\n runonserver: \"<17.0.0\"\n query: |\n SELECT checkpoints_timed\n , checkpoints_req\n , checkpoint_write_time\n , checkpoint_sync_time\n , buffers_checkpoint\n , buffers_clean\n , maxwritten_clean\n , buffers_backend\n , buffers_backend_fsync\n , buffers_alloc\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been performed\"\n - checkpoint_write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds\"\n - checkpoint_sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds\"\n - buffers_checkpoint:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints\"\n - buffers_clean:\n usage: \"COUNTER\"\n description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_backend:\n usage: \"COUNTER\"\n description: \"Number of buffers written directly by a backend\"\n - buffers_backend_fsync:\n usage: \"COUNTER\"\n description: \"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n\npg_stat_bgwriter_17:\n runonserver: \">=17.0.0\"\n name: pg_stat_bgwriter\n query: |\n SELECT buffers_clean\n , maxwritten_clean\n , buffers_alloc\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - buffers_clean:\n usage: \"COUNTER\"\n description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_checkpointer:\n runonserver: \">=17.0.0\"\n query: |\n SELECT num_timed AS checkpoints_timed\n , num_requested AS checkpoints_req\n , restartpoints_timed\n , restartpoints_req\n , restartpoints_done\n , write_time\n , sync_time\n , buffers_written\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_checkpointer\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been 
performed\"\n - restartpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled restartpoints due to timeout or after a failed attempt to perform it\"\n - restartpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested restartpoints that have been performed\"\n - restartpoints_done:\n usage: \"COUNTER\"\n description: \"Number of restartpoints that have been performed\"\n - write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds\"\n - sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds\"\n - buffers_written:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints and restartpoints\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_database:\n query: |\n SELECT datname\n , xact_commit\n , xact_rollback\n , blks_read\n , blks_hit\n , tup_returned\n , tup_fetched\n , tup_inserted\n , tup_updated\n , tup_deleted\n , conflicts\n , temp_files\n , temp_bytes\n , deadlocks\n , blk_read_time\n , blk_write_time\n FROM pg_catalog.pg_stat_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of this database\"\n - xact_commit:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been committed\"\n - xact_rollback:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been rolled back\"\n - blks_read:\n usage: \"COUNTER\"\n description: \"Number of disk blocks read in this database\"\n - blks_hit:\n usage: \"COUNTER\"\n description: \"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)\"\n - tup_returned:\n usage: \"COUNTER\"\n description: \"Number of rows returned by queries in this database\"\n - tup_fetched:\n usage: \"COUNTER\"\n description: \"Number of rows fetched by queries in this database\"\n - tup_inserted:\n usage: \"COUNTER\"\n description: \"Number of rows inserted by queries in this database\"\n - tup_updated:\n usage: \"COUNTER\"\n description: \"Number of rows updated by queries in this database\"\n - tup_deleted:\n usage: \"COUNTER\"\n description: \"Number of rows deleted by queries in this database\"\n - conflicts:\n usage: \"COUNTER\"\n description: \"Number of queries canceled due to conflicts with recovery in this database\"\n - temp_files:\n usage: \"COUNTER\"\n description: \"Number of temporary files created by queries in this database\"\n - temp_bytes:\n usage: \"COUNTER\"\n description: \"Total amount of data written to temporary files by queries in this database\"\n - deadlocks:\n usage: \"COUNTER\"\n description: \"Number of deadlocks detected in this database\"\n - blk_read_time:\n usage: \"COUNTER\"\n description: \"Time spent reading data file blocks by backends in this database, in milliseconds\"\n - blk_write_time:\n usage: \"COUNTER\"\n description: \"Time spent writing data file blocks by backends in this database, in milliseconds\"\n\npg_stat_replication:\n primary: true\n query: |\n SELECT usename\n , COALESCE(application_name, '') AS application_name\n , COALESCE(client_addr::text, '') AS client_addr\n , 
COALESCE(client_port::text, '') AS client_port\n , EXTRACT(EPOCH FROM backend_start) AS backend_start\n , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes\n , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes\n , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds\n FROM pg_catalog.pg_stat_replication\n metrics:\n - usename:\n usage: \"LABEL\"\n description: \"Name of the replication user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - client_addr:\n usage: \"LABEL\"\n description: \"Client IP address\"\n - client_port:\n usage: \"LABEL\"\n description: \"Client TCP port\"\n - backend_start:\n usage: \"COUNTER\"\n description: \"Time when this process was started\"\n - backend_xmin_age:\n usage: \"COUNTER\"\n description: \"The age of this standby's xmin horizon\"\n - sent_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location sent on this connection\"\n - write_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location written to disk by this standby server\"\n - flush_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location flushed to disk by this standby server\"\n - replay_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location replayed into the database on this standby server\"\n - write_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it\"\n - flush_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it\"\n - replay_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it\"\n\npg_settings:\n query: |\n SELECT name,\n CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting\n FROM pg_catalog.pg_settings\n WHERE vartype IN ('integer', 'real', 'bool')\n ORDER BY 1\n metrics:\n - name:\n usage: \"LABEL\"\n description: \"Name of the setting\"\n - setting:\n usage: \"GAUGE\"\n description: \"Setting value\"\n"` | A string representation of a YAML defining monitoring queries. | | nameOverride | string | `""` | | | nodeSelector | object | `{}` | Nodeselector for the operator to be installed. | | podAnnotations | object | `{}` | Annotations to be added to the pod. 
| diff --git a/charts/cloudnative-pg/templates/crds/crds.yaml b/charts/cloudnative-pg/templates/crds/crds.yaml index 0d4ea03f0..5c5d98132 100644 --- a/charts/cloudnative-pg/templates/crds/crds.yaml +++ b/charts/cloudnative-pg/templates/crds/crds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: backups.postgresql.cnpg.io spec: @@ -440,7 +440,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: clusterimagecatalogs.postgresql.cnpg.io spec: @@ -522,7 +522,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: clusters.postgresql.cnpg.io spec: @@ -639,11 +639,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -658,13 +660,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -673,13 +675,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -719,11 +721,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -743,6 +747,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -765,6 +770,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -814,11 +820,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -833,13 +841,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -848,13 +856,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -893,11 +901,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -917,6 +927,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -929,6 +940,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object additionalPodAntiAffinity: description: |- @@ -986,11 +998,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1005,13 +1019,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1020,13 +1034,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -1066,11 +1080,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1090,6 +1106,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -1112,6 +1129,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -1161,11 +1179,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1180,13 +1200,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1195,13 +1215,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -1240,11 +1260,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1264,6 +1286,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -1276,6 +1299,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object enablePodAntiAffinity: description: |- @@ -1334,11 +1358,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -1366,11 +1392,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -1383,6 +1411,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -1427,11 +1456,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -1459,14 +1490,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -1622,13 +1656,11 @@ spec: provide flexibility to customize the backup process further according to specific requirements or configurations. - Example: In a scenario where specialized backup options are required, such as setting a specific timeout or defining custom behavior, users can use this field to specify additional command arguments. - Note: It's essential to ensure that the provided arguments are valid and supported by the 'barman-cloud-backup' command, to avoid potential errors or unintended @@ -1810,6 +1842,24 @@ spec: When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array compression: description: |- Compress a WAL file before sending it to the object store. Available @@ -1839,6 +1889,24 @@ spec: value - with 1 being the minimum accepted value. 
minimum: 1 type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array type: object required: - destinationPath @@ -2035,17 +2103,19 @@ spec: postInitApplicationSQL: description: |- List of SQL queries to be executed as a superuser in the application - database right after is created - to be used with extreme care + database right after the cluster has been created - to be used with extreme care (by default empty) items: type: string type: array postInitApplicationSQLRefs: description: |- - PostInitApplicationSQLRefs points references to ConfigMaps or Secrets which - contain SQL files, the general implementation order to these references is - from all Secrets to all ConfigMaps, and inside Secrets or ConfigMaps, - the implementation order is same as the order of each array + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. (by default empty) properties: configMapRefs: @@ -2089,20 +2159,118 @@ spec: type: object postInitSQL: description: |- - List of SQL queries to be executed as a superuser immediately - after the cluster has been created - to be used with extreme care + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care (by default empty) items: type: string type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + type: object postInitTemplateSQL: description: |- List of SQL queries to be executed as a superuser in the `template1` - after the cluster has been created - to be used with extreme care + database right after the cluster has been created - to be used with extreme care (by default empty) items: type: string type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object secret: description: |- Name of the secret containing the initial credentials for the @@ -2453,10 +2621,13 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its key @@ -2515,10 +2686,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -2544,10 +2718,13 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap must be defined @@ -2562,10 +2739,13 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret must be defined @@ -2588,7 +2768,6 @@ spec: entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until @@ -2598,11 +2777,9 @@ spec: this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - Required, must not be nil. properties: metadata: @@ -2625,6 +2802,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -2764,11 +2942,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2796,8 +2976,8 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -2923,13 +3103,11 @@ spec: provide flexibility to customize the backup process further according to specific requirements or configurations. - Example: In a scenario where specialized backup options are required, such as setting a specific timeout or defining custom behavior, users can use this field to specify additional command arguments. - Note: It's essential to ensure that the provided arguments are valid and supported by the 'barman-cloud-backup' command, to avoid potential errors or unintended @@ -3111,6 +3289,24 @@ spec: When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. 
+ items: + type: string + type: array compression: description: |- Compress a WAL file before sending it to the object store. Available @@ -3140,6 +3336,24 @@ spec: value - with 1 being the minimum accepted value. minimum: 1 type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array type: object required: - destinationPath @@ -3168,10 +3382,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -3191,10 +3408,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -3214,10 +3434,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -3237,10 +3460,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -3342,6 +3568,14 @@ spec: description: Number of instances required in the cluster minimum: 1 type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). 
+ format: int32 + type: integer logLevel: default: info description: 'The instances'' log level, one of the following values: @@ -3365,7 +3599,6 @@ spec: with the additional field Ensure specifying whether to ensure the presence or absence of the role in the database - The defaults of the CREATE ROLE command are applied Reference: https://www.postgresql.org/docs/current/sql-createrole.html properties: @@ -3469,6 +3702,438 @@ spec: - name type: object type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + allOf: + - enum: + - rw + - r + - ro + - enum: + - rw + - r + - ro + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. 
This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. 
Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). 
+ + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. 
If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. 
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object type: object maxSyncReplicas: default: 0 @@ -3545,7 +4210,6 @@ spec: RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: @@ -3553,11 +4217,9 @@ spec: description: |- Action to perform based on the regex matching. - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - Default: "Replace" enum: - replace @@ -3587,7 +4249,6 @@ spec: description: |- Modulus to take of the hash of the source label values. - Only applicable when the action is `HashMod`. format: int64 type: integer @@ -3600,7 +4261,6 @@ spec: Replacement value against which a Replace action is performed if the regular expression matches. - Regex capture groups are available. type: string separator: @@ -3623,11 +4283,9 @@ spec: description: |- Label to which the resulting string is written in a replacement. - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. - Regex capture groups are available. type: string type: object @@ -3640,7 +4298,6 @@ spec: RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: @@ -3648,11 +4305,9 @@ spec: description: |- Action to perform based on the regex matching. - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - Default: "Replace" enum: - replace @@ -3682,7 +4337,6 @@ spec: description: |- Modulus to take of the hash of the source label values. - Only applicable when the action is `HashMod`. format: int64 type: integer @@ -3695,7 +4349,6 @@ spec: Replacement value against which a Replace action is performed if the regular expression matches. - Regex capture groups are available. type: string separator: @@ -3718,15 +4371,25 @@ spec: description: |- Label to which the resulting string is written in a replacement. - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. - Regex capture groups are available. type: string type: object type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object type: object nodeMaintenanceWindow: description: Define a maintenance window for the Kubernetes nodes @@ -3816,10 +4479,13 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the Secret or its key @@ -3904,6 +4570,54 @@ spec: required: - enabled type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object type: object primaryUpdateMethod: default: restart @@ -3948,24 +4662,24 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected along with other - supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. - Alpha, gated by the ClusterTrustBundleProjection feature gate. - ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. - Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. @@ -4005,11 +4719,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4088,11 +4804,15 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: optional specify whether the ConfigMap @@ -4112,8 +4832,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' + pod: only annotations, labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -4172,6 +4892,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to @@ -4215,11 +4936,15 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional field specify whether the Secret @@ -4258,6 +4983,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object replica: description: Replica cluster configuration @@ -4269,13 +4995,35 @@ spec: object store or via streaming through pg_basebackup. Refer to the Replica clusters page of the documentation for more information. type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string source: description: The name of the external cluster which is the replication origin minLength: 1 type: string required: - - enabled - source type: object replicationSlots: @@ -4347,11 +5095,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -4362,6 +5108,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4418,7 +5170,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. 
@@ -4452,6 +5203,10 @@ spec: and services. More info: http://kubernetes.io/docs/user-guide/labels type: object + name: + description: The name of the resource. Only supported for + certain types + type: string type: object required: - metadata @@ -4494,6 +5249,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -4633,11 +5389,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4665,8 +5423,8 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -4745,6 +5503,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -4888,11 +5647,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4920,8 +5681,8 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5005,11 +5766,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -5031,7 +5794,6 @@ spec: Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string @@ -5071,7 +5833,6 @@ spec: Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | @@ -5080,9 +5841,6 @@ spec: In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
format: int32 type: integer nodeAffinityPolicy: @@ -5092,7 +5850,6 @@ spec: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -5104,7 +5861,6 @@ spec: has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -5163,6 +5919,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -5302,11 +6059,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -5334,8 +6093,8 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5462,16 +6221,8 @@ spec: conditions: description: Conditions for cluster object items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -5512,12 +6263,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -5563,6 +6309,13 @@ spec: items: type: string type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string firstRecoverabilityPoint: description: |- The first recoverability point, stored as a date in RFC3339 format. @@ -5630,6 +6383,11 @@ spec: lastFailedBackup: description: Stored as a date in RFC3339 format type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string lastSuccessfulBackup: description: |- Last successful backup, stored as a date in RFC3339 format @@ -5723,6 +6481,10 @@ spec: items: type: string type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string version: description: |- Version is the version of the plugin loaded by the @@ -5921,7 +6683,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: imagecatalogs.postgresql.cnpg.io spec: @@ -6002,7 +6764,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: poolers.postgresql.cnpg.io spec: @@ -6070,9 +6832,6 @@ spec: description: |- Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. - --- - TODO: Update this to follow our convention for oneOf, whatever we decide it - to be. properties: maxSurge: anyOf: @@ -6134,7 +6893,6 @@ spec: RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: @@ -6142,11 +6900,9 @@ spec: description: |- Action to perform based on the regex matching. - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - Default: "Replace" enum: - replace @@ -6176,7 +6932,6 @@ spec: description: |- Modulus to take of the hash of the source label values. - Only applicable when the action is `HashMod`. format: int64 type: integer @@ -6189,7 +6944,6 @@ spec: Replacement value against which a Replace action is performed if the regular expression matches. - Regex capture groups are available. type: string separator: @@ -6212,11 +6966,9 @@ spec: description: |- Label to which the resulting string is written in a replacement. - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. - Regex capture groups are available. type: string type: object @@ -6229,7 +6981,6 @@ spec: RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. 
- More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: @@ -6237,11 +6988,9 @@ spec: description: |- Action to perform based on the regex matching. - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - Default: "Replace" enum: - replace @@ -6271,7 +7020,6 @@ spec: description: |- Modulus to take of the hash of the source label values. - Only applicable when the action is `HashMod`. format: int64 type: integer @@ -6284,7 +7032,6 @@ spec: Replacement value against which a Replace action is performed if the regular expression matches. - Regex capture groups are available. type: string separator: @@ -6307,11 +7054,9 @@ spec: description: |- Label to which the resulting string is written in a replacement. - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. - Regex capture groups are available. type: string type: object @@ -6396,6 +7141,10 @@ spec: and services. More info: http://kubernetes.io/docs/user-guide/labels type: object + name: + description: The name of the resource. Only supported for + certain types + type: string type: object spec: description: |- @@ -6451,7 +7200,6 @@ spec: clients must ensure that clusterIPs[0] and clusterIP have the same value. - This field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. @@ -6470,6 +7218,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic externalName: description: |- externalName is the external reference that discovery mechanisms will @@ -6530,7 +7279,6 @@ spec: NodePort, and LoadBalancer, and does apply to "headless" services. This field will be wiped when updating a Service to type ExternalName. - This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are @@ -6585,6 +7333,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic ports: description: |- The list of ports that are exposed by this service. @@ -6600,17 +7349,14 @@ spec: This field follows standard Kubernetes label syntax. Valid values are either: - * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. type: string @@ -6715,6 +7461,16 @@ spec: type: integer type: object type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. 
If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string type: description: |- type determines how the Service is exposed. Defaults to ClusterIP. Valid @@ -6762,6 +7518,10 @@ spec: and services. More info: http://kubernetes.io/docs/user-guide/labels type: object + name: + description: The name of the resource. Only supported for + certain types + type: string type: object spec: description: |- @@ -6829,11 +7589,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -6861,11 +7623,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -6879,6 +7643,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -6923,11 +7688,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -6955,14 +7722,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -7026,11 +7796,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7045,13 +7817,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7060,13 +7832,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7107,11 +7879,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7131,6 +7905,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -7153,6 +7928,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -7203,11 +7979,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7222,13 +8000,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7237,13 +8015,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7283,11 +8061,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7307,6 +8087,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -7319,6 +8100,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -7378,11 +8160,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7397,13 +8181,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7412,13 +8196,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7459,11 +8243,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7483,6 +8269,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -7505,6 +8292,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -7555,11 +8343,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7574,13 +8364,13 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -7589,13 +8379,13 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
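(Illustrative aside, not part of the diff.) The regenerated descriptions above reflect matchLabelKeys and mismatchLabelKeys graduating to beta (enabled by default) and the lower-camel-case references to labelSelector. A minimal, hypothetical pod anti-affinity term using matchLabelKeys against the upstream PodAffinityTerm API could look like the sketch below; the app label and its value are invented for the example:

# Hypothetical pod spec fragment: keep replicas of the same rollout apart per node,
# merging the incoming pod's pod-template-hash into the selector as "key in (value)".
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            app: example-app        # hypothetical label
        matchLabelKeys:
          - pod-template-hash       # MatchLabelKeysInPodAffinity gate (beta, on by default)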
items: type: string type: array @@ -7635,11 +8425,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7659,6 +8451,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -7671,6 +8464,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -7700,6 +8494,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -7713,6 +8508,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -7748,10 +8544,13 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -7814,10 +8613,13 @@ spec: key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -7832,6 +8634,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -7848,10 +8653,13 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -7867,10 +8675,13 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret must @@ -7880,6 +8691,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. 
@@ -7920,6 +8732,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -7950,6 +8763,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -8031,6 +8845,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -8061,6 +8876,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -8138,6 +8954,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -8155,11 +8972,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -8194,6 +9011,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8351,6 +9169,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -8368,11 +9187,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -8407,6 +9226,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8520,11 +9340,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -8536,6 +9354,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -8602,6 +9426,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. 
+ type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -8615,6 +9463,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -8622,6 +9471,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -8633,7 +9483,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -8715,7 +9565,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -8779,6 +9628,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -8796,11 +9646,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -8835,6 +9685,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8977,6 +9828,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -8996,6 +9850,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. @@ -9005,6 +9861,25 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. 
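(Illustrative aside, not part of the diff.) The hunk above introduces the recursiveReadOnly mount option and documents its constraint on mountPropagation. A hedged sketch of a container fragment using it, with invented volume and path names:

# Hypothetical volumeMounts fragment: ask the runtime to make a read-only mount
# recursively read-only where supported.
volumeMounts:
  - name: data                     # hypothetical volume name
    mountPath: /var/lib/data       # hypothetical path
    readOnly: true                 # recursiveReadOnly is only meaningful when readOnly is true
    recursiveReadOnly: IfPossible  # mountPropagation must be None or unset in this case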
@@ -9022,6 +9897,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -9033,6 +9911,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map dnsConfig: description: |- Specifies the DNS parameters of a pod. @@ -9047,6 +9928,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: description: |- A list of DNS resolver options. @@ -9064,6 +9946,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: description: |- A list of DNS search domains for host-name lookup. @@ -9072,6 +9955,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: description: |- @@ -9102,7 +9986,6 @@ spec: removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. - To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. properties: @@ -9119,6 +10002,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -9132,6 +10016,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -9167,10 +10052,13 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -9233,10 +10121,13 @@ spec: key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -9251,6 +10142,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -9267,10 +10161,13 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -9286,10 +10183,13 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret must @@ -9299,6 +10199,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -9336,6 +10237,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -9366,6 +10268,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -9447,6 +10350,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -9477,6 +10381,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -9550,6 +10455,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -9567,11 +10473,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -9606,6 +10512,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -9751,6 +10658,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -9768,11 +10676,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -9807,6 +10715,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -9919,11 +10828,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -9935,6 +10842,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -9989,6 +10902,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -10002,6 +10939,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -10009,6 +10947,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -10020,7 +10959,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -10102,7 +11041,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -10159,6 +11097,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -10176,11 +11115,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -10215,6 +11154,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -10317,7 +11257,6 @@ spec: The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. - The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. type: string @@ -10367,6 +11306,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. @@ -10386,6 +11328,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. @@ -10395,6 +11339,25 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. 
type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -10412,6 +11375,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -10423,10 +11389,13 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map hostAliases: description: |- HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. + file if specified. items: description: |- HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -10437,11 +11406,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic ip: description: IP address of the host file entry. type: string + required: + - ip type: object type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map hostIPC: description: |- Use the host's ipc namespace. @@ -10486,14 +11461,20 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map initContainers: description: |- List of initialization containers belonging to the pod. @@ -10526,6 +11507,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -10539,6 +11521,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -10574,10 +11557,13 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the ConfigMap @@ -10640,10 +11626,13 @@ spec: key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -10658,6 +11647,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -10674,10 +11666,13 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -10693,10 +11688,13 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret must @@ -10706,6 +11704,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -10746,6 +11745,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -10776,6 +11776,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -10857,6 +11858,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request @@ -10887,6 +11889,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -10964,6 +11967,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -10981,11 +11985,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -11020,6 +12024,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
type: string @@ -11177,6 +12182,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -11194,11 +12200,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -11233,6 +12239,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -11346,11 +12353,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -11362,6 +12367,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -11428,6 +12439,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -11441,6 +12476,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -11448,6 +12484,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -11459,7 +12496,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -11541,7 +12578,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. 
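(Illustrative aside, not part of the diff.) Several hunks in this section add the new appArmorProfile block to container- and pod-level security contexts. A minimal sketch of selecting a profile, assuming the runtime default is wanted; the commented alternative uses an invented local profile name:

# Hypothetical securityContext fragment.
securityContext:
  appArmorProfile:
    type: RuntimeDefault             # container runtime's default AppArmor profile
# A node-local profile would instead be:
#   appArmorProfile:
#     type: Localhost
#     localhostProfile: my-profile   # hypothetical, must be preloaded on the node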
@@ -11605,6 +12641,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -11622,11 +12659,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -11661,6 +12698,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -11803,6 +12841,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -11822,6 +12863,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. @@ -11831,6 +12874,25 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -11848,6 +12910,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -11859,11 +12924,16 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map nodeName: description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename type: string nodeSelector: additionalProperties: @@ -11879,15 +12949,14 @@ spec: Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
- If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions - If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup @@ -11897,6 +12966,8 @@ spec: - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities @@ -11975,6 +13046,7 @@ spec: - conditionType type: object type: array + x-kubernetes-list-type: atomic resourceClaims: description: |- ResourceClaims defines which ResourceClaims must be allocated @@ -11982,15 +13054,16 @@ spec: will be made available to those containers which consume them by name. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. items: description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. properties: @@ -11999,32 +13072,32 @@ spec: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. type: string - source: - description: Source describes where to find the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. 
- type: string - type: object + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string required: - name type: object @@ -12058,11 +13131,7 @@ spec: If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -12084,18 +13153,39 @@ spec: SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object fsGroup: description: |- A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: - 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- - If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. format: int64 @@ -12182,7 +13272,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -12192,17 +13281,28 @@ spec: type: object supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. 
items: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -12223,6 +13323,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: description: |- The Windows specific settings applied to all containers. @@ -12258,7 +13359,7 @@ spec: type: object serviceAccount: description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. type: string serviceAccountName: @@ -12338,6 +13439,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic topologySpreadConstraints: description: |- TopologySpreadConstraints describes how a group of pods ought to spread across topology @@ -12379,11 +13481,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -12405,7 +13509,6 @@ spec: Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string @@ -12445,7 +13548,6 @@ spec: Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | @@ -12454,9 +13556,6 @@ spec: In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: @@ -12466,7 +13565,6 @@ spec: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -12478,7 +13576,6 @@ spec: has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -12546,7 +13643,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: description: |- @@ -12586,6 +13682,7 @@ spec: the blob storage type: string fsType: + default: ext4 description: |- fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. @@ -12599,6 +13696,7 @@ spec: set). defaults to shared' type: string readOnly: + default: false description: |- readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. @@ -12638,6 +13736,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default @@ -12660,10 +13759,13 @@ spec: More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -12699,10 +13801,13 @@ spec: to OpenStack. properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -12767,11 +13872,15 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional specify whether the ConfigMap @@ -12804,10 +13913,13 @@ spec: secret object contains more than one secret, all secret references are passed. properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -12852,8 +13964,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' 
properties: apiVersion: description: Version of the schema the @@ -12914,6 +14026,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: description: |- @@ -12947,7 +14060,6 @@ spec: The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. - Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity @@ -12958,17 +14070,14 @@ spec: information on the connection between this volume type and PersistentVolumeClaim). - Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. - A pod can use both types of ephemeral volumes and persistent volumes at the same time. properties: @@ -12982,7 +14091,6 @@ spec: entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until @@ -12992,11 +14100,9 @@ spec: this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - Required, must not be nil. properties: metadata: @@ -13019,6 +14125,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -13164,11 +14271,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -13196,8 +14305,8 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -13223,7 +14332,6 @@ spec: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine type: string lun: description: 'lun is Optional: FC target lun number' @@ -13240,6 +14348,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: description: |- wwids Optional: FC volume world wide identifiers (wwids) @@ -13247,6 +14356,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: description: |- @@ -13283,10 +14393,13 @@ spec: scripts. properties: name: + default: "" description: |- Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -13320,7 +14433,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: description: |- @@ -13401,9 +14513,6 @@ spec: used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. properties: path: description: |- @@ -13420,6 +14529,41 @@ spec: required: - path type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object iscsi: description: |- iscsi represents an ISCSI Disk resource that is attached to a @@ -13440,7 +14584,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine type: string initiatorName: description: |- @@ -13452,6 +14595,7 @@ spec: description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: + default: default description: |- iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). @@ -13467,6 +14611,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: description: |- readOnly here will force the ReadOnly setting in VolumeMounts. @@ -13477,10 +14622,13 @@ spec: target and initiator authentication properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -13599,24 +14747,24 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected - along with other supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. - Alpha, gated by the ClusterTrustBundleProjection feature gate. - ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. - Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. @@ -13658,11 +14806,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -13741,11 +14891,15 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional specify whether @@ -13768,8 +14922,8 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, - labels, name and namespace are - supported.' + labels, name, namespace and uid + are supported.' properties: apiVersion: description: Version of the @@ -13835,6 +14989,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the @@ -13878,11 +15033,15 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional field specify whether @@ -13921,6 +15080,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: description: quobyte represents a Quobyte mount on the @@ -13971,7 +15131,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine type: string image: description: |- @@ -13979,6 +15138,7 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string keyring: + default: /etc/ceph/keyring description: |- keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. @@ -13991,7 +15151,9 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic pool: + default: rbd description: |- pool is the rados pool name. Default is rbd. @@ -14011,14 +15173,18 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic user: + default: admin description: |- user is the rados user name. Default is admin. @@ -14033,6 +15199,7 @@ spec: volume attached and mounted on Kubernetes nodes. properties: fsType: + default: xfs description: |- fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. @@ -14058,10 +15225,13 @@ spec: sensitive information. If this is not provided, Login operation will fail. properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -14070,6 +15240,7 @@ spec: communication with Gateway, default false type: boolean storageMode: + default: ThinProvisioned description: |- storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. @@ -14146,6 +15317,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: description: optional field specify whether the Secret or its keys must be defined @@ -14177,10 +15349,13 @@ spec: credentials. If not specified, default values will be attempted. properties: name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -14229,6 +15404,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map required: - containers type: object @@ -14319,7 +15497,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.4 helm.sh/resource-policy: keep name: scheduledbackups.postgresql.cnpg.io spec: @@ -14396,11 +15574,12 @@ spec: method: default: barmanObjectStore description: |- - The backup method to be used, possible options are `barmanObjectStore` - and `volumeSnapshot`. Defaults to: `barmanObjectStore`. + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. enum: - barmanObjectStore - volumeSnapshot + - plugin type: string online: description: |- diff --git a/charts/cloudnative-pg/templates/deployment.yaml b/charts/cloudnative-pg/templates/deployment.yaml index 858248a66..569752007 100644 --- a/charts/cloudnative-pg/templates/deployment.yaml +++ b/charts/cloudnative-pg/templates/deployment.yaml @@ -46,6 +46,12 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} + {{- if .Values.hostNetwork }} + hostNetwork: {{ .Values.hostNetwork }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} containers: - args: - controller @@ -72,6 +78,9 @@ spec: fieldPath: metadata.namespace - name: MONITORING_QUERIES_CONFIGMAP value: "{{ .Values.monitoringQueriesConfigMap.name }}" + {{- if .Values.additionalEnv }} + {{- tpl (.Values.additionalEnv | toYaml) . | nindent 8 }} + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} livenessProbe: diff --git a/charts/cloudnative-pg/templates/podmonitor.yaml b/charts/cloudnative-pg/templates/podmonitor.yaml index bae86ca8d..cc7bd7622 100644 --- a/charts/cloudnative-pg/templates/podmonitor.yaml +++ b/charts/cloudnative-pg/templates/podmonitor.yaml @@ -18,4 +18,12 @@ spec: {{- include "cloudnative-pg.selectorLabels" . | nindent 6 }} podMetricsEndpoints: - port: metrics + {{- with .Values.monitoring.podMonitorMetricRelabelings }} + metricRelabelings: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.monitoring.podMonitorRelabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} diff --git a/charts/cloudnative-pg/templates/rbac.yaml b/charts/cloudnative-pg/templates/rbac.yaml index 1865bccab..4452005f1 100644 --- a/charts/cloudnative-pg/templates/rbac.yaml +++ b/charts/cloudnative-pg/templates/rbac.yaml @@ -44,6 +44,8 @@ rules: - "" resources: - configmaps + - secrets + - services verbs: - create - delete @@ -56,6 +58,7 @@ rules: - "" resources: - configmaps/status + - secrets/status verbs: - get - patch @@ -67,14 +70,6 @@ rules: verbs: - create - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch - apiGroups: - "" resources: @@ -87,27 +82,7 @@ rules: - "" resources: - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - pods/exec verbs: - create @@ -122,26 +97,6 @@ rules: - pods/status verbs: - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets/status - verbs: - - get - - patch - - update - apiGroups: - "" resources: @@ -153,44 +108,14 @@ rules: - patch - update - watch -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations - verbs: - - get - - list - - patch - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - validatingwebhookconfigurations verbs: - get - - list - patch - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - update - apiGroups: - apps resources: @@ -249,6 +174,9 @@ rules: - postgresql.cnpg.io resources: - backups + - clusters + - poolers + - scheduledbackups verbs: - create - delete @@ -261,6 +189,7 @@ rules: - postgresql.cnpg.io resources: - backups/status + - scheduledbackups/status verbs: - get - patch @@ -269,40 +198,6 @@ rules: - postgresql.cnpg.io resources: - clusterimagecatalogs - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - imagecatalogs verbs: - get @@ -311,64 +206,24 @@ rules: - apiGroups: - postgresql.cnpg.io resources: - - poolers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: + - clusters/finalizers - poolers/finalizers verbs: - update - apiGroups: - postgresql.cnpg.io resources: + - clusters/status - poolers/status verbs: - get - patch - update - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update - apiGroups: - rbac.authorization.k8s.io resources: - rolebindings - verbs: - - create - - get - - list - - patch - - 
update - - watch -- apiGroups: - - rbac.authorization.k8s.io - - resources: - roles verbs: - create diff --git a/charts/cloudnative-pg/values.schema.json b/charts/cloudnative-pg/values.schema.json index cd64b7c03..762bb54d1 100644 --- a/charts/cloudnative-pg/values.schema.json +++ b/charts/cloudnative-pg/values.schema.json @@ -5,6 +5,9 @@ "additionalArgs": { "type": "array" }, + "additionalEnv": { + "type": "array" + }, "affinity": { "type": "object" }, @@ -72,9 +75,15 @@ } } }, + "dnsPolicy": { + "type": "string" + }, "fullnameOverride": { "type": "string" }, + "hostNetwork": { + "type": "boolean" + }, "image": { "type": "object", "properties": { @@ -126,6 +135,12 @@ }, "podMonitorEnabled": { "type": "boolean" + }, + "podMonitorMetricRelabelings": { + "type": "array" + }, + "podMonitorRelabelings": { + "type": "array" } } }, diff --git a/charts/cloudnative-pg/values.yaml b/charts/cloudnative-pg/values.yaml index 107b499c6..c39912580 100644 --- a/charts/cloudnative-pg/values.yaml +++ b/charts/cloudnative-pg/values.yaml @@ -29,6 +29,9 @@ imagePullSecrets: [] nameOverride: "" fullnameOverride: "" +hostNetwork: false +dnsPolicy: "" + crds: # -- Specifies whether the CRDs should be created when installing the chart. create: true @@ -66,6 +69,14 @@ config: # -- Additional arguments to be added to the operator's args list. additionalArgs: [] +# -- Array containing extra environment variables which can be templated. +# For example: +# - name: RELEASE_NAME +# value: "{{ .Release.Name }}" +# - name: MY_VAR +# value: "mySpecialKey" +additionalEnv: [] + serviceAccount: # -- Specifies whether the service account should be created. create: true @@ -144,6 +155,10 @@ monitoring: # -- Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. podMonitorEnabled: false + # -- Metrics relabel configurations to apply to samples before ingestion. + podMonitorMetricRelabelings: [] + # -- Relabel configurations to apply to samples before scraping.
+ podMonitorRelabelings: [] # -- Additional labels for the podMonitor podMonitorAdditionalLabels: {} @@ -244,6 +259,7 @@ monitoringQueriesConfigMap: , pg_catalog.age(datfrozenxid) AS xid_age , pg_catalog.mxid_age(datminmxid) AS mxid_age FROM pg_catalog.pg_database + WHERE datallowconn metrics: - datname: usage: "LABEL" @@ -408,6 +424,71 @@ monitoringQueriesConfigMap: usage: "COUNTER" description: "Number of buffers allocated" + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + pg_stat_database: query: | SELECT datname diff --git a/charts/cluster/Chart.yaml b/charts/cluster/Chart.yaml index b41a8aa5b..cb3aff83b 100644 --- a/charts/cluster/Chart.yaml +++ b/charts/cluster/Chart.yaml @@ -18,7 +18,7 @@ name: cluster description: Deploys and manages a CloudNativePG cluster and its associated resources. 
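# Illustrative only: a minimal values override for the cloudnative-pg operator chart combining the new
# keys introduced in values.yaml above (hostNetwork, dnsPolicy, additionalEnv, podMonitor relabelings).
# The environment variable and relabeling rules below are hypothetical examples, not chart defaults.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
additionalEnv:
  - name: RELEASE_NAME
    value: "{{ .Release.Name }}"
monitoring:
  podMonitorEnabled: true
  # Drop Go garbage-collection metrics before ingestion.
  podMonitorMetricRelabelings:
    - sourceLabels: [__name__]
      regex: go_gc_.*
      action: drop
  # Attach a static label to the scraped target.
  podMonitorRelabelings:
    - targetLabel: environment
      replacement: production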
icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg type: application -version: 0.0.8 +version: 0.1.0 sources: - https://github.com/cloudnative-pg/charts keywords: diff --git a/charts/cluster/README.md b/charts/cluster/README.md index ff59e34ef..99cc8c378 100644 --- a/charts/cluster/README.md +++ b/charts/cluster/README.md @@ -1,6 +1,6 @@ # cluster -![Version: 0.0.8](https://img.shields.io/badge/Version-0.0.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) > **Warning** > ### This chart is under active development. @@ -143,6 +143,8 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | backups.scheduledBackups[0].method | string | `"barmanObjectStore"` | Backup method, can be `barmanObjectStore` (default) or `volumeSnapshot` | | backups.scheduledBackups[0].name | string | `"daily-backup"` | Scheduled backup name | | backups.scheduledBackups[0].schedule | string | `"0 0 0 * * *"` | Schedule in cron format | +| backups.secret.create | bool | `true` | Whether to create a secret for the backup credentials | +| backups.secret.name | string | `""` | Name of the backup credentials secret | | backups.wal.compression | string | `"gzip"` | WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. | | backups.wal.encryption | string | `"AES256"` | Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`. | | backups.wal.maxParallel | int | `1` | Number of WAL files to be archived or restored in parallel. | @@ -151,21 +153,29 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.annotations | object | `{}` | | | cluster.certificates | object | `{}` | The configuration for the CA and related certificates. See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-CertificatesConfiguration | | cluster.enableSuperuserAccess | bool | `true` | When this option is enabled, the operator will use the SuperuserSecret to update the postgres user password. If the secret is not present, the operator will automatically create one. When this option is disabled, the operator will ignore the SuperuserSecret content, delete it when automatically created, and then blank the password of the postgres user by setting it to NULL. | +| cluster.imageCatalogRef | object | `{}` | Reference to `ImageCatalog` or `ClusterImageCatalog`, if specified takes precedence over `cluster.imageName` | | cluster.imageName | string | `""` | Name of the container image, supporting both tags (:) and digests for deterministic and repeatable deployments: :@sha256: | | cluster.imagePullPolicy | string | `"IfNotPresent"` | Image pull policy. One of Always, Never or IfNotPresent. If not defined, it defaults to IfNotPresent. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | | cluster.imagePullSecrets | list | `[]` | The list of pull secrets to be used to pull the images. 
See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-LocalObjectReference | | cluster.initdb | object | `{}` | BootstrapInitDB is the configuration of the bootstrap process when initdb is used. See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb | | cluster.instances | int | `3` | Number of instances | | cluster.logLevel | string | `"info"` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | -| cluster.monitoring.customQueries | list | `[]` | Custom Prometheus metrics | +| cluster.monitoring.customQueries | list | `[]` | Custom Prometheus metrics Will be stored in the ConfigMap | +| cluster.monitoring.customQueriesSecret | list | `[]` | The list of secrets containing the custom queries | +| cluster.monitoring.disableDefaultQueries | bool | `false` | Whether the default queries should be injected. Set it to true if you don't want to inject default queries into the cluster. | | cluster.monitoring.enabled | bool | `false` | Whether to enable monitoring | | cluster.monitoring.podMonitor.enabled | bool | `true` | Whether to enable the PodMonitor | +| cluster.monitoring.podMonitor.metricRelabelings | list | `[]` | The list of metric relabelings for the PodMonitor. Applied to samples before ingestion. | +| cluster.monitoring.podMonitor.relabelings | list | `[]` | The list of relabelings for the PodMonitor. Applied to samples before scraping. | | cluster.monitoring.prometheusRule.enabled | bool | `true` | Whether to enable the PrometheusRule automated alerts | | cluster.monitoring.prometheusRule.excludeRules | list | `[]` | Exclude specified rules | -| cluster.postgresGID | int | `26` | The GID of the postgres user inside the image, defaults to 26 | -| cluster.postgresUID | int | `26` | The UID of the postgres user inside the image, defaults to 26 | -| cluster.postgresql | object | `{}` | Configuration of the PostgreSQL server. See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration | -| cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or in-place (restart). | +| cluster.postgresGID | int | `-1` | The GID of the postgres user inside the image, defaults to 26 | +| cluster.postgresUID | int | `-1` | The UID of the postgres user inside the image, defaults to 26 | +| cluster.postgresql.parameters | object | `{}` | PostgreSQL configuration options (postgresql.conf) | +| cluster.postgresql.pg_hba | list | `[]` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| cluster.postgresql.pg_ident | list | `[]` | PostgreSQL User Name Maps rules (lines to be appended to the pg_ident.conf file) | +| cluster.postgresql.shared_preload_libraries | list | `[]` | Lists of shared preload libraries to add to the default ones | +| cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or restart. 
| | cluster.primaryUpdateStrategy | string | `"unsupervised"` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (unsupervised - default) or manual (supervised) | | cluster.priorityClassName | string | `""` | | | cluster.resources | object | `{}` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. We strongly advise you use the same setting for limits and requests so that your cluster pods are given a Guaranteed QoS. See: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/ | @@ -173,17 +183,15 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.storage.size | string | `"8Gi"` | | | cluster.storage.storageClass | string | `""` | | | cluster.superuserSecret | string | `""` | | +| cluster.walStorage.enabled | bool | `false` | | +| cluster.walStorage.size | string | `"1Gi"` | | +| cluster.walStorage.storageClass | string | `""` | | | fullnameOverride | string | `""` | Override the full name of the chart | +| imageCatalog.create | bool | `true` | Whether to provision an image catalog. If imageCatalog.images is empty this option will be ignored. | +| imageCatalog.images | list | `[]` | List of images to be provisioned in an image catalog. | | mode | string | `"standalone"` | Cluster mode of operation. Available modes: * `standalone` - default mode. Creates new or updates an existing CNPG cluster. * `replica` - Creates a replica cluster from an existing CNPG cluster. # TODO * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup. | | nameOverride | string | `""` | Override the name of the chart | -| pooler.enabled | bool | `false` | Whether to enable PgBouncer | -| pooler.instances | int | `3` | Number of PgBouncer instances | -| pooler.monitoring.enabled | bool | `false` | Whether to enable monitoring | -| pooler.monitoring.podMonitor.enabled | bool | `true` | Whether to enable the PodMonitor | -| pooler.parameters | object | `{"default_pool_size":"25","max_client_conn":"1000"}` | PgBouncer configuration parameters | -| pooler.poolMode | string | `"transaction"` | PgBouncer pooling mode | -| pooler.template | object | `{}` | Custom PgBouncer deployment template. Use to override image, specify resources, etc. | -| pooler.type | string | `"rw"` | PgBouncer type of service to forward traffic to. | +| poolers | list | `[]` | List of PgBouncer poolers | | recovery.azure.connectionString | string | `""` | | | recovery.azure.containerName | string | `""` | | | recovery.azure.inheritFromAzureAD | bool | `false` | | @@ -203,6 +211,24 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.google.gkeEnvironment | bool | `false` | | | recovery.google.path | string | `"/"` | | | recovery.method | string | `"backup"` | Available recovery methods: * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported). Needs to be on the same cluster in the same namespace. * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). * `pg_basebackup` - Recovers a CNPG cluster via the streaming replication protocol. Useful if you want to migrate databases to CloudNativePG, even from outside Kubernetes. # TODO | +| recovery.pgBaseBackup.database | string | `"app"` | Name of the database used by the application. Default: `app`. 
| +| recovery.pgBaseBackup.owner | string | `""` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. | +| recovery.pgBaseBackup.secret | string | `""` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | +| recovery.pgBaseBackup.source.database | string | `"app"` | | +| recovery.pgBaseBackup.source.host | string | `""` | | +| recovery.pgBaseBackup.source.passwordSecret.create | bool | `false` | Whether to create a secret for the password | +| recovery.pgBaseBackup.source.passwordSecret.key | string | `"password"` | The key in the secret containing the password | +| recovery.pgBaseBackup.source.passwordSecret.name | string | `""` | Name of the secret containing the password | +| recovery.pgBaseBackup.source.passwordSecret.value | string | `""` | The password value to use when creating the secret | +| recovery.pgBaseBackup.source.port | int | `5432` | | +| recovery.pgBaseBackup.source.sslCertSecret.key | string | `""` | | +| recovery.pgBaseBackup.source.sslCertSecret.name | string | `""` | | +| recovery.pgBaseBackup.source.sslKeySecret.key | string | `""` | | +| recovery.pgBaseBackup.source.sslKeySecret.name | string | `""` | | +| recovery.pgBaseBackup.source.sslMode | string | `"verify-full"` | | +| recovery.pgBaseBackup.source.sslRootCertSecret.key | string | `""` | | +| recovery.pgBaseBackup.source.sslRootCertSecret.name | string | `""` | | +| recovery.pgBaseBackup.source.username | string | `""` | | | recovery.pitrTarget.time | string | `""` | Time in RFC3339 format | | recovery.provider | string | `"s3"` | One of `s3`, `azure` or `google` | | recovery.s3.accessKey | string | `""` | | @@ -210,7 +236,24 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.s3.path | string | `"/"` | | | recovery.s3.region | string | `""` | | | recovery.s3.secretKey | string | `""` | | -| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` | +| recovery.secret.create | bool | `true` | Whether to create a secret for the backup credentials | +| recovery.secret.name | string | `""` | Name of the backup credentials secret | +| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` * `timescaledb` | +| version.postgis | string | `"3.4"` | If using PostGIS, specify the version | +| version.postgresql | string | `"16"` | PostgreSQL major version to use | +| version.timescaledb | string | `"2.15"` | If using TimescaleDB, specify the version | +| poolers[].name | string | `` | Name of the pooler resource | +| poolers[].instances | number | `1` | The number of replicas we want | +| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | +| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | +| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. 
| +| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | +| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | +| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | +| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | +| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | ## Maintainers diff --git a/charts/cluster/README.md.gotmpl b/charts/cluster/README.md.gotmpl index e1a4d2f05..1ca7bebaa 100644 --- a/charts/cluster/README.md.gotmpl +++ b/charts/cluster/README.md.gotmpl @@ -123,7 +123,18 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat {{ template "chart.valuesSection" . }} - +| poolers[].name | string | `` | Name of the pooler resource | +| poolers[].instances | number | `1` | The number of replicas we want | +| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | +| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | +| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | +| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | +| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | +| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | +| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | {{ template "chart.maintainersSection" . }} diff --git a/charts/cluster/docs/Recovery.md b/charts/cluster/docs/Recovery.md index 6a1be6593..1a7a9153c 100644 --- a/charts/cluster/docs/Recovery.md +++ b/charts/cluster/docs/Recovery.md @@ -10,7 +10,7 @@ You can find more information about the recovery process in the [CNPG documentat There are 3 types of recovery possible with CNPG: * Recovery from a backup object in the same Kubernetes namespace. * Recovery from a Barman Object Store, that could be located anywhere. -* Streaming replication from an operating cluster using `pg_basebackup` (not supported by the chart yet). +* Streaming replication from an operating cluster using `pg_basebackup`. When performing a recovery you are strongly advised to use the same configuration and PostgreSQL version as the original cluster. @@ -18,10 +18,10 @@ To begin, create a `values.yaml` that contains the following: 1. Set `mode: recovery` to indicate that you want to bootstrap the new cluster from an existing one. 2. 
Set the `recovery.method` to the type of recovery you want to perform. -3. Set either the `recovery.backupName` or the Barman Object Store configuration - i.e. `recovery.provider` and appropriate S3, Azure or GCS configuration. -4. Optionally set the `recovery.pitrTarget.time` in RFC3339 format to perform a point-in-time recovery. -4. Retain the identical PostgreSQL version and configuration as the original cluster. -5. Make sure you don't use the same backup section name as the original cluster. We advise you change the `path` within the storage location if you want to reuse the same storage location/bucket. +3. Set either the `recovery.backupName` or the Barman Object Store configuration - i.e. `recovery.provider` and appropriate S3, Azure or GCS configuration. In case of `pg_basebackup` complete the `recovery.pgBaseBackup` section. +4. Optionally set the `recovery.pitrTarget.time` in RFC3339 format to perform a point-in-time recovery (not applicable for `pgBaseBackup`). +5. Retain the identical PostgreSQL version and configuration as the original cluster. +6. Make sure you don't use the same backup section name as the original cluster. We advise you change the `path` within the storage location if you want to reuse the same storage location/bucket. One pattern is adding a version number at the end of the path, e.g. `/v1` or `/v2` after each recovery procedure. Example recovery configurations can be found in the [examples](../examples) directory. diff --git a/charts/cluster/examples/basic.yaml b/charts/cluster/examples/basic.yaml index 5b608c267..b4c15bb15 100644 --- a/charts/cluster/examples/basic.yaml +++ b/charts/cluster/examples/basic.yaml @@ -1,4 +1,7 @@ +type: postgresql mode: standalone +version: + postgresql: "16" cluster: instances: 1 backups: diff --git a/charts/cluster/examples/image-catalog-ref.yaml b/charts/cluster/examples/image-catalog-ref.yaml new file mode 100644 index 000000000..e4833a3b6 --- /dev/null +++ b/charts/cluster/examples/image-catalog-ref.yaml @@ -0,0 +1,12 @@ +type: postgresql +mode: standalone +version: + major: "16" + timescaledb: "2.15" +cluster: + instances: 1 + imageCatalogRef: + kind: ImageCatalog + name: my-image-catalog +backups: + enabled: false diff --git a/charts/cluster/examples/image-catalog.yaml b/charts/cluster/examples/image-catalog.yaml new file mode 100644 index 000000000..c610229b0 --- /dev/null +++ b/charts/cluster/examples/image-catalog.yaml @@ -0,0 +1,14 @@ +type: postgresql +mode: standalone +version: + major: "16" + timescaledb: "2.15" +cluster: + instances: 1 +backups: + enabled: false +imageCatalog: + create: true + images: + - major: 16 + image: my-custom-postgres-image:mytag diff --git a/charts/cluster/examples/pgbouncer.yaml b/charts/cluster/examples/pgbouncer.yaml index 1da966275..94d4987e6 100644 --- a/charts/cluster/examples/pgbouncer.yaml +++ b/charts/cluster/examples/pgbouncer.yaml @@ -1,8 +1,34 @@ +type: postgresql mode: standalone + cluster: instances: 1 + monitoring: + enabled: true + podMonitor: + enabled: true + backups: enabled: false -pooler: - enabled: true - instances: 1 + +poolers: + - name: rw + type: rw + instances: 1 + monitoring: + enabled: true + podMonitor: + enabled: true + relabelings: + - targetLabel: type + replacement: rw + - name: ro + type: ro + instances: 1 + monitoring: + enabled: true + podMonitor: + enabled: true + relabelings: + - targetLabel: type + replacement: ro diff --git a/charts/cluster/examples/postgis.yaml b/charts/cluster/examples/postgis.yaml index 6c686dc62..168ac9fbf 100644 --- 
a/charts/cluster/examples/postgis.yaml +++ b/charts/cluster/examples/postgis.yaml @@ -1,6 +1,9 @@ type: postgis mode: standalone +version: + postgresql: "16" + postgis: "3.4" cluster: instances: 1 backups: - enabled: false \ No newline at end of file + enabled: false diff --git a/charts/cluster/examples/recovery-backup.yaml b/charts/cluster/examples/recovery-backup.yaml index d11187f5c..c478e7a9e 100644 --- a/charts/cluster/examples/recovery-backup.yaml +++ b/charts/cluster/examples/recovery-backup.yaml @@ -1,3 +1,4 @@ +type: postgresql mode: recovery recovery: diff --git a/charts/cluster/examples/recovery-object_store.yaml b/charts/cluster/examples/recovery-object_store.yaml index 92722a159..060f5328d 100644 --- a/charts/cluster/examples/recovery-object_store.yaml +++ b/charts/cluster/examples/recovery-object_store.yaml @@ -1,8 +1,9 @@ +type: postgresql mode: recovery recovery: method: object_store - serverName: "cluster-name-to-recover-from" + clusterName: "cluster-name-to-recover-from" provider: s3 s3: region: "eu-west-1" @@ -27,4 +28,4 @@ backups: - name: daily-backup # Daily at midnight schedule: "0 0 0 * * *" # Daily at midnight backupOwnerReference: self - retentionPolicy: "30d" \ No newline at end of file + retentionPolicy: "30d" diff --git a/charts/cluster/examples/recovery-pg_basebackup.yaml b/charts/cluster/examples/recovery-pg_basebackup.yaml new file mode 100644 index 000000000..05358bf50 --- /dev/null +++ b/charts/cluster/examples/recovery-pg_basebackup.yaml @@ -0,0 +1,15 @@ +type: postgresql +mode: "recovery" + +recovery: + method: "pg_basebackup" + pgBaseBackup: + sourceHost: "source-db.foo.com" + sourceUsername: "streaming_replica" + existingPasswordSecret: "source-db-replica-password" + +cluster: + instances: 1 + +backups: + enabled: false \ No newline at end of file diff --git a/charts/cluster/examples/standalone-s3.yaml b/charts/cluster/examples/standalone-s3.yaml index bf1794d06..44a4bb104 100644 --- a/charts/cluster/examples/standalone-s3.yaml +++ b/charts/cluster/examples/standalone-s3.yaml @@ -1,3 +1,4 @@ +type: postgresql mode: standalone cluster: diff --git a/charts/cluster/examples/timescaledb.yaml b/charts/cluster/examples/timescaledb.yaml new file mode 100644 index 000000000..328b6c1eb --- /dev/null +++ b/charts/cluster/examples/timescaledb.yaml @@ -0,0 +1,9 @@ +type: timescaledb +mode: standalone +version: + postgresql: "15.7" + timescaledb: "2.15" +cluster: + instances: 1 +backups: + enabled: false diff --git a/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml b/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml index e5de95225..df13ce3b3 100644 --- a/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml +++ b/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml @@ -8,7 +8,7 @@ annotations: the maximum number of connections. 
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md expr: | - sum by (pod) (cnpg_backends_total{namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 95 + sum by (pod) (cnpg_backends_total{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 95 for: 5m labels: severity: critical diff --git a/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml b/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml index ae706ee0b..73cc78392 100644 --- a/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml +++ b/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml @@ -8,7 +8,7 @@ annotations: the maximum number of connections. runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md expr: | - sum by (pod) (cnpg_backends_total{namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 80 + sum by (pod) (cnpg_backends_total{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 80 for: 5m labels: severity: warning diff --git a/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml b/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml index ab1c175a1..660db254f 100644 --- a/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml +++ b/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml @@ -10,7 +10,7 @@ annotations: High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md expr: | - max(cnpg_pg_replication_lag{namespace=~"{{ .namespace }}",pod=~"{{ .podSelector }}"}) * 1000 > 1000 + max(cnpg_pg_replication_lag{namespace="{{ .namespace }}",pod=~"{{ .podSelector }}"}) * 1000 > 1000 for: 5m labels: severity: warning diff --git a/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml b/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml index b5a90742e..aafcfab1e 100644 --- a/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml +++ b/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml @@ -10,7 +10,7 @@ annotations: A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. 
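# Illustrative only: with hypothetical template values namespace="db" and podSelector="db-cluster-([1-9][0-9]*)$",
# the high-connections warning expression above now renders with an exact namespace matcher instead of the previous
# regex match, so the alert can no longer accidentally select look-alike namespaces:
#   sum by (pod) (cnpg_backends_total{namespace="db", pod=~"db-cluster-([1-9][0-9]*)$"})
#     / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="db", pod=~"db-cluster-([1-9][0-9]*)$"})
#     * 100 > 80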
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md expr: | - count by (node) (kube_pod_info{namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"}) > 1 + count by (node) (kube_pod_info{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) > 1 for: 5m labels: severity: warning diff --git a/charts/cluster/prometheus_rules/cluster-offline.yaml b/charts/cluster/prometheus_rules/cluster-offline.yaml index 4ac68ce35..4206c02f3 100644 --- a/charts/cluster/prometheus_rules/cluster-offline.yaml +++ b/charts/cluster/prometheus_rules/cluster-offline.yaml @@ -4,13 +4,13 @@ alert: {{ $alert }} annotations: summary: CNPG Cluster has no running instances! description: |- - CloudNativePG Cluster "{{ .labels.job }}" has no ready instances. + CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" has no ready instances. Having an offline cluster means your applications will not be able to access the database, leading to potential service disruption and/or data loss. runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md expr: | - ({{ .Values.cluster.instances }} - count(cnpg_collector_up{namespace=~"{{ .namespace }}",pod=~"{{ .podSelector }}"}) OR vector(0)) > 0 + (count(cnpg_collector_up{namespace="{{ .namespace }}",pod=~"{{ .podSelector }}"}) OR on() vector(0)) == 0 for: 5m labels: severity: critical diff --git a/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml b/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml index 0959ae87b..41fa4002a 100644 --- a/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml +++ b/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml @@ -9,7 +9,7 @@ annotations: A disaster in one availability zone will lead to a potential service disruption and/or data loss. runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md expr: | - {{ .Values.cluster.instances }} > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace=~"{{ .namespace }}", pod=~"{{ .podSelector }}"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 + {{ .Values.cluster.instances }} > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 for: 5m labels: severity: warning diff --git a/charts/cluster/templates/NOTES.txt b/charts/cluster/templates/NOTES.txt index dd5142ecc..5e96a74ea 100644 --- a/charts/cluster/templates/NOTES.txt +++ b/charts/cluster/templates/NOTES.txt @@ -1,3 +1,12 @@ +{{- if .Values.pooler -}} + {{ fail ".Values.pooler has been deprecated. Use .Values.poolers instead." }} +{{- end -}} + +{{- if gt (omit .Values.cluster.postgresql "parameters" "synchronous" "pg_hba" "pg_ident" "syncReplicaElectionConstraint" "shared_preload_libraries" "ldap" "promotionTimeout" "enableAlterSystem" | keys | len) 0 -}} + {{ fail ".Values.cluster.postgresql has been deprecated. Use .Values.cluster.postgresql.parameters instead." }} +{{- end -}} + + {{ if .Release.IsInstall }} The {{ include "cluster.color-info" (include "cluster.fullname" .) }} has been installed successfully. 
{{ else if .Release.IsUpgrade }} @@ -37,26 +46,50 @@ Configuration {{- $redundancyColor = "ok" -}} {{- end }} -{{ $scheduledBackups := (first .Values.backups.scheduledBackups).name }} +{{- $scheduledBackups := (first .Values.backups.scheduledBackups).name -}} {{- range (rest .Values.backups.scheduledBackups) -}} {{ $scheduledBackups = printf "%s, %s" $scheduledBackups .name }} {{- end -}} +{{- if eq (len .Values.backups.scheduledBackups) 0 }} + {{- $scheduledBackups = "None" -}} +{{- end -}} -╭───────────────────┬────────────────────────────────────────────────────────╮ -│ Configuration │ Value │ -┝━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┥ -│ Cluster mode │ {{ (printf "%-54s" .Values.mode) }} │ -│ Type │ {{ (printf "%-54s" .Values.type) }} │ -│ Image │ {{ include "cluster.color-info" (printf "%-54s" (include "cluster.imageName" .)) }} │ -│ Instances │ {{ include (printf "%s%s" "cluster.color-" $redundancyColor) (printf "%-54s" (toString .Values.cluster.instances)) }} │ -│ Backups │ {{ include (printf "%s%s" "cluster.color-" (ternary "ok" "error" .Values.backups.enabled)) (printf "%-54s" (ternary "Enabled" "Disabled" .Values.backups.enabled)) }} │ -│ Backup Provider │ {{ (printf "%-54s" (title .Values.backups.provider)) }} │ -│ Scheduled Backups │ {{ (printf "%-54s" $scheduledBackups) }} │ -│ Storage │ {{ (printf "%-54s" .Values.cluster.storage.size) }} │ -│ Storage Class │ {{ (printf "%-54s" (default "Default" .Values.cluster.storage.storageClass)) }} │ -│ PGBouncer │ {{ (printf "%-54s" (ternary "Enabled" "Disabled" .Values.pooler.enabled)) }} │ -│ Monitoring │ {{ include (printf "%s%s" "cluster.color-" (ternary "ok" "error" .Values.cluster.monitoring.enabled)) (printf "%-54s" (ternary "Enabled" "Disabled" .Values.cluster.monitoring.enabled)) }} │ -╰───────────────────┴────────────────────────────────────────────────────────╯ +{{- $mode := .Values.mode -}} +{{- $source := "" -}} +{{- if eq .Values.mode "recovery" }} +{{- $mode = printf "%s (%s)" .Values.mode .Values.recovery.method -}} + {{- if eq .Values.recovery.method "pg_basebackup" }} + {{- $source = printf "postgresql://%s@%s:%.0f/%s" .Values.recovery.pgBaseBackup.source.username .Values.recovery.pgBaseBackup.source.host .Values.recovery.pgBaseBackup.source.port .Values.recovery.pgBaseBackup.source.database -}} + {{- end -}} +{{- end -}} + +{{- $image := (include "cluster.image" .) | fromYaml -}} +{{- if $image.imageCatalogRef -}} + {{- $image = printf "%s: %s(%s)" $image.imageCatalogRef.kind $image.imageCatalogRef.name (include "cluster.postgresqlMajor" .) 
-}} +{{- else if $image.imageName -}} + {{- $image = $image.imageName -}} +{{- end }} + +╭───────────────────┬──────────────────────────────────────────────────────────╮ +│ Configuration │ Value │ +┝━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┥ +│ Cluster mode │ {{ printf "%-56s" $mode }} │ +│ Type │ {{ printf "%-56s" .Values.type }} │ +│ Image │ {{ include "cluster.color-info" (printf "%-56s" $image) }} │ +{{- if eq .Values.mode "recovery" }} +│ Source │ {{ printf "%-56s" $source }} │ +{{- end }} +│ Instances │ {{ include (printf "%s%s" "cluster.color-" $redundancyColor) (printf "%-56s" (toString .Values.cluster.instances)) }} │ +│ Backups │ {{ include (printf "%s%s" "cluster.color-" (ternary "ok" "error" .Values.backups.enabled)) (printf "%-56s" (ternary "Enabled" "Disabled" .Values.backups.enabled)) }} │ +{{- if .Values.backups.enabled }} +│ Backup Provider │ {{ printf "%-56s" (title .Values.backups.provider) }} │ +│ Scheduled Backups │ {{ printf "%-56s" $scheduledBackups }} │ +{{- end }} +│ Storage │ {{ printf "%-56s" .Values.cluster.storage.size }} │ +│ Storage Class │ {{ printf "%-56s" (default "Default" .Values.cluster.storage.storageClass) }} │ +│ PGBouncer │ {{ printf "%-56s" (ternary "Enabled" "Disabled" (gt (len .Values.poolers) 0)) }} │ +│ Monitoring │ {{ include (printf "%s%s" "cluster.color-" (ternary "ok" "error" .Values.cluster.monitoring.enabled)) (printf "%-56s" (ternary "Enabled" "Disabled" .Values.cluster.monitoring.enabled)) }} │ +╰───────────────────┴──────────────────────────────────────────────────────────╯ {{ if not .Values.backups.enabled }} {{- include "cluster.color-error" "Warning! Backups not enabled. Recovery will not be possible! Do not use this configuration in production.\n" }} diff --git a/charts/cluster/templates/_backup.tpl b/charts/cluster/templates/_backup.tpl index 8b9a094d3..8059618c4 100644 --- a/charts/cluster/templates/_backup.tpl +++ b/charts/cluster/templates/_backup.tpl @@ -1,6 +1,6 @@ {{- define "cluster.backup" -}} -backup: {{- if .Values.backups.enabled }} +backup: target: "prefer-standby" retentionPolicy: {{ .Values.backups.retentionPolicy }} barmanObjectStore: @@ -13,7 +13,7 @@ backup: encryption: {{ .Values.backups.data.encryption }} jobs: {{ .Values.backups.data.jobs }} - {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.backups }} + {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.backups "secretPrefix" "backup" }} {{- include "cluster.barmanObjectStoreConfig" $d | nindent 2 }} {{- end }} {{- end }} diff --git a/charts/cluster/templates/_barman_object_store.tpl b/charts/cluster/templates/_barman_object_store.tpl index 4269ad951..881047655 100644 --- a/charts/cluster/templates/_barman_object_store.tpl +++ b/charts/cluster/templates/_barman_object_store.tpl @@ -1,13 +1,13 @@ {{- define "cluster.barmanObjectStoreConfig" -}} {{- if .scope.endpointURL }} - endpointURL: {{ .scope.endpointURL }} + endpointURL: {{ .scope.endpointURL | quote }} {{- end }} {{- if or (.scope.endpointCA.create) (.scope.endpointCA.name) }} endpointCA: - name: {{ .chartFullname }}-ca-bundle - key: ca-bundle.crt + name: {{.scope.endpointCA.name }} + key: {{ .scope.endpointCA.key }} {{- end }} {{- if .scope.destinationPath }} @@ -21,35 +21,37 @@ {{- if empty .scope.destinationPath }} destinationPath: "s3://{{ required "You need to specify S3 bucket if destinationPath is not specified." 
.scope.s3.bucket }}{{ .scope.s3.path }}" {{- end }} + {{- $secretName := coalesce .scope.secret.name (printf "%s-%s-s3-creds" .chartFullname .secretPrefix) }} s3Credentials: accessKeyId: - name: {{ .chartFullname }}-backup-s3{{ .secretSuffix }}-creds + name: {{ $secretName }} key: ACCESS_KEY_ID secretAccessKey: - name: {{ .chartFullname }}-backup-s3{{ .secretSuffix }}-creds + name: {{ $secretName }} key: ACCESS_SECRET_KEY {{- else if eq .scope.provider "azure" }} {{- if empty .scope.destinationPath }} destinationPath: "https://{{ required "You need to specify Azure storageAccount if destinationPath is not specified." .scope.azure.storageAccount }}.{{ .scope.azure.serviceName }}.core.windows.net/{{ .scope.azure.containerName }}{{ .scope.azure.path }}" {{- end }} + {{- $secretName := coalesce .scope.secret.name (printf "%s-%s-azure-creds" .chartFullname .secretPrefix) }} azureCredentials: {{- if .scope.azure.inheritFromAzureAD }} inheritFromAzureAD: true {{- else if .scope.azure.connectionString }} connectionString: - name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + name: {{ $secretName }} key: AZURE_CONNECTION_STRING {{- else }} storageAccount: - name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + name: {{ $secretName }} key: AZURE_STORAGE_ACCOUNT {{- if .scope.azure.storageKey }} storageKey: - name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + name: {{ $secretName }} key: AZURE_STORAGE_KEY {{- else }} storageSasToken: - name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + name: {{ $secretName }} key: AZURE_STORAGE_SAS_TOKEN {{- end }} {{- end }} @@ -57,10 +59,13 @@ {{- if empty .scope.destinationPath }} destinationPath: "gs://{{ required "You need to specify Google storage bucket if destinationPath is not specified." .scope.google.bucket }}{{ .scope.google.path }}" {{- end }} + {{- $secretName := coalesce .scope.secret.name (printf "%s-%s-google-creds" .chartFullname .secretPrefix) }} googleCredentials: gkeEnvironment: {{ .scope.google.gkeEnvironment }} +{{- if not .scope.google.gkeEnvironment }} applicationCredentials: - name: {{ .chartFullname }}-backup-google{{ .secretSuffix }}-creds + name: {{ $secretName }} key: APPLICATION_CREDENTIALS +{{- end }} {{- end -}} {{- end -}} diff --git a/charts/cluster/templates/_bootstrap.tpl b/charts/cluster/templates/_bootstrap.tpl index 6147f3a77..aea7d9429 100644 --- a/charts/cluster/templates/_bootstrap.tpl +++ b/charts/cluster/templates/_bootstrap.tpl @@ -1,12 +1,15 @@ {{- define "cluster.bootstrap" -}} -bootstrap: {{- if eq .Values.mode "standalone" }} +bootstrap: initdb: {{- with .Values.cluster.initdb }} - {{- with (omit . "postInitApplicationSQL") }} + {{- with (omit . "postInitApplicationSQL" "owner") }} {{- . | toYaml | nindent 4 }} {{- end }} {{- end }} + {{- if .Values.cluster.initdb.owner }} + owner: {{ tpl .Values.cluster.initdb.owner . }} + {{- end }} postInitApplicationSQL: {{- if eq .Values.type "postgis" }} - CREATE EXTENSION IF NOT EXISTS postgis; @@ -21,7 +24,52 @@ bootstrap: {{- printf "- %s" . | nindent 6 }} {{- end -}} {{- end -}} -{{- else if eq .Values.mode "recovery" }} +{{- else if eq .Values.mode "recovery" -}} +bootstrap: +{{- if eq .Values.recovery.method "pg_basebackup" }} + pg_basebackup: + source: pgBaseBackupSource + {{ with .Values.recovery.pgBaseBackup.database }} + database: {{ . }} + {{- end }} + {{ with .Values.recovery.pgBaseBackup.owner }} + owner: {{ . }} + {{- end }} + {{ with .Values.recovery.pgBaseBackup.secret }} + secret: + {{- toYaml . 
| nindent 6 }} + {{- end }} + +externalClusters: +- name: pgBaseBackupSource + connectionParameters: + host: {{ .Values.recovery.pgBaseBackup.source.host | quote }} + port: {{ .Values.recovery.pgBaseBackup.source.port | quote }} + user: {{ .Values.recovery.pgBaseBackup.source.username | quote }} + dbname: {{ .Values.recovery.pgBaseBackup.source.database | quote }} + sslmode: {{ .Values.recovery.pgBaseBackup.source.sslMode | quote }} + {{- if .Values.recovery.pgBaseBackup.source.passwordSecret.name }} + password: + name: {{ default (printf "%s-pg-basebackup-password" (include "cluster.fullname" .)) .Values.recovery.pgBaseBackup.source.passwordSecret.name }} + key: {{ .Values.recovery.pgBaseBackup.source.passwordSecret.key }} + {{- end }} + {{- if .Values.recovery.pgBaseBackup.source.sslKeySecret.name }} + sslKey: + name: {{ .Values.recovery.pgBaseBackup.source.sslKeySecret.name }} + key: {{ .Values.recovery.pgBaseBackup.source.sslKeySecret.key }} + {{- end }} + {{- if .Values.recovery.pgBaseBackup.source.sslCertSecret.name }} + sslCert: + name: {{ .Values.recovery.pgBaseBackup.source.sslCertSecret.name }} + key: {{ .Values.recovery.pgBaseBackup.source.sslCertSecret.key }} + {{- end }} + {{- if .Values.recovery.pgBaseBackup.source.sslRootCertSecret.name }} + sslRootCert: + name: {{ .Values.recovery.pgBaseBackup.source.sslRootCertSecret.name }} + key: {{ .Values.recovery.pgBaseBackup.source.sslRootCertSecret.key }} + {{- end }} + +{{- else }} recovery: {{- with .Values.recovery.pitrTarget.time }} recoveryTarget: @@ -38,8 +86,9 @@ externalClusters: - name: objectStoreRecoveryCluster barmanObjectStore: serverName: {{ .Values.recovery.clusterName }} - {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.recovery "secretSuffix" "-recovery" -}} + {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.recovery "secretPrefix" "recovery" -}} {{- include "cluster.barmanObjectStoreConfig" $d | nindent 4 }} +{{- end }} {{- else }} {{ fail "Invalid cluster mode!" }} {{- end }} diff --git a/charts/cluster/templates/_helpers.tpl b/charts/cluster/templates/_helpers.tpl index db3c253e5..96726fdfe 100644 --- a/charts/cluster/templates/_helpers.tpl +++ b/charts/cluster/templates/_helpers.tpl @@ -51,6 +51,20 @@ app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/part-of: cloudnative-pg {{- end }} +{{/* +Whether we need to use TimescaleDB defaults +*/}} +{{- define "cluster.useTimescaleDBDefaults" -}} +{{ and (eq .Values.type "timescaledb") .Values.imageCatalog.create (empty .Values.cluster.imageCatalogRef.name) (empty .Values.imageCatalog.images) (empty .Values.cluster.imageName) }} +{{- end -}} + +{{/* +Get the PostgreSQL major version from .Values.version.postgresql +*/}} +{{- define "cluster.postgresqlMajor" -}} +{{ index (regexSplit "\\." 
(toString .Values.version.postgresql) 2) 0 }} +{{- end -}} + {{/* Cluster Image Name If a custom imageName is available, use it, otherwise use the defaults based on the .Values.type @@ -59,12 +73,63 @@ If a custom imageName is available, use it, otherwise use the defaults based on {{- if .Values.cluster.imageName -}} {{- .Values.cluster.imageName -}} {{- else if eq .Values.type "postgresql" -}} - {{- "ghcr.io/cloudnative-pg/postgresql:15.2" -}} + {{- printf "ghcr.io/cloudnative-pg/postgresql:%s" .Values.version.postgresql -}} {{- else if eq .Values.type "postgis" -}} - {{- "ghcr.io/cloudnative-pg/postgis:14" -}} - {{- else if eq .Values.type "timescaledb" -}} - {{ fail "You need to provide your own cluster.imageName as an official timescaledb image doesn't exist yet." }} + {{- printf "ghcr.io/cloudnative-pg/postgis:%s-%s" .Values.version.postgresql .Values.version.postgis -}} {{- else -}} {{ fail "Invalid cluster type!" }} {{- end }} {{- end -}} + +{{/* +Cluster Image +If imageCatalogRef is defined, use it, otherwise calculate the ordinary imageName. +*/}} +{{- define "cluster.image" }} +{{- if .Values.cluster.imageCatalogRef.name }} +imageCatalogRef: + apiGroup: postgresql.cnpg.io + {{- toYaml .Values.cluster.imageCatalogRef | nindent 2 }} + major: {{ include "cluster.postgresqlMajor" . }} +{{- else if and .Values.imageCatalog.create (not (empty .Values.imageCatalog.images )) }} +imageCatalogRef: + apiGroup: postgresql.cnpg.io + kind: ImageCatalog + name: {{ include "cluster.fullname" . }} + major: {{ include "cluster.postgresqlMajor" . }} +{{- else if eq (include "cluster.useTimescaleDBDefaults" .) "true" -}} +imageCatalogRef: + apiGroup: postgresql.cnpg.io + kind: ImageCatalog + name: {{ include "cluster.fullname" . }}-timescaledb-ha + major: {{ include "cluster.postgresqlMajor" . }} +{{- else }} +imageName: {{ include "cluster.imageName" . }} +{{- end }} +{{- end }} + +{{/* +Postgres UID +*/}} +{{- define "cluster.postgresUID" -}} + {{- if ge (int .Values.cluster.postgresUID) 0 -}} + {{- .Values.cluster.postgresUID }} + {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) "true") (eq .Values.type "timescaledb") -}} + {{- 1000 -}} + {{- else -}} + {{- 26 -}} + {{- end -}} +{{- end -}} + +{{/* +Postgres GID +*/}} +{{- define "cluster.postgresGID" -}} + {{- if ge (int .Values.cluster.postgresGID) 0 -}} + {{- .Values.cluster.postgresGID }} + {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) "true") (eq .Values.type "timescaledb") -}} + {{- 1000 -}} + {{- else -}} + {{- 26 -}} + {{- end -}} +{{- end -}} diff --git a/charts/cluster/templates/backup-azure-creds.yaml b/charts/cluster/templates/backup-azure-creds.yaml index 19a651eb3..6c84308dd 100644 --- a/charts/cluster/templates/backup-azure-creds.yaml +++ b/charts/cluster/templates/backup-azure-creds.yaml @@ -1,11 +1,11 @@ -{{- if and .Values.backups.enabled (eq .Values.backups.provider "azure") }} +{{- if and .Values.backups.enabled (eq .Values.backups.provider "azure") .Values.backups.secret.create }} apiVersion: v1 kind: Secret metadata: - name: {{ include "cluster.fullname" . 
}}-backup-azure-creds + name: {{ default (printf "%s-backup-azure-creds" (include "cluster.fullname" .)) .Values.backups.secret.name }} data: AZURE_CONNECTION_STRING: {{ .Values.backups.azure.connectionString | b64enc | quote }} AZURE_STORAGE_ACCOUNT: {{ .Values.backups.azure.storageAccount | b64enc | quote }} AZURE_STORAGE_KEY: {{ .Values.backups.azure.storageKey | b64enc | quote }} AZURE_STORAGE_SAS_TOKEN: {{ .Values.backups.azure.storageSasToken | b64enc | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/cluster/templates/backup-google-creds.yaml b/charts/cluster/templates/backup-google-creds.yaml index 252a27064..cc05c4c59 100644 --- a/charts/cluster/templates/backup-google-creds.yaml +++ b/charts/cluster/templates/backup-google-creds.yaml @@ -1,8 +1,8 @@ -{{- if and .Values.backups.enabled (eq .Values.backups.provider "google") }} +{{- if and .Values.backups.enabled (eq .Values.backups.provider "google") .Values.backups.secret.create }} apiVersion: v1 kind: Secret metadata: - name: {{ include "cluster.fullname" . }}-backup-google-creds + name: {{ default (printf "%s-backup-google-creds" (include "cluster.fullname" .)) .Values.backups.secret.name }} data: APPLICATION_CREDENTIALS: {{ .Values.backups.google.applicationCredentials | b64enc | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/cluster/templates/backup-google-recovery-creds.yaml b/charts/cluster/templates/backup-google-recovery-creds.yaml deleted file mode 100644 index 942bb897b..000000000 --- a/charts/cluster/templates/backup-google-recovery-creds.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "google") }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "cluster.fullname" . }}-backup-google-recovery-creds -data: - APPLICATION_CREDENTIALS: {{ .Values.recovery.google.applicationCredentials | b64enc | quote }} -{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/backup-s3-creds.yaml b/charts/cluster/templates/backup-s3-creds.yaml index b906d2453..ddd8e2717 100644 --- a/charts/cluster/templates/backup-s3-creds.yaml +++ b/charts/cluster/templates/backup-s3-creds.yaml @@ -1,8 +1,8 @@ -{{- if and .Values.backups.enabled (eq .Values.backups.provider "s3") }} +{{- if and .Values.backups.enabled (eq .Values.backups.provider "s3") .Values.backups.secret.create }} apiVersion: v1 kind: Secret metadata: - name: {{ include "cluster.fullname" . }}-backup-s3-creds + name: {{ default (printf "%s-backup-s3-creds" (include "cluster.fullname" .)) .Values.backups.secret.name }} data: ACCESS_KEY_ID: {{ required ".Values.backups.s3.accessKey is required, but not specified." .Values.backups.s3.accessKey | b64enc | quote }} ACCESS_SECRET_KEY: {{ required ".Values.backups.s3.secretKey is required, but not specified." .Values.backups.s3.secretKey | b64enc | quote }} diff --git a/charts/cluster/templates/cluster.yaml b/charts/cluster/templates/cluster.yaml index 5ff0bb2fa..169683fef 100644 --- a/charts/cluster/templates/cluster.yaml +++ b/charts/cluster/templates/cluster.yaml @@ -13,18 +13,22 @@ metadata: {{- end }} spec: instances: {{ .Values.cluster.instances }} - imageName: {{ include "cluster.imageName" . }} + {{- include "cluster.image" . | nindent 2 }} imagePullPolicy: {{ .Values.cluster.imagePullPolicy }} - {{- with .Values.cluster.imagePullSecrets}} + {{- with .Values.cluster.imagePullSecrets }} imagePullSecrets: {{- . 
| toYaml | nindent 4 }} {{- end }} - postgresUID: {{ .Values.cluster.postgresUID }} - postgresGID: {{ .Values.cluster.postgresGID }} + postgresUID: {{ include "cluster.postgresUID" . }} + postgresGID: {{ include "cluster.postgresGID" . }} storage: size: {{ .Values.cluster.storage.size }} storageClass: {{ .Values.cluster.storage.storageClass }} - +{{- if .Values.cluster.walStorage.enabled }} + walStorage: + size: {{ .Values.cluster.walStorage.size }} + storageClass: {{ .Values.cluster.walStorage.storageClass }} +{{- end }} {{- with .Values.cluster.resources }} resources: {{- toYaml . | nindent 4 }} @@ -52,9 +56,16 @@ spec: {{- if eq .Values.type "timescaledb" }} - timescaledb {{- end }} + {{- with .Values.cluster.postgresql.shared_preload_libraries }} + {{- toYaml . | nindent 6 }} + {{- end }} {{- with .Values.cluster.postgresql }} + pg_hba: + {{- toYaml .pg_hba | nindent 6 }} + pg_ident: + {{- toYaml .pg_ident | nindent 6 }} parameters: - {{- toYaml . | nindent 6 }} + {{- toYaml .parameters | nindent 6 }} {{ end }} managed: @@ -65,10 +76,29 @@ spec: monitoring: enablePodMonitor: {{ and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.podMonitor.enabled }} + disableDefaultQueries: {{ .Values.cluster.monitoring.disableDefaultQueries }} {{- if not (empty .Values.cluster.monitoring.customQueries) }} customQueriesConfigMap: - name: {{ include "cluster.fullname" . }}-monitoring key: custom-queries {{- end }} + {{- if not (empty .Values.cluster.monitoring.customQueriesSecret) }} + {{- with .Values.cluster.monitoring.customQueriesSecret }} + customQueriesSecret: + {{- toYaml . | nindent 6 }} + {{ end }} + {{- end }} + {{- if not (empty .Values.cluster.monitoring.podMonitor.relabelings) }} + {{- with .Values.cluster.monitoring.podMonitor.relabelings }} + podMonitorRelabelings: + {{- toYaml . | nindent 6 }} + {{ end }} + {{- end }} + {{- if not (empty .Values.cluster.monitoring.podMonitor.metricRelabelings) }} + {{- with .Values.cluster.monitoring.podMonitor.metricRelabelings }} + podMonitorMetricRelabelings: + {{- toYaml . | nindent 6 }} + {{ end }} + {{- end }} {{ include "cluster.bootstrap" . | nindent 2 }} {{ include "cluster.backup" . | nindent 2 }} diff --git a/charts/cluster/templates/image-catalog-timescaledb-ha.yaml b/charts/cluster/templates/image-catalog-timescaledb-ha.yaml new file mode 100644 index 000000000..9728f5673 --- /dev/null +++ b/charts/cluster/templates/image-catalog-timescaledb-ha.yaml @@ -0,0 +1,18 @@ +{{- if eq (include "cluster.useTimescaleDBDefaults" .) "true" -}} +apiVersion: postgresql.cnpg.io/v1 +kind: ImageCatalog +metadata: + name: {{ include "cluster.fullname" . 
}}-timescaledb-ha +spec: + images: + - major: 12 + image: timescale/timescaledb-ha:pg12-ts{{ .Values.version.timescaledb }} + - major: 13 + image: timescale/timescaledb-ha:pg13-ts{{ .Values.version.timescaledb }} + - major: 14 + image: timescale/timescaledb-ha:pg14-ts{{ .Values.version.timescaledb }} + - major: 15 + image: timescale/timescaledb-ha:pg15-ts{{ .Values.version.timescaledb }} + - major: 16 + image: timescale/timescaledb-ha:pg16-ts{{ .Values.version.timescaledb }} +{{ end }} diff --git a/charts/cluster/templates/image-catalog.yaml b/charts/cluster/templates/image-catalog.yaml new file mode 100644 index 000000000..6dc707222 --- /dev/null +++ b/charts/cluster/templates/image-catalog.yaml @@ -0,0 +1,12 @@ +{{ if and .Values.imageCatalog.create (not (empty .Values.imageCatalog.images )) }} +apiVersion: postgresql.cnpg.io/v1 +kind: ImageCatalog +metadata: + name: {{ include "cluster.fullname" . }} +spec: + images: + {{- range $image := .Values.imageCatalog.images }} + - image: {{ $image.image }} + major: {{ $image.major }} + {{- end }} +{{- end }} diff --git a/charts/cluster/templates/pooler.yaml b/charts/cluster/templates/pooler.yaml index 5e01fe498..13a5a0681 100644 --- a/charts/cluster/templates/pooler.yaml +++ b/charts/cluster/templates/pooler.yaml @@ -1,21 +1,48 @@ -{{ if .Values.pooler.enabled }} +{{- range .Values.poolers }} +--- apiVersion: postgresql.cnpg.io/v1 kind: Pooler metadata: - name: {{ include "cluster.fullname" . }}-pooler-rw + name: {{ include "cluster.fullname" $ }}-pooler-{{ .name }} spec: cluster: - name: {{ include "cluster.fullname" . }} - instances: {{ .Values.pooler.instances }} - type: {{ .Values.pooler.type }} + name: {{ include "cluster.fullname" $ }} + instances: {{ .instances }} + type: {{ default "rw" .type }} pgbouncer: - poolMode: {{ .Values.pooler.poolMode }} + poolMode: {{ default "session" .poolMode }} + {{- with .authQuerySecret }} + authQuerySecret: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .authQuery }} + authQuery: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .parameters }} parameters: - {{- .Values.pooler.parameters | toYaml | nindent 6 }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .pg_hba }} + pg_hba: + {{- toYaml . | nindent 6 }} + {{- end }} + {{ with .monitoring }} monitoring: - enablePodMonitor: {{ and .Values.pooler.monitoring.enabled .Values.pooler.monitoring.podMonitor.enabled }} - {{- with .Values.pooler.template }} + {{- if not (empty .podMonitor) }} + enablePodMonitor: {{ and .enabled .podMonitor.enabled }} + {{- with .podMonitor.relabelings }} + podMonitorRelabelings: + {{- toYaml . | nindent 6 }} + {{ end }} + {{- with .podMonitor.metricRelabelings }} + podMonitorMetricRelabelings: + {{- toYaml . | nindent 6 }} + {{ end }} + {{- end }} + {{- end }} + {{- with .template }} template: {{- . 
| toYaml | nindent 4 }} {{- end }} -{{ end }} +{{- end }} diff --git a/charts/cluster/templates/backup-azure-recovery-creds.yaml b/charts/cluster/templates/recovery-azure-creds.yaml similarity index 67% rename from charts/cluster/templates/backup-azure-recovery-creds.yaml rename to charts/cluster/templates/recovery-azure-creds.yaml index b4aecb558..9fb707651 100644 --- a/charts/cluster/templates/backup-azure-recovery-creds.yaml +++ b/charts/cluster/templates/recovery-azure-creds.yaml @@ -1,11 +1,11 @@ -{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "azure") }} +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "azure") .Values.recovery.secret.create }} apiVersion: v1 kind: Secret metadata: - name: {{ include "cluster.fullname" . }}-backup-azure-recovery-creds + name: {{ default (printf "%s-recovery-azure-creds" (include "cluster.fullname" .)) .Values.recovery.secret.name }} data: AZURE_CONNECTION_STRING: {{ .Values.recovery.azure.connectionString | b64enc | quote }} AZURE_STORAGE_ACCOUNT: {{ .Values.recovery.azure.storageAccount | b64enc | quote }} AZURE_STORAGE_KEY: {{ .Values.recovery.azure.storageKey | b64enc | quote }} AZURE_STORAGE_SAS_TOKEN: {{ .Values.recovery.azure.storageSasToken | b64enc | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/cluster/templates/recovery-google-creds.yaml b/charts/cluster/templates/recovery-google-creds.yaml new file mode 100644 index 000000000..e7366ec4c --- /dev/null +++ b/charts/cluster/templates/recovery-google-creds.yaml @@ -0,0 +1,8 @@ +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "google") .Values.recovery.secret.create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ default (printf "%s-recovery-google-creds" (include "cluster.fullname" .)) .Values.recovery.secret.name }} +data: + APPLICATION_CREDENTIALS: {{ .Values.recovery.google.applicationCredentials | b64enc | quote }} +{{- end }} diff --git a/charts/cluster/templates/recovery-pg_basebackup-password.yaml b/charts/cluster/templates/recovery-pg_basebackup-password.yaml new file mode 100644 index 000000000..456ee75d9 --- /dev/null +++ b/charts/cluster/templates/recovery-pg_basebackup-password.yaml @@ -0,0 +1,8 @@ +{{- if and (eq .Values.mode "recovery") (eq .Values.recovery.method "pg_basebackup") .Values.recovery.pgBaseBackup.source.passwordSecret.create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ default (printf "%s-pg-basebackup-password" (include "cluster.fullname" .)) .Values.recovery.pgBaseBackup.source.passwordSecret.name }} +data: + {{ .Values.recovery.pgBaseBackup.source.passwordSecret.key }}: {{ required ".Values.recovery.pgBaseBackup.source.passwordSecret.value required when creating a password secret." 
.Values.recovery.pgBaseBackup.source.passwordSecret.value | b64enc | quote }} +{{- end }} diff --git a/charts/cluster/templates/backup-s3-recovery-creds.yaml b/charts/cluster/templates/recovery-s3-creds.yaml similarity index 64% rename from charts/cluster/templates/backup-s3-recovery-creds.yaml rename to charts/cluster/templates/recovery-s3-creds.yaml index 9cc615fcd..950c74c4b 100644 --- a/charts/cluster/templates/backup-s3-recovery-creds.yaml +++ b/charts/cluster/templates/recovery-s3-creds.yaml @@ -1,9 +1,9 @@ -{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "s3") }} +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "s3") .Values.recovery.secret.create }} apiVersion: v1 kind: Secret metadata: - name: {{ include "cluster.fullname" . }}-backup-s3-recovery-creds + name: {{ default (printf "%s-recovery-s3-creds" (include "cluster.fullname" .)) .Values.recovery.secret.name }} data: ACCESS_KEY_ID: {{ required ".Values.recovery.s3.accessKey is required, but not specified." .Values.recovery.s3.accessKey | b64enc | quote }} ACCESS_SECRET_KEY: {{ required ".Values.recovery.s3.secretKey is required, but not specified." .Values.recovery.s3.secretKey | b64enc | quote }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml b/charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml new file mode 100644 index 000000000..ce6544e2e --- /dev/null +++ b/charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml @@ -0,0 +1,133 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: monitoring-cluster + labels: + foo: bar + annotations: + foo: bar +spec: + instances: 2 + storage: + size: 256Mi + storageClass: standard + monitoring: + disableDefaultQueries: true + customQueriesConfigMap: + - name: monitoring-cluster-monitoring + key: custom-queries + enablePodMonitor: true + podMonitorRelabelings: + - action: replace + replacement: test + targetLabel: environment + - action: replace + replacement: alpha + targetLabel: team + podMonitorMetricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: monitoring-cluster +spec: + selector: + matchLabels: + cnpg.io/cluster: monitoring-cluster + podMetricsEndpoints: + - bearerTokenSecret: + key: '' + name: '' + relabelings: + - targetLabel: environment + replacement: test + - targetLabel: team + replacement: alpha + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: monitoring-cluster-pooler-rw +spec: + selector: + matchLabels: + cnpg.io/poolerName: monitoring-cluster-pooler-rw + podMetricsEndpoints: + - bearerTokenSecret: + key: '' + name: '' + relabelings: + - targetLabel: type + replacement: rw + action: replace + - targetLabel: team + replacement: alpha + action: replace + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: monitoring-cluster-pooler-ro +spec: + selector: + matchLabels: + cnpg.io/poolerName: monitoring-cluster-pooler-ro + podMetricsEndpoints: + 
- bearerTokenSecret: + key: '' + name: '' + relabelings: + - targetLabel: type + replacement: ro + action: replace + - targetLabel: team + replacement: alpha + action: replace + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster +--- +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: monitoring-cluster-alert-rules +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: monitoring-cluster-monitoring +data: + custom-queries: | + pg_cache_hit_ratio: + query: "SELECT current_database() as datname, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM pg_statio_user_tables;" + metrics: + - datname: + description: Name of the database + usage: LABEL + - ratio: + description: Cache hit ratio + usage: GAUGE diff --git a/charts/cluster/test/monitoring/01-monitoring_cluster.yaml b/charts/cluster/test/monitoring/01-monitoring_cluster.yaml new file mode 100644 index 000000000..3dc3cf648 --- /dev/null +++ b/charts/cluster/test/monitoring/01-monitoring_cluster.yaml @@ -0,0 +1,78 @@ +type: postgresql +mode: standalone +cluster: + instances: 2 + storage: + size: 256Mi + storageClass: standard + monitoring: + enabled: true + disableDefaultQueries: true + customQueries: + - name: "pg_cache_hit_ratio" + query: "SELECT current_database() as datname, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM pg_statio_user_tables;" + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - ratio: + usage: GAUGE + description: "Cache hit ratio" + podMonitor: + relabelings: + - targetLabel: environment + replacement: test + - targetLabel: team + replacement: alpha + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster + additionalLabels: + foo: bar + annotations: + foo: bar +backups: + enabled: false +poolers: + - name: rw + type: rw + instances: 1 + monitoring: + enabled: true + podMonitor: + enabled: true + relabelings: + - targetLabel: type + replacement: rw + - targetLabel: team + replacement: alpha + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster + - name: ro + type: ro + instances: 1 + monitoring: + enabled: true + podMonitor: + enabled: true + relabelings: + - targetLabel: type + replacement: ro + - targetLabel: team + replacement: alpha + metricRelabelings: + - action: replace + sourceLabels: + - cluster + targetLabel: cnpg_cluster + - action: labeldrop + regex: cluster \ No newline at end of file diff --git a/charts/cluster/test/monitoring/chainsaw-test.yaml b/charts/cluster/test/monitoring/chainsaw-test.yaml new file mode 100644 index 000000000..ce647a48d --- /dev/null +++ b/charts/cluster/test/monitoring/chainsaw-test.yaml @@ -0,0 +1,29 @@ +## +# This is a test that checks if PodMonitors, ConfigMaps and PrometheusRules are correctly provisioned when requested. 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: monitoring +spec: + timeouts: + apply: 1s + assert: 20s + cleanup: 30s + steps: + - name: Install the monitoring cluster + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-monitoring_cluster.yaml \ + --wait \ + monitoring ../../ + - assert: + file: ./01-monitoring_cluster-assert.yaml + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE monitoring diff --git a/charts/cluster/test/pooler/01-pooler_cluster-assert.yaml b/charts/cluster/test/pooler/01-pooler_cluster-assert.yaml new file mode 100644 index 000000000..db23167c7 --- /dev/null +++ b/charts/cluster/test/pooler/01-pooler_cluster-assert.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pooler-cluster-pooler-rw +status: + readyReplicas: 2 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pooler-cluster-pooler-ro +status: + readyReplicas: 2 +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Pooler +metadata: + name: pooler-cluster-pooler-rw +spec: + cluster: + name: pooler-cluster + instances: 2 + pgbouncer: + poolMode: transaction + type: rw +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Pooler +metadata: + name: pooler-cluster-pooler-ro +spec: + cluster: + name: pooler-cluster + instances: 2 + pgbouncer: + poolMode: session + type: ro diff --git a/charts/cluster/test/pooler/01-pooler_cluster.yaml b/charts/cluster/test/pooler/01-pooler_cluster.yaml new file mode 100644 index 000000000..8fcbf6555 --- /dev/null +++ b/charts/cluster/test/pooler/01-pooler_cluster.yaml @@ -0,0 +1,17 @@ +type: postgresql +mode: standalone +cluster: + instances: 2 + storage: + size: 256Mi + storageClass: standard +backups: + enabled: false +poolers: + - name: rw + type: rw + instances: 2 + poolMode: transaction + - name: ro + type: ro + instances: 2 diff --git a/charts/cluster/test/pooler/chainsaw-test.yaml b/charts/cluster/test/pooler/chainsaw-test.yaml new file mode 100644 index 000000000..427b84eb4 --- /dev/null +++ b/charts/cluster/test/pooler/chainsaw-test.yaml @@ -0,0 +1,30 @@ +## +# This is a test that verifies that non-default configuration options are correctly propagated to the CNPG cluster. +# P.S. This test is not designed to have a good running configuration, it is designed to test the configuration propagation! 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: pooler +spec: + timeouts: + apply: 1s + assert: 20s + cleanup: 30s + steps: + - name: Install the non-default configuration cluster + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-pooler_cluster.yaml \ + --wait \ + pooler ../../ + - assert: + file: ./01-pooler_cluster-assert.yaml + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE pooler diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml new file mode 100644 index 000000000..5f5c62a68 --- /dev/null +++ b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml @@ -0,0 +1,82 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: non-default-configuration-cluster + labels: + foo: bar + annotations: + foo: bar +spec: + imageName: ghcr.io/cloudnative-pg/crazycustomimage:99.99 + imagePullPolicy: Always + postgresUID: 1001 + postgresGID: 1002 + instances: 2 + postgresql: + parameters: + max_connections: "42" + pg_hba: + - host all 1.2.3.4/32 trust + pg_ident: + - mymap /^(.*)@mydomain\.com$ \1 + shared_preload_libraries: + - pgaudit + bootstrap: + initdb: + database: mydb + owner: dante + secret: + name: mydb-secret + postInitApplicationSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + postInitTemplateSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + postInitSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + superuserSecret: + name: supersecret-secret + enableSuperuserAccess: true + certificates: + serverCASecret: ca-secret + serverTLSSecret: tls-secret + replicationTLSSecret: replication-tls-secret + clientCASecret: client-ca-secret + imagePullSecrets: + - name: image-pull-secret + storage: + size: 256Mi + storageClass: standard + walStorage: + size: 256Mi + storageClass: standard + affinity: + topologyKey: kubernetes.io/hostname + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - node1 + - node2 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 100m + memory: 256Mi + priorityClassName: mega-high + primaryUpdateStrategy: supervised + primaryUpdateMethod: restart + logLevel: warning + managed: + roles: + - name: dante + ensure: present + comment: Dante Alighieri + login: true + inRoles: + - pg_monitor + - pg_signal_backend diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml new file mode 100644 index 000000000..570ea8409 --- /dev/null +++ b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml @@ -0,0 +1,81 @@ +type: postgresql +mode: standalone +cluster: + instances: 2 + imageName: ghcr.io/cloudnative-pg/crazycustomimage:99.99 + imagePullPolicy: Always + imagePullSecrets: + - name: "image-pull-secret" + storage: + size: 256Mi + storageClass: standard + walStorage: + enabled: true + size: 256Mi + storageClass: standard + postgresUID: 1001 + postgresGID: 1002 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 100m + memory: 256Mi + 
priorityClassName: mega-high + primaryUpdateMethod: restart + primaryUpdateStrategy: supervised + logLevel: warning + affinity: + topologyKey: kubernetes.io/hostname + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - node1 + - node2 + certificates: + serverCASecret: ca-secret + serverTLSSecret: tls-secret + replicationTLSSecret: replication-tls-secret + clientCASecret: client-ca-secret + enableSuperuserAccess: true + superuserSecret: supersecret-secret + roles: + - name: dante + ensure: present + comment: Dante Alighieri + login: true + inRoles: + - pg_monitor + - pg_signal_backend + postgresql: + parameters: + max_connections: "42" + pg_hba: + - host all 1.2.3.4/32 trust + pg_ident: + - mymap /^(.*)@mydomain\.com$ \1 + shared_preload_libraries: + - pgaudit + initdb: + database: mydb + owner: dante + secret: + name: mydb-secret + postInitApplicationSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + postInitTemplateSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + postInitSQL: + - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); + additionalLabels: + foo: bar + annotations: + foo: bar + +backups: + enabled: false diff --git a/charts/cluster/test/postgresql-cluster-configuration/chainsaw-test.yaml b/charts/cluster/test/postgresql-cluster-configuration/chainsaw-test.yaml new file mode 100644 index 000000000..b02b4900d --- /dev/null +++ b/charts/cluster/test/postgresql-cluster-configuration/chainsaw-test.yaml @@ -0,0 +1,30 @@ +## +# This is a test that verifies that non-default configuration options are correctly propagated to the CNPG cluster. +# P.S. This test is not designed to have a good running configuration, it is designed to test the configuration propagation! 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: postgresql-cluster-configuration +spec: + timeouts: + apply: 1s + assert: 5s + cleanup: 30s + steps: + - name: Install the non-default configuration cluster + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-non_default_configuration_cluster.yaml \ + --wait \ + non-default-configuration ../../ + - assert: + file: ./01-non_default_configuration_cluster-assert.yaml + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE non-default-configuration diff --git a/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml new file mode 100644 index 000000000..9c0f3eb48 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml b/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml new file mode 100644 index 000000000..97cfc7389 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml @@ -0,0 +1,16 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: minio-cleanup + image: minio/mc + command: ['sh', '-c'] + args: + - | + mc alias set myminio https://minio.minio.svc.cluster.local minio minio123 + mc rm --recursive --force myminio/mybucket/postgresql-minio-backup-restore diff --git a/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml new file mode 100644 index 000000000..0663e78c9 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: standalone-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml b/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml new file mode 100644 index 000000000..7db3fe9af --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml @@ -0,0 +1,27 @@ +type: postgresql +mode: standalone + +cluster: + instances: 2 + storage: + size: 256Mi + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/02-data_write-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/02-data_write-assert.yaml new file mode 100644 index 000000000..831f963d9 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/02-data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml 
b/charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml new file mode 100644 index 000000000..e674d8b53 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: standalone-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + psql "$DB_URI" -c "CREATE TABLE mygoodtable (id serial PRIMARY KEY);" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml b/charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml new file mode 100644 index 000000000..c3afd4676 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml @@ -0,0 +1,8 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + method: barmanObjectStore + cluster: + name: standalone-cluster diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml new file mode 100644 index 000000000..7b1e9e534 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: standalone-cluster + method: barmanObjectStore +status: + phase: completed diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml new file mode 100644 index 000000000..cbd9645c5 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: standalone-cluster + method: barmanObjectStore +status: + phase: running diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml b/charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml new file mode 100644 index 000000000..52862bf07 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: backup-checkpoint +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: create-checkpoint + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: standalone-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + END_TIME=$(( $(date +%s) + 30 )) + while [ $(date +%s) -lt $END_TIME ]; do + psql "$DB_URI" -c "SELECT pg_switch_wal();CHECKPOINT;" + sleep 5 + done diff --git a/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml new file mode 100644 index 000000000..ad9be77a7 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +status: + 
succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml b/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml new file mode 100644 index 000000000..2e56595de --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: configmap-creator-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: configmap-creator +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-creator-binding +subjects: +- kind: ServiceAccount + name: configmap-creator-sa +roleRef: + kind: Role + name: configmap-creator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +spec: + template: + spec: + serviceAccountName: configmap-creator-sa + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: standalone-cluster-superuser + key: uri + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client kubectl coreutils + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + DATE_NO_BAD_TABLE=$(date --rfc-3339=ns) + sleep 30 + psql "$DB_URI" -c "CREATE TABLE mybadtable (id serial PRIMARY KEY);" + kubectl create configmap date-no-bad-table --from-literal=date="$DATE_NO_BAD_TABLE" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml new file mode 100644 index 000000000..90c4b24db --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: recovery-backup-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml b/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml new file mode 100644 index 000000000..c2731b3bf --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml @@ -0,0 +1,48 @@ +type: postgresql +mode: recovery + +cluster: + instances: 2 + storage: + size: 256Mi + +recovery: + method: backup + backupName: "post-init-backup" + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v2" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/06-data_test-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/06-data_test-assert.yaml new file mode 
100644 index 000000000..2ef11b8c9 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/06-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml b/charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml new file mode 100644 index 000000000..86a15439b --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: recovery-backup-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml new file mode 100644 index 000000000..f8693036b --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: recovery-object-store-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml b/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml new file mode 100644 index 000000000..7f059e394 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml @@ -0,0 +1,48 @@ +type: postgresql +mode: recovery + +cluster: + instances: 2 + storage: + size: 256Mi + +recovery: + method: object_store + clusterName: "standalone-cluster" + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v2" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/08-data_test-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/08-data_test-assert.yaml new file mode 100644 index 000000000..36eb4ff81 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/08-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-object-store +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml b/charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml new file mode 100644 index 000000000..94ac2c34e --- /dev/null +++ 
b/charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-object-store +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: recovery-object-store-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml new file mode 100644 index 000000000..2b6b9651f --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: recovery-backup-pitr-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml b/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml new file mode 100644 index 000000000..c2731b3bf --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml @@ -0,0 +1,48 @@ +type: postgresql +mode: recovery + +cluster: + instances: 2 + storage: + size: 256Mi + +recovery: + method: backup + backupName: "post-init-backup" + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/postgresql-minio-backup-restore/v2" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/10-data_test-assert.yaml b/charts/cluster/test/postgresql-minio-backup-restore/10-data_test-assert.yaml new file mode 100644 index 000000000..6f14d5f23 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/10-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml b/charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml new file mode 100644 index 000000000..5fb4faf39 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: recovery-backup-pitr-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo 
$DB_URI | sed "s|/\*|/|" ) + set -e + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" + echo "Good table exists" + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mybadtable$$)' --csv -q 2>/dev/null)" = "f" + echo "Bad table does not exist" diff --git a/charts/cluster/test/postgresql-minio-backup-restore/chainsaw-test.yaml b/charts/cluster/test/postgresql-minio-backup-restore/chainsaw-test.yaml new file mode 100644 index 000000000..79a7d2d76 --- /dev/null +++ b/charts/cluster/test/postgresql-minio-backup-restore/chainsaw-test.yaml @@ -0,0 +1,148 @@ +## +# This test sets up a CNPG cluster with MinIO backups and then restores the cluster from the backup using backup, +# object store, and object store with PITR recovery. +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: postgresql-minio-backup-restore +spec: + timeouts: + apply: 1s + assert: 2m + cleanup: 1m + steps: + - name: Clear the MinIO bucket + try: + - apply: + file: ./00-minio_cleanup.yaml + - assert: + file: ./00-minio_cleanup-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=minio_cleanup + - name: Install the standalone cluster + try: + - script: + content: | + kubectl -n $NAMESPACE create secret generic kube-root-ca.crt --from-literal=ca.crt="$(kubectl -n kube-system get configmaps kube-root-ca.crt -o jsonpath='{.data.ca\.crt}')" --dry-run=client -o yaml | kubectl apply -f - + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-standalone_cluster.yaml \ + --wait \ + standalone ../../ + - assert: + file: 01-standalone_cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - name: Write some data to the cluster + try: + - apply: + file: ./02-data_write.yaml + - assert: + file: ./02-data_write-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-write + - name: Create a backup + try: + - apply: + file: ./03-backup.yaml + - assert: + file: ./03-backup_running-assert.yaml + - apply: + file: ./03-checkpoint.yaml + - assert: + file: ./03-backup_completed-assert.yaml + - name: Write more data to the database after the backup + try: + - apply: + file: ./04-post_backup_data_write.yaml + - assert: + file: ./04-post_backup_data_write-assert.yaml + timeouts: + apply: 1s + assert: 10m + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Backup + - name: Create a recovery cluster from backup + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./05-recovery_backup_cluster.yaml \ + --wait \ + recovery-backup ../../ + - assert: + file: ./05-recovery_backup_cluster-assert.yaml + - name: Verify the data on the backup recovery cluster exists + try: + - apply: + file: 06-data_test.yaml + - assert: + file: 06-data_test-assert.yaml + - name: Create a recovery cluster from object store + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./07-recovery_object_store_cluster.yaml \ + --wait \ + recovery-object-store ../../ + - assert: + file: ./07-recovery_object_store_cluster-assert.yaml + - name: Verify the data on the object store recovery cluster exists + try: + - apply: + file: 08-data_test.yaml + - assert: + file: 08-data_test-assert.yaml + - name: Create a 
recovery cluster from backup with a PITR target + try: + - script: + content: | + DATE_NO_BAD_TABLE=$(kubectl -n $NAMESPACE get configmap date-no-bad-table -o 'jsonpath={.data.date}') + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./09-recovery_backup_pitr_cluster.yaml \ + --set recovery.pitrTarget.time="$DATE_NO_BAD_TABLE" \ + --wait \ + recovery-backup-pitr ../../ + - assert: + file: ./09-recovery_backup_pitr_cluster-assert.yaml + - name: Verify the pre-backup data on the recovery cluster exists but not the post-backup data + try: + - apply: + file: 10-data_test.yaml + - assert: + file: 10-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - podLogs: + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE standalone + helm uninstall --namespace $NAMESPACE recovery-backup + helm uninstall --namespace $NAMESPACE recovery-object-store + helm uninstall --namespace $NAMESPACE recovery-backup-pitr diff --git a/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml b/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml new file mode 100644 index 000000000..90ea90fd5 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: source-cluster +status: + readyInstances: 1 diff --git a/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster.yaml b/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster.yaml new file mode 100644 index 000000000..c11fed595 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster.yaml @@ -0,0 +1,6 @@ +type: postgresql +mode: "standalone" +cluster: + instances: 1 +backups: + enabled: false \ No newline at end of file diff --git a/charts/cluster/test/postgresql-pg_basebackup/01-data_write-assert.yaml b/charts/cluster/test/postgresql-pg_basebackup/01-data_write-assert.yaml new file mode 100644 index 000000000..831f963d9 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/01-data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml b/charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml new file mode 100644 index 000000000..cc5a743ad --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml @@ -0,0 +1,30 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_USER + valueFrom: + secretKeyRef: + name: source-cluster-superuser + key: username + - name: DB_PASS + valueFrom: + secretKeyRef: + name: source-cluster-superuser + key: password + - name: DB_URI + value: postgres://$(DB_USER):$(DB_PASS)@source-cluster-rw:5432 + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + psql "$DB_URI" -c "CREATE DATABASE mygooddb;" + psql "$DB_URI/mygooddb" -c "CREATE TABLE mygoodtable (id serial PRIMARY KEY);" diff --git a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml b/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml new file mode 100644 index 
000000000..9b953d44a --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-basebackup-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml b/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml new file mode 100644 index 000000000..d389200e8 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml @@ -0,0 +1,22 @@ +type: postgresql +mode: "recovery" +recovery: + method: "pg_basebackup" + pgBaseBackup: + source: + host: "source-cluster-rw" + database: "mygooddb" + username: "streaming_replica" + sslMode: "require" + sslKeySecret: + name: source-cluster-replication + key: tls.key + sslCertSecret: + name: source-cluster-replication + key: tls.crt + +cluster: + instances: 2 + +backups: + enabled: false \ No newline at end of file diff --git a/charts/cluster/test/postgresql-pg_basebackup/03-data_test-assert.yaml b/charts/cluster/test/postgresql-pg_basebackup/03-data_test-assert.yaml new file mode 100644 index 000000000..04df941e4 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/03-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test +status: + succeeded: 1 diff --git a/charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml b/charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml new file mode 100644 index 000000000..40eb9029a --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: pg-basebackup-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + test "$(psql "${DB_URI}mygooddb" -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" diff --git a/charts/cluster/test/postgresql-pg_basebackup/chainsaw-test.yaml b/charts/cluster/test/postgresql-pg_basebackup/chainsaw-test.yaml new file mode 100644 index 000000000..85f2d9743 --- /dev/null +++ b/charts/cluster/test/postgresql-pg_basebackup/chainsaw-test.yaml @@ -0,0 +1,64 @@ +## +# This is a test that provisions a regular (non CNPG) PostgreSQL cluster and attempts to perform a pg_basebackup recovery. 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: postgresql-pg-basebackup +spec: + timeouts: + apply: 1s + assert: 2m + cleanup: 1m + steps: + - name: Install the external PostgreSQL cluster + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./00-source-cluster.yaml \ + --wait \ + source ../../ + - assert: + file: ./00-source-cluster-assert.yaml + - apply: + file: ./01-data_write.yaml + - assert: + file: ./01-data_write-assert.yaml + - name: Install the pg_basebackup cluster + timeouts: + assert: 5m + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./02-pg_basebackup-cluster.yaml \ + --wait \ + pg-basebackup ../../ + - assert: + file: ./02-pg_basebackup-cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - name: Verify the data from step 1 exists + try: + - apply: + file: ./03-data_test.yaml + - assert: + file: ./03-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE source + helm uninstall --namespace $NAMESPACE pg-basebackup diff --git a/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup-assert.yaml new file mode 100644 index 000000000..9c0f3eb48 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml b/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml new file mode 100644 index 000000000..ce71b1ef7 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml @@ -0,0 +1,16 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: minio-cleanup + image: minio/mc + command: ['sh', '-c'] + args: + - | + mc alias set myminio https://minio.minio.svc.cluster.local minio minio123 + mc rm --recursive --force myminio/mybucket/timescale diff --git a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml new file mode 100644 index 000000000..3bbd2f8fe --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: timescale-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml b/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml new file mode 100644 index 000000000..f84117fe0 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml @@ -0,0 +1,28 @@ +type: timescaledb +mode: standalone + +cluster: + instances: 2 + storage: + size: 256Mi + +backups: + enabled: true + + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/timescale/v1" + accessKey: "minio" + 
secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml new file mode 100644 index 000000000..aa63a21c9 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: timescale-test +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml b/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml new file mode 100644 index 000000000..9b7581f96 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml @@ -0,0 +1,22 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: timescale-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: timescale-cluster-app + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM pg_extension WHERE extname = '\''timescaledb'\'')' --csv -q 2>/dev/null)" = "t" \ No newline at end of file diff --git a/charts/cluster/test/timescale-minio-backup-restore/04-data_write-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/04-data_write-assert.yaml new file mode 100644 index 000000000..831f963d9 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/04-data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml b/charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml new file mode 100644 index 000000000..b827de143 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: configmap-creator-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: configmap-creator +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-creator-binding +subjects: +- kind: ServiceAccount + name: configmap-creator-sa +roleRef: + kind: Role + name: configmap-creator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +spec: + template: + spec: + serviceAccountName: configmap-creator-sa + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: timescale-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client kubectl coreutils + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + psql "$DB_URI" -c "CREATE TABLE mygoodtable (id serial PRIMARY KEY);" + sleep 5 + DATE_NO_BAD_TABLE=$(date --rfc-3339=ns) + kubectl create configmap date-no-bad-table --from-literal=date="$DATE_NO_BAD_TABLE" + sleep 5 diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml b/charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml new file mode 100644 index 000000000..be5e4b181 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml 
@@ -0,0 +1,8 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + method: barmanObjectStore + cluster: + name: timescale-cluster diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml new file mode 100644 index 000000000..040b1a49e --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: timescale-cluster + method: barmanObjectStore +status: + phase: completed diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml new file mode 100644 index 000000000..dc35727a0 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: timescale-cluster + method: barmanObjectStore +status: + phase: running diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml b/charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml new file mode 100644 index 000000000..3ba7fc727 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: backup-checkpoint +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: create-checkpoint + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: timescale-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + END_TIME=$(( $(date +%s) + 30 )) + while [ $(date +%s) -lt $END_TIME ]; do + psql "$DB_URI" -c "SELECT pg_switch_wal();CHECKPOINT;" + sleep 5 + done diff --git a/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write-assert.yaml new file mode 100644 index 000000000..ad9be77a7 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml b/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml new file mode 100644 index 000000000..8585b247d --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: timescale-cluster-superuser + key: uri + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + psql "$DB_URI" -c "CREATE TABLE mybadtable (id serial PRIMARY KEY);" diff --git 
a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml new file mode 100644 index 000000000..2b6b9651f --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: recovery-backup-pitr-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml b/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml new file mode 100644 index 000000000..7e9c38f55 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml @@ -0,0 +1,48 @@ +type: timescaledb +mode: recovery + +cluster: + instances: 2 + storage: + size: 256Mi + +recovery: + method: backup + backupName: "post-init-backup" + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/timescale/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/timescale/v2" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/timescale-minio-backup-restore/08-data_test-assert.yaml b/charts/cluster/test/timescale-minio-backup-restore/08-data_test-assert.yaml new file mode 100644 index 000000000..6f14d5f23 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/08-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml b/charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml new file mode 100644 index 000000000..5fb4faf39 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: recovery-backup-pitr-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + set -e + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" + echo "Good table exists" + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mybadtable$$)' --csv -q 2>/dev/null)" = "f" + echo "Bad table does not exist" diff --git a/charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml b/charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml new file mode 100644 index 000000000..496153398 --- /dev/null +++ b/charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml @@ -0,0 +1,129 @@ +## +# This test 
sets up a timescale cluster with MinIO backups and ensured that timescale extensions are installed and +# PITR recovery is enabled and working. +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: timescale +spec: + timeouts: + apply: 1s + assert: 5m + cleanup: 1m + steps: + - name: Clear the MinIO bucket + try: + - apply: + file: ./00-minio_cleanup.yaml + - assert: + file: ./00-minio_cleanup-assert.yaml + - name: Install a standalone timescale cluster + try: + - script: + content: | + kubectl -n $NAMESPACE create secret generic kube-root-ca.crt --from-literal=ca.crt="$(kubectl -n kube-system get configmaps kube-root-ca.crt -o jsonpath='{.data.ca\.crt}')" --dry-run=client -o yaml | kubectl apply -f - + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-timescale_cluster.yaml \ + --wait \ + timescale ../../ + - assert: + file: ./01-timescale_cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - podLogs: + selector: cnpg.io/cluster=timescale-cluster + - name: Verify timescale extensions are installed + timeouts: + apply: 1s + assert: 30s + try: + - apply: + file: 03-timescale_test.yaml + - assert: + file: 03-timescale_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Write some data to the cluster + timeouts: + apply: 1s + assert: 30s + try: + - apply: + file: 04-data_write.yaml + - assert: + file: 04-data_write-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Create a backup + try: + - apply: + file: ./05-backup.yaml + - assert: + file: ./05-backup_running-assert.yaml + - apply: + file: ./05-checkpoint.yaml + - assert: + file: ./05-backup_completed-assert.yaml + - name: Write more data to the database after the backup + try: + - apply: + file: ./06-post_backup_data_write.yaml + - assert: + file: ./06-post_backup_data_write-assert.yaml + timeouts: + apply: 1s + assert: 10m + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Backup + - name: Create a recovery cluster from backup with a PITR target + try: + - script: + content: | + DATE_NO_BAD_TABLE=$(kubectl -n $NAMESPACE get configmap date-no-bad-table -o 'jsonpath={.data.date}') + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./07-recovery_backup_pitr_cluster.yaml \ + --set recovery.pitrTarget.time="$DATE_NO_BAD_TABLE" \ + --wait \ + recovery-backup-pitr ../../ + - assert: + file: ./07-recovery_backup_pitr_cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - podLogs: + selector: cnpg.io/cluster=recovery-backup-pitr-cluster + - name: Verify the pre-backup data on the recovery cluster exists but not the post-backup data + try: + - apply: + file: 08-data_test.yaml + - assert: + file: 08-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - podLogs: + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE timescale diff --git a/charts/cluster/values.schema.json b/charts/cluster/values.schema.json index 46c874b1d..81899f1cd 100644 --- a/charts/cluster/values.schema.json +++ b/charts/cluster/values.schema.json @@ -137,6 +137,17 @@ } } }, + "secret": { + "type": "object", + "properties": { + "create": { + 
"type": "boolean" + }, + "name": { + "type": "string" + } + } + }, "wal": { "type": "object", "properties": { @@ -176,6 +187,9 @@ "enableSuperuserAccess": { "type": "boolean" }, + "imageCatalogRef": { + "type": "object" + }, "imageName": { "type": "string" }, @@ -200,6 +214,12 @@ "customQueries": { "type": "array" }, + "customQueriesSecret": { + "type": "array" + }, + "disableDefaultQueries": { + "type": "boolean" + }, "enabled": { "type": "boolean" }, @@ -208,6 +228,12 @@ "properties": { "enabled": { "type": "boolean" + }, + "metricRelabelings": { + "type": "array" + }, + "relabelings": { + "type": "array" } } }, @@ -231,7 +257,21 @@ "type": "integer" }, "postgresql": { - "type": "object" + "type": "object", + "properties": { + "parameters": { + "type": "object" + }, + "pg_hba": { + "type": "array" + }, + "pg_ident": { + "type": "array" + }, + "shared_preload_libraries": { + "type": "array" + } + } }, "primaryUpdateMethod": { "type": "string" @@ -261,65 +301,46 @@ }, "superuserSecret": { "type": "string" - } - } - }, - "fullnameOverride": { - "type": "string" - }, - "mode": { - "type": "string" - }, - "nameOverride": { - "type": "string" - }, - "pooler": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "instances": { - "type": "integer" }, - "monitoring": { + "walStorage": { "type": "object", "properties": { "enabled": { "type": "boolean" }, - "podMonitor": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - } - } - } - } - }, - "parameters": { - "type": "object", - "properties": { - "default_pool_size": { + "size": { "type": "string" }, - "max_client_conn": { + "storageClass": { "type": "string" } } + } + } + }, + "fullnameOverride": { + "type": "string" + }, + "imageCatalog": { + "type": "object", + "properties": { + "create": { + "type": "boolean" }, - "poolMode": { - "type": "string" - }, - "template": { - "type": "object" - }, - "type": { - "type": "string" + "images": { + "type": "array" } } }, + "mode": { + "type": "string" + }, + "nameOverride": { + "type": "string" + }, + "poolers": { + "type": "array" + }, "recovery": { "type": "object", "properties": { @@ -401,6 +422,90 @@ "method": { "type": "string" }, + "pgBaseBackup": { + "type": "object", + "properties": { + "database": { + "type": "string" + }, + "owner": { + "type": "string" + }, + "secret": { + "type": "string" + }, + "source": { + "type": "object", + "properties": { + "database": { + "type": "string" + }, + "host": { + "type": "string" + }, + "passwordSecret": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "port": { + "type": "integer" + }, + "sslCertSecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "sslKeySecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "sslMode": { + "type": "string" + }, + "sslRootCertSecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "username": { + "type": "string" + } + } + } + } + }, "pitrTarget": { "type": "object", "properties": { @@ -431,11 +536,36 @@ "type": "string" } } + }, + "secret": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } } } }, "type": { "type": "string" + }, + 
"version": { + "type": "object", + "properties": { + "postgis": { + "type": "string" + }, + "postgresql": { + "type": "string" + }, + "timescaledb": { + "type": "string" + } + } } } } diff --git a/charts/cluster/values.yaml b/charts/cluster/values.yaml index 7dad93eea..d45e5c1e9 100644 --- a/charts/cluster/values.yaml +++ b/charts/cluster/values.yaml @@ -7,8 +7,17 @@ fullnameOverride: "" # -- Type of the CNPG database. Available types: # * `postgresql` # * `postgis` +# * `timescaledb` type: postgresql +version: + # -- PostgreSQL major version to use + postgresql: "16" + # -- If using TimescaleDB, specify the version + timescaledb: "2.15" + # -- If using PostGIS, specify the version + postgis: "3.4" + ### # -- Cluster mode of operation. Available modes: # * `standalone` - default mode. Creates new or updates an existing CNPG cluster. @@ -75,6 +84,44 @@ recovery: bucket: "" gkeEnvironment: false applicationCredentials: "" + secret: + # -- Whether to create a secret for the backup credentials + create: true + # -- Name of the backup credentials secret + name: "" + + # See https://cloudnative-pg.io/documentation/1.22/bootstrap/#bootstrap-from-a-live-cluster-pg_basebackup + pgBaseBackup: + # -- Name of the database used by the application. Default: `app`. + database: app + # -- Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. + secret: "" + # -- Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch + owner: "" + source: + host: "" + port: 5432 + username: "" + database: "app" + sslMode: "verify-full" + passwordSecret: + # -- Whether to create a secret for the password + create: false + # -- Name of the secret containing the password + name: "" + # -- The key in the secret containing the password + key: "password" + # -- The password value to use when creating the secret + value: "" + sslKeySecret: + name: "" + key: "" + sslCertSecret: + name: "" + key: "" + sslRootCertSecret: + name: "" + key: "" cluster: @@ -85,6 +132,11 @@ cluster: # :@sha256: imageName: "" # Default value depends on type (postgresql/postgis/timescaledb) + # -- Reference to `ImageCatalog` of `ClusterImageCatalog`, if specified takes precedence over `cluster.imageName` + imageCatalogRef: {} + # kind: ImageCatalog + # name: postgresql + # -- Image pull policy. One of Always, Never or IfNotPresent. If not defined, it defaults to IfNotPresent. Cannot be updated. # More info: https://kubernetes.io/docs/concepts/containers/images#updating-images imagePullPolicy: IfNotPresent @@ -97,11 +149,16 @@ cluster: size: 8Gi storageClass: "" + walStorage: + enabled: false + size: 1Gi + storageClass: "" + # -- The UID of the postgres user inside the image, defaults to 26 - postgresUID: 26 + postgresUID: -1 # -- The GID of the postgres user inside the image, defaults to 26 - postgresGID: 26 + postgresGID: -1 # -- Resources requirements of every generated Pod. # Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. @@ -118,7 +175,7 @@ cluster: priorityClassName: "" # -- Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been - # successfully updated. It can be switchover (default) or in-place (restart). + # successfully updated. It can be switchover (default) or restart. 
primaryUpdateMethod: switchover # -- Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been @@ -163,28 +220,51 @@ cluster: podMonitor: # -- Whether to enable the PodMonitor enabled: true + # --The list of relabelings for the PodMonitor. + # Applied to samples before scraping. + relabelings: [] + # -- The list of metric relabelings for the PodMonitor. + # Applied to samples before ingestion. + metricRelabelings: [] prometheusRule: # -- Whether to enable the PrometheusRule automated alerts enabled: true # -- Exclude specified rules excludeRules: [] # - CNPGClusterZoneSpreadWarning + # -- Whether the default queries should be injected. + # Set it to true if you don't want to inject default queries into the cluster. + disableDefaultQueries: false # -- Custom Prometheus metrics + # Will be stored in the ConfigMap customQueries: [] - # - name: "pg_cache_hit_ratio" - # query: "SELECT current_database() as datname, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM pg_statio_user_tables;" - # metrics: - # - datname: - # usage: "LABEL" - # description: "Name of the database" - # - ratio: - # usage: GAUGE - # description: "Cache hit ratio" - - # -- Configuration of the PostgreSQL server. - # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration - postgresql: {} - # max_connections: 300 + # - name: "pg_cache_hit_ratio" + # query: "SELECT current_database() as datname, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM pg_statio_user_tables;" + # metrics: + # - datname: + # usage: "LABEL" + # description: "Name of the database" + # - ratio: + # usage: GAUGE + # description: "Cache hit ratio" + # -- The list of secrets containing the custom queries + customQueriesSecret: [] + # - name: custom-queries-secret + # key: custom-queries + + postgresql: + # -- PostgreSQL configuration options (postgresql.conf) + parameters: {} + # max_connections: 300 + # -- PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) + pg_hba: [] + # - host all all 10.244.0.0/16 md5 + # -- PostgreSQL User Name Maps rules (lines to be appended to the pg_ident.conf file) + pg_ident: [] + # - mymap /^(.*)@mydomain\.com$ \1 + # -- Lists of shared preload libraries to add to the default ones + shared_preload_libraries: [] + # - pgaudit # -- BootstrapInitDB is the configuration of the bootstrap process when initdb is used. # See: https://cloudnative-pg.io/documentation/current/bootstrap/ @@ -192,9 +272,14 @@ cluster: initdb: {} # database: app # owner: "" # Defaults to the database name - # secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch + # secret: + # name: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch + # options: [] + # encoding: UTF8 # postInitSQL: # - CREATE EXTENSION IF NOT EXISTS vector; + # postInitApplicationSQL: [] + # postInitTemplateSQL: [] additionalLabels: {} annotations: {} @@ -242,6 +327,11 @@ backups: bucket: "" gkeEnvironment: false applicationCredentials: "" + secret: + # -- Whether to create a secret for the backup credentials + create: true + # -- Name of the backup credentials secret + name: "" wal: # -- WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. 
@@ -272,28 +362,57 @@
   # -- Retention policy for backups
   retentionPolicy: "30d"
 
-
-pooler:
-  # -- Whether to enable PgBouncer
-  enabled: false
-  # -- PgBouncer type of service to forward traffic to.
-  type: rw
-  # -- PgBouncer pooling mode
-  poolMode: transaction
-  # -- Number of PgBouncer instances
-  instances: 3
-  # -- PgBouncer configuration parameters
-  parameters:
-    max_client_conn: "1000"
-    default_pool_size: "25"
-
-  monitoring:
-    # -- Whether to enable monitoring
-    enabled: false
-    podMonitor:
-      # -- Whether to enable the PodMonitor
-      enabled: true
-
-  # -- Custom PgBouncer deployment template.
-  # Use to override image, specify resources, etc.
-  template: {}
+imageCatalog:
+  # -- Whether to provision an image catalog. If imageCatalog.images is empty this option will be ignored.
+  create: true
+  # -- List of images to be provisioned in an image catalog.
+  images: []
+  # - image: ghcr.io/your_repo/your_image:your_tag
+  #   major: 16
+
+# -- List of PgBouncer poolers
+poolers: []
+  # -
+  #   # -- Pooler name
+  #   name: rw
+  #   # -- PgBouncer type of service to forward traffic to.
+  #   type: rw
+  #   # -- PgBouncer pooling mode
+  #   poolMode: transaction
+  #   # -- Number of PgBouncer instances
+  #   instances: 3
+  #   # -- PgBouncer configuration parameters
+  #   parameters:
+  #     max_client_conn: "1000"
+  #     default_pool_size: "25"
+  #   monitoring:
+  #     # -- Whether to enable monitoring
+  #     enabled: false
+  #     podMonitor:
+  #       # -- Whether to enable the PodMonitor
+  #       enabled: true
+  #   # -- Custom PgBouncer deployment template.
+  #   # Use to override image, specify resources, etc.
+  #   template: {}
+  # -
+  #   # -- Pooler name
+  #   name: ro
+  #   # -- PgBouncer type of service to forward traffic to.
+  #   type: ro
+  #   # -- PgBouncer pooling mode
+  #   poolMode: transaction
+  #   # -- Number of PgBouncer instances
+  #   instances: 3
+  #   # -- PgBouncer configuration parameters
+  #   parameters:
+  #     max_client_conn: "1000"
+  #     default_pool_size: "25"
+  #   monitoring:
+  #     # -- Whether to enable monitoring
+  #     enabled: false
+  #     podMonitor:
+  #       # -- Whether to enable the PodMonitor
+  #       enabled: true
+  #   # -- Custom PgBouncer deployment template.
+  #   # Use to override image, specify resources, etc.
+  #   template: {}
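The hunk above replaces the single top-level `pooler` block with a `poolers` list. As a rough migration sketch (illustrative only, not part of the patch; field names and parameter values are taken from the commented example above, and presence in the list presumably replaces the old `enabled` flag), values that previously enabled one read-write pooler could become:

poolers:
  - name: rw                # new: each entry is named
    type: rw
    poolMode: transaction
    instances: 3
    parameters:
      max_client_conn: "1000"
      default_pool_size: "25"
    monitoring:
      enabled: false

Leaving `poolers: []` keeps PgBouncer disabled, matching the old `pooler.enabled: false` default.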
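Also new above are `imageCatalog` and `cluster.imageCatalogRef`. A minimal sketch, assuming the inline commented hints reflect the intended shapes (the image tag and catalog name are hypothetical; the patch only states that `imageCatalogRef` takes precedence over `cluster.imageName`):

# Option 1: let the chart provision a catalog from a list of images
imageCatalog:
  create: true
  images:
    - image: ghcr.io/your_repo/your_image:your_tag   # hypothetical image reference
      major: 16

# Option 2: point the cluster at an existing ImageCatalog/ClusterImageCatalog
cluster:
  imageCatalogRef:
    kind: ImageCatalog
    name: postgresql        # hypothetical catalog name, as in the inline hint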
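The new top-level `version` block pairs with `type`; a sketch for a TimescaleDB cluster using the defaults added in the values.yaml hunk earlier (whether minor versions are accepted is not specified in this patch):

type: timescaledb
version:
  postgresql: "16"
  timescaledb: "2.15"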
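The `recovery.pgBaseBackup` values introduced above are exercised by the new postgresql-pg_basebackup test. Condensed into user-facing values, a sketch based on that test's own settings (host, database, and secret names are placeholders copied from the test files):

type: postgresql
mode: recovery
recovery:
  method: pg_basebackup
  pgBaseBackup:
    source:
      host: source-cluster-rw            # placeholder: rw service of the source cluster
      username: streaming_replica
      database: mygooddb                 # placeholder database created by the test
      sslMode: require
      sslKeySecret:
        name: source-cluster-replication
        key: tls.key
      sslCertSecret:
        name: source-cluster-replication
        key: tls.crt
cluster:
  instances: 2
backups:
  enabled: false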