diff --git a/.github/actions/deploy-operator/action.yml b/.github/actions/deploy-operator/action.yml new file mode 100644 index 000000000..a998f06eb --- /dev/null +++ b/.github/actions/deploy-operator/action.yml @@ -0,0 +1,10 @@ +name: Deploy the CNPG Operator +description: Deploys the CNPG Operator to a Kubernetes cluster +runs: + using: composite + steps: + - name: Deploy the operator + shell: bash + run: | + helm upgrade --install cnpg --namespace cnpg-system \ + --create-namespace charts/cloudnative-pg --wait diff --git a/.github/actions/setup-kind/action.yml b/.github/actions/setup-kind/action.yml new file mode 100644 index 000000000..bb88f85b9 --- /dev/null +++ b/.github/actions/setup-kind/action.yml @@ -0,0 +1,24 @@ +name: Setup Kind +description: Sets up a kind cluster and installs Helm and kubectl +outputs: + helm-path: + description: The path to the Helm binary + value: ${{ steps.helm.outputs.helm-path }} + kubectl-path: + description: The path to the kubectl binary + value: ${{ steps.kubectl.outputs.kubectl-path }} +runs: + using: composite + steps: + - id: helm + name: Set up Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + with: + version: v3.6.2 + + - id: kubectl + name: Install kubectl + uses: azure/setup-kubectl@901a10e89ea615cf61f57ac05cecdf23e7de06d8 # v3.2 + + - name: Create kind cluster + uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0 diff --git a/.github/actions/verify-ready-instances/action.yml b/.github/actions/verify-ready-instances/action.yml new file mode 100644 index 000000000..7a848e7f3 --- /dev/null +++ b/.github/actions/verify-ready-instances/action.yml @@ -0,0 +1,32 @@ +name: Verifies that a CNPG cluster has a certain amount of ready instances +description: Verifies that a CNPG cluster has a certain amount of ready instances +inputs: + cluster-name: + description: The name of the cluster to verify + required: true + default: database-cluster + ready-instances: + description: The amount of ready instances to wait for + required: true + default: "3" + +runs: + using: composite + steps: + - name: Wait for cluster to become ready + shell: bash + run: | + ITER=0 + while true; do + if [[ $ITER -ge 300 ]]; then + echo "Cluster not ready" + exit 1 + fi + READY_INSTANCES=$(kubectl get cluster ${INPUT_CLUSTER_NAME} -o jsonpath='{.status.readyInstances}') + if [[ "$READY_INSTANCES" == ${INPUT_READY_INSTANCES} ]]; then + echo "Cluster up and running" + break + fi + sleep 1 + (( ++ITER )) + done diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml deleted file mode 100644 index ef39c8e83..000000000 --- a/.github/workflows/continuous-delivery.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: continuous-delivery - -on: - pull_request: - branches-ignore: - - 'gh-pages' - -jobs: - install_deploy: - runs-on: ubuntu-22.04 - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 - with: - version: v3.6.2 - - - name: Create kind cluster - uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0 - - - name: Deploy using helm chart - run: | - helm upgrade --install cnpg --namespace cnpg-system \ - --create-namespace charts/cloudnative-pg --wait - - - name: Install kubectl - uses: azure/setup-kubectl@901a10e89ea615cf61f57ac05cecdf23e7de06d8 # v3.2 - - - name: Deploy a cluster - run: | - cat < diff --git 
a/charts/cluster/.gitignore b/charts/cluster/.gitignore new file mode 100644 index 000000000..0742ed461 --- /dev/null +++ b/charts/cluster/.gitignore @@ -0,0 +1 @@ +examples/*.test.yaml \ No newline at end of file diff --git a/charts/cluster/.helmignore b/charts/cluster/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/cluster/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/cluster/Chart.yaml b/charts/cluster/Chart.yaml new file mode 100644 index 000000000..9cac5cbc9 --- /dev/null +++ b/charts/cluster/Chart.yaml @@ -0,0 +1,31 @@ +# +# Copyright The CloudNativePG Contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: v2 +name: cluster +description: Deploys and manages a CloudNativePG cluster and its associated resources. +icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg +type: application +version: 0.0.1 +sources: + - https://github.com/cloudnative-pg/charts +keywords: + - postgresql + - postgres + - database +home: https://cloudnative-pg.io +maintainers: + - name: itay-grudev + email: itay+cloudnativepg-charts+github.com@grudev.com diff --git a/charts/cluster/README.md b/charts/cluster/README.md new file mode 100644 index 000000000..d334c4389 --- /dev/null +++ b/charts/cluster/README.md @@ -0,0 +1,213 @@ +# cluster + +![Version: 0.0.1](https://img.shields.io/badge/Version-0.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +> **Warning** +> ### This chart is under active development. +> ### Advised caution when using in production! + +A note on the chart's purpose +----------------------------- + +This is an opinionated chart that is designed to provide a subset of simple, stable and safe configurations using the +CloudNativePG operator. It is designed to provide a simple way to perform recovery operations to decrease your RTO. + +It is not designed to be a one size fits all solution. If you need a more complicated setup we strongly recommend that +you either: + +* use the operator directly +* create your own chart +* use Kustomize to modify the chart's resources + +**_Note_** that the latter option carries it's own risks as the chart configuration may change, especially before it +reaches a stable release. + +That being said, we welcome PRs that improve the chart, but please keep in mind that we don't plan to support every +single configuration that the operator provides and we may reject PRs that add too much complexity and maintenance +difficulty to the chart. 
+ +Getting Started +--------------- + +### Installing the Operator +Skip this step if the CNPG operator is already installed in your cluster. + +```console +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg \ +--namespace cnpg-system \ +--create-namespace \ +cnpg/cloudnative-pg +``` + +### Setting up a CNPG Cluster + +```console +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg \ +--namespace cnpg-database \ +--create-namespace \ +--values values.yaml \ +cnpg/cluster +``` + +A more detailed guide can be found here: [Getting Started](docs/Getting Started.md) + +Cluster Configuration +--------------------- + +### Database types + +Currently the chart supports two database types. These are configured via the `type` parameter. These are: +* `postgresql` - A standard PostgreSQL database. +* `postgis` - A PostgreSQL database with the PostGIS extension installed. + +Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. + +### Modes of operation + +The chart has three modes of operation. These are configured via the `mode` parameter: +* `standalone` - Creates new or updates an existing CNPG cluster. This is the default mode. +* `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** +* `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. + +### Backup configuration + +CNPG implements disaster recovery via [Barman](https://pgbarman.org/). The following section configures the barman object +store where backups will be stored. Barman performs backups of the cluster filesystem base backup and WALs. Both are +stored in the specified location. The backup provider is configured via the `backups.provider` parameter. The following +providers are supported: + +* S3 or S3-compatible stores, like MinIO +* Microsoft Azure Blob Storage +* Google Cloud Storage + +Additionally you can specify the following parameters: +* `backups.retentionPolicy` - The retention policy for backups. Defaults to `30d`. +* `backups.scheduledBackups` - An array of scheduled backups containing a name and a crontab schedule. Example: +```yaml +backups: + scheduledBackups: + - name: daily-backup + schedule: "0 0 0 * * *" # Daily at midnight + backupOwnerReference: self +``` + +Each backup adapter takes it's own set of parameters, listed in the [Configuration options](#Configuration-options) section +below. Refer to the table for the full list of parameters and place the configuration under the appropriate key: `backup.s3`, +`backup.azure`, or `backup.google`. + +Recovery +-------- + +There is a separate document outlining the recovery procedure here: **[Recovery](docs/recovery.md)** + +Examples +-------- + +There are several configuration examples in the [examples](examples) directory. Refer to them for a basic setup and +refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. 
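+For instance, a minimal standalone configuration with three instances and S3 backups might look like the following sketch (the region, bucket and credentials are placeholders):
+
+```yaml
+mode: standalone
+
+cluster:
+  instances: 3
+
+backups:
+  enabled: true
+  provider: s3
+  s3:
+    region: "eu-west-1"
+    bucket: "db-backups"
+    path: "/v1"
+    accessKey: "AWS_S3_ACCESS_KEY"
+    secretKey: "AWS_S3_SECRET_KEY"
+  retentionPolicy: "30d"
+```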
+ +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| backups.azure.connectionString | string | `""` | | +| backups.azure.containerName | string | `""` | | +| backups.azure.inheritFromAzureAD | bool | `false` | | +| backups.azure.path | string | `"/"` | | +| backups.azure.serviceName | string | `"blob"` | | +| backups.azure.storageAccount | string | `""` | | +| backups.azure.storageKey | string | `""` | | +| backups.azure.storageSasToken | string | `""` | | +| backups.destinationPath | string | `""` | Overrides the provider specific default path. Defaults to: S3: s3:// Azure: https://..core.windows.net/ Google: gs:// | +| backups.enabled | bool | `false` | You need to configure backups manually, so backups are disabled by default. | +| backups.endpointURL | string | `""` | Overrides the provider specific default endpoint. Defaults to: S3: https://s3..amazonaws.com" | +| backups.google.applicationCredentials | string | `""` | | +| backups.google.bucket | string | `""` | | +| backups.google.gkeEnvironment | bool | `false` | | +| backups.google.path | string | `"/"` | | +| backups.provider | string | `"s3"` | One of `s3`, `azure` or `google` | +| backups.retentionPolicy | string | `"30d"` | Retention policy for backups | +| backups.s3.accessKey | string | `""` | | +| backups.s3.bucket | string | `""` | | +| backups.s3.path | string | `"/"` | | +| backups.s3.region | string | `""` | | +| backups.s3.secretKey | string | `""` | | +| backups.scheduledBackups[0].backupOwnerReference | string | `"self"` | Backup owner reference | +| backups.scheduledBackups[0].name | string | `"daily-backup"` | Scheduled backup name | +| backups.scheduledBackups[0].schedule | string | `"0 0 0 * * *"` | Schedule in cron format | +| cluster.additionalLabels | object | `{}` | | +| cluster.affinity | object | `{"topologyKey":"topology.kubernetes.io/zone"}` | Affinity/Anti-affinity rules for Pods See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration | +| cluster.annotations | object | `{}` | | +| cluster.certificates | string | `nil` | The configuration for the CA and related certificates See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-CertificatesConfiguration | +| cluster.enableSuperuserAccess | bool | `true` | When this option is enabled, the operator will use the SuperuserSecret to update the postgres user password. If the secret is not present, the operator will automatically create one. When this option is disabled, the operator will ignore the SuperuserSecret content, delete it when automatically created, and then blank the password of the postgres user by setting it to NULL. | +| cluster.imageName | string | `""` | Name of the container image, supporting both tags (:) and digests for deterministic and repeatable deployments: :@sha256: | +| cluster.imagePullPolicy | string | `"IfNotPresent"` | Image pull policy. One of Always, Never or IfNotPresent. If not defined, it defaults to IfNotPresent. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | +| cluster.imagePullSecrets | list | `[]` | The list of pull secrets to be used to pull the images See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-LocalObjectReference | +| cluster.initdb | object | `{}` | BootstrapInitDB is the configuration of the bootstrap process when initdb is used See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb | +| cluster.instances | int | `3` | Number of instances | +| cluster.logLevel | string | `"info"` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | +| cluster.monitoring.customQueries | list | `[]` | | +| cluster.monitoring.enablePodMonitor | bool | `false` | | +| cluster.postgresql | string | `nil` | Configuration of the PostgreSQL server See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration | +| cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or in-place (restart). | +| cluster.primaryUpdateStrategy | string | `"unsupervised"` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (unsupervised - default) or manual (supervised) | +| cluster.priorityClassName | string | `""` | | +| cluster.resources | string | `nil` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. We strongly advise you use the same setting for limits and requests so that your cluster pods are given a Guaranteed QoS. See: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/ | +| cluster.storage.size | string | `"8Gi"` | | +| cluster.storage.storageClass | string | `""` | | +| cluster.superuserSecret | string | `""` | | +| fullnameOverride | string | `""` | Override the full name of the chart | +| mode | string | `"standalone"` | Cluster mode of operation. Available modes: * `standalone` - default mode. Creates new or updates an existing CNPG cluster. * `replica` - Creates a replica cluster from an existing CNPG cluster. # TODO * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup. 
| +| nameOverride | string | `""` | Override the name of the chart | +| pooler.enabled | bool | `false` | Whether to enable PgBouncer | +| pooler.instances | int | `3` | Number of PgBouncer instances | +| pooler.parameters | object | `{"default_pool_size":"25","max_client_conn":"1000"}` | PgBouncer configuration parameters | +| pooler.poolMode | string | `"transaction"` | PgBouncer pooling mode | +| recovery.azure.connectionString | string | `""` | | +| recovery.azure.containerName | string | `""` | | +| recovery.azure.inheritFromAzureAD | bool | `false` | | +| recovery.azure.path | string | `"/"` | | +| recovery.azure.serviceName | string | `"blob"` | | +| recovery.azure.storageAccount | string | `""` | | +| recovery.azure.storageKey | string | `""` | | +| recovery.azure.storageSasToken | string | `""` | | +| recovery.backupName | string | `""` | Backup Recovery Method | +| recovery.clusterName | string | `""` | Object Store Recovery Method | +| recovery.destinationPath | string | `""` | Overrides the provider specific default path. Defaults to: S3: s3:// Azure: https://..core.windows.net/ Google: gs:// | +| recovery.endpointURL | string | `""` | Overrides the provider specific default endpoint. Defaults to: S3: https://s3..amazonaws.com" Leave empty if using the default S3 endpoint | +| recovery.google.applicationCredentials | string | `""` | | +| recovery.google.bucket | string | `""` | | +| recovery.google.gkeEnvironment | bool | `false` | | +| recovery.google.path | string | `"/"` | | +| recovery.method | string | `"backup"` | Available recovery methods: * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). * `pg_basebackup` - Recovers a CNPG cluster via the streaming replication protocol. Useful if you want to migrate databases to CloudNativePG, even from outside Kubernetes. # TODO | +| recovery.pitrTarget.time | string | `""` | Time in RFC3339 format | +| recovery.provider | string | `"s3"` | One of `s3`, `azure` or `google` | +| recovery.s3.accessKey | string | `""` | | +| recovery.s3.bucket | string | `""` | | +| recovery.s3.path | string | `"/"` | | +| recovery.s3.region | string | `""` | | +| recovery.s3.secretKey | string | `""` | | +| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` | + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| itay-grudev | | | + +Features that require feedback +------------------------------ + +Please raise a ticket if you have tested any of the following features and they have worked. +Alternatively, raise a ticket and a PR if you have found that something needs a change to work properly. + +- [ ] Google Cloud Storage Backups +- [ ] Google Cloud Storage Recovery + +TODO +---- +* IAM Role for S3 Service Account +* Automatic provisioning of an Alert Manager configuration + diff --git a/charts/cluster/README.md.gotmpl b/charts/cluster/README.md.gotmpl new file mode 100644 index 000000000..956e1431e --- /dev/null +++ b/charts/cluster/README.md.gotmpl @@ -0,0 +1,147 @@ +{{ template "chart.header" . }} + + +{{ template "chart.deprecationWarning" . }} + + +{{ template "chart.badgesSection" . }} + + +> **Warning** +> ### This chart is under active development. +> ### Advised caution when using in production!
+ + +A note on the chart's purpose +----------------------------- + +This is an opinionated chart that is designed to provide a subset of simple, stable and safe configurations using the +CloudNativePG operator. It is designed to provide a simple way to perform recovery operations to decrease your RTO. + +It is not designed to be a one size fits all solution. If you need a more complicated setup we strongly recommend that +you either: + +* use the operator directly +* create your own chart +* use Kustomize to modify the chart's resources + +**_Note_** that the latter option carries it's own risks as the chart configuration may change, especially before it +reaches a stable release. + +That being said, we welcome PRs that improve the chart, but please keep in mind that we don't plan to support every +single configuration that the operator provides and we may reject PRs that add too much complexity and maintenance +difficulty to the chart. + + +Getting Started +--------------- + +### Installing the Operator +Skip this step if the CNPG operator is already installed in your cluster. + +```console +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg \ +--namespace cnpg-system \ +--create-namespace \ +cnpg/cloudnative-pg +``` + +### Setting up a CNPG Cluster + +```console +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg \ +--namespace cnpg-database \ +--create-namespace \ +--values values.yaml \ +cnpg/cluster +``` + +A more detailed guide can be found here: [Getting Started](docs/Getting Started.md) + + +Cluster Configuration +--------------------- + +### Database types + +Currently the chart supports two database types. These are configured via the `type` parameter. These are: +* `postgresql` - A standard PostgreSQL database. +* `postgis` - A PostgreSQL database with the PostGIS extension installed. + +Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. + +### Modes of operation + +The chart has three modes of operation. These are configured via the `mode` parameter: +* `standalone` - Creates new or updates an existing CNPG cluster. This is the default mode. +* `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** +* `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. + +### Backup configuration + +CNPG implements disaster recovery via [Barman](https://pgbarman.org/). The following section configures the barman object +store where backups will be stored. Barman performs backups of the cluster filesystem base backup and WALs. Both are +stored in the specified location. The backup provider is configured via the `backups.provider` parameter. The following +providers are supported: + +* S3 or S3-compatible stores, like MinIO +* Microsoft Azure Blob Storage +* Google Cloud Storage + +Additionally you can specify the following parameters: +* `backups.retentionPolicy` - The retention policy for backups. Defaults to `30d`. +* `backups.scheduledBackups` - An array of scheduled backups containing a name and a crontab schedule. Example: +```yaml +backups: + scheduledBackups: + - name: daily-backup + schedule: "0 0 0 * * *" # Daily at midnight + backupOwnerReference: self +``` + +Each backup adapter takes it's own set of parameters, listed in the [Configuration options](#Configuration-options) section +below. 
Refer to the table for the full list of parameters and place the configuration under the appropriate key: `backup.s3`, +`backup.azure`, or `backup.google`. + + +Recovery +-------- + +There is a separate document outlining the recovery procedure here: **[Recovery](docs/recovery.md)** + + +Examples +-------- + +There are several configuration examples in the [examples](examples) directory. Refer to them for a basic setup and +refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. + + +{{ template "chart.requirementsSection" . }} + + +{{ template "chart.valuesSection" . }} + + +{{ template "chart.maintainersSection" . }} + + +Features that require feedback +------------------------------ + +Please raise a ticket if you have tested any of the following features and they have worked. +Alternatively, raise a ticket and a PR if you have found that something needs a change to work properly. + +- [ ] Google Cloud Storage Backups +- [ ] Google Cloud Storage Recovery + + +TODO +---- +* IAM Role for S3 Service Account +* Automatic provisioning of an Alert Manager configuration + + +{{ template "helm-docs.versionFooter" . }} diff --git a/charts/cluster/docs/Getting Started.md b/charts/cluster/docs/Getting Started.md new file mode 100644 index 000000000..54dad419d --- /dev/null +++ b/charts/cluster/docs/Getting Started.md @@ -0,0 +1,106 @@ +# Getting Started + +The CNPG cluster chart follows a convention over configuration approach. This means that the chart will create a reasonable +CNPG setup with sensible defaults. However, you can override these defaults to create a more customized setup. Note that +you still need to configure backups and monitoring separately. The chart will not install a Prometheus stack for you. + +_**Note**_ that this is an opinionated chart. It does not support all configuration options that CNPG supports. If you +need a highly customized setup, you should manage your cluster via a Kubernetes CNPG cluster manifest instead of this chart. +Refer to the [CNPG documentation](https://cloudnative-pg.io/documentation/current/) in that case. + +## Installing the operator + +To begin, make sure you install the CNPG operator in your cluster. It can be installed via a Helm chart as shown below or +it can be installed via a Kubernetes manifest. For more information, see the [CNPG documentation](https://cloudnative-pg.io/documentation/current/installation_upgrade/). + +```console +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --install cnpg \ + --namespace cnpg-system \ + --create-namespace \ + cnpg/cloudnative-pg +``` + +## Creating a cluster configuration + +Once you have the operator installed, the next step is to prepare the cluster configuration. Whether this will be managed +via a GitOps solution or directly via Helm is up to you. The following sections outline the important steps in both cases. + +### Choosing the database type + +Currently the chart supports two database types. These are configured via the `type` parameter. These are: +* `postgresql` - A standard PostgreSQL database. +* `postgis` - A PostgreSQL database with the PostGIS extension installed. + +Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. + +### Choosing the mode of operation + +The chart has three modes of operation. These are configured via the `mode` parameter. If this is your first cluster, you +are likely looking for the `standalone` option.
+* `standalone` - Creates new or updates an existing CNPG cluster. This is the default mode. +* `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** +* `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. + +### Backup configuration + +Most importantly you should configure your backup storage. + +CNPG implements disaster recovery via [Barman](https://pgbarman.org/). The following section configures the barman object +store where backups will be stored. Barman performs backups of the cluster filesystem base backup and WALs. Both are +stored in the specified location. The backup provider is configured via the `backups.provider` parameter. The following +providers are supported: + +* S3 or S3-compatible stores, like MinIO +* Microsoft Azure Blob Storage +* Google Cloud Storage + +Additionally you can specify the following parameters: +* `backups.retentionPolicy` - The retention policy for backups. Defaults to `30d`. +* `backups.scheduledBackups` - An array of scheduled backups containing a name and a crontab schedule. Example: + ```yaml + backups: + scheduledBackups: + - name: daily-backup + schedule: "0 0 0 * * *" # Daily at midnight + backupOwnerReference: self + ``` + +Each backup adapter takes it's own set of parameters, listed in the [Configuration options](../README.md#Configuration-options) section +below. Refer to the table for the full list of parameters and place the configuration under the appropriate key: `backup.s3`, +`backup.azure`, or `backup.google`. + +### Cluster configuration + +There are several important cluster options. Here are the most important ones: + +`cluster.instances` - The number of instances in the cluster. Defaults to `1`, but you should set this to `3` for production. +`cluster.imageName` - This allows you to override the Docker image used for the cluster. The chart will choose a default + for you based on the setting you chose for `type`. If you need to run a configuration that is not supported, you can + create your own Docker image. You can use the [postgres-containers](https://github.com/cloudnative-pg/postgres-containers) + repository for a starting point. + You will likely need to set your own repository access credentials via: `cluster.imagePullPolicy` and `cluster.imagePullSecrets`. +`cluster.storage.size` - The size of the persistent volume claim for the cluster. Defaults to `8Gi`. Every instance will + have it's own persistent volume claim. +`cluster.storage.storageClass` - The storage class to use for the persistent volume claim. +`cluster.resources` - The resource limits and requests for the cluster. You are strongly advised to use the same values + for both limits and requests to ensure a [Guaranteed QoS](https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#guaranteed). +`cluster.affinity.topologyKey` - The chart sets it to `topology.kubernetes.io/zone` by default which is useful if you are + running a production cluster in a multi AZ cluster (highly recommended). If you are running a single AZ cluster, you may + want to change that to `kubernetes.io/hostname` to ensure that cluster instances are not provisioned on the same node. +`cluster.postgresql` - Allows you to override PostgreSQL configuration parameters example: + ```yaml + cluster: + postgresql: + max_connections: "200" + shared_buffers: "2GB" + ``` +`cluster.initSQL` - Allows you to run custom SQL queries during the cluster initialization. 
This is useful for creating +extensions, schemas and databases. Note that these are run as a superuser. + +For a full list, refer to the Helm chart [configuration options](../README.md#Configuration-options). + +## Examples + +There are several configuration examples in the [examples](../examples) directory. Refer to them for a basic setup and +refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. diff --git a/charts/cluster/docs/Recovery.md b/charts/cluster/docs/Recovery.md new file mode 100644 index 000000000..6a1be6593 --- /dev/null +++ b/charts/cluster/docs/Recovery.md @@ -0,0 +1,27 @@ +Recovery +======== + +This chart can be used to initiate a recovery operation of a CNPG cluster, regardless of whether or not it was created with this chart. + +CNPG does not support in-place recovery. Instead, you need to create a new cluster that will be bootstrapped from the existing one or from a backup. + +You can find more information about the recovery process in the [CNPG documentation](https://cloudnative-pg.io/documentation/current/backup_recovery). + +There are three types of recovery possible with CNPG: +* Recovery from a backup object in the same Kubernetes namespace. +* Recovery from a Barman Object Store, that could be located anywhere. +* Streaming replication from an operating cluster using `pg_basebackup` (not supported by the chart yet). + +When performing a recovery you are strongly advised to use the same configuration and PostgreSQL version as the original cluster. + +To begin, create a `values.yaml` that contains the following: + +1. Set `mode: recovery` to indicate that you want to bootstrap the new cluster from an existing one. +2. Set the `recovery.method` to the type of recovery you want to perform. +3. Set either the `recovery.backupName` or the Barman Object Store configuration - i.e. `recovery.provider` and the appropriate S3, Azure or GCS configuration. +4. Optionally set the `recovery.pitrTarget.time` in RFC3339 format to perform a point-in-time recovery. +5. Retain the identical PostgreSQL version and configuration as the original cluster. +6. Make sure you don't use the same backup section name as the original cluster. We advise you to change the `path` within the storage location if you want to reuse the same storage location/bucket. + One pattern is adding a version number at the end of the path, e.g. `/v1` or `/v2` after each recovery procedure. + +Example recovery configurations can be found in the [examples](../examples) directory.
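+For instance, a recovery from a `Backup` object in the same namespace might use a `values.yaml` along these lines (the backup name, bucket and credentials are placeholders; the backup `path` is moved to `/v2` so the restored cluster archives to a fresh location):
+
+```yaml
+mode: recovery
+
+recovery:
+  method: backup
+  backupName: "database-cluster-daily-backup-1683244800"
+
+cluster:
+  instances: 1
+
+backups:
+  enabled: true
+  provider: s3
+  s3:
+    region: "eu-west-1"
+    bucket: "db-backups"
+    path: "/v2"
+    accessKey: "AWS_S3_ACCESS_KEY"
+    secretKey: "AWS_S3_SECRET_KEY"
+  retentionPolicy: "30d"
+```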
diff --git a/charts/cluster/examples/basic.yaml b/charts/cluster/examples/basic.yaml new file mode 100644 index 000000000..5b608c267 --- /dev/null +++ b/charts/cluster/examples/basic.yaml @@ -0,0 +1,5 @@ +mode: standalone +cluster: + instances: 1 +backups: + enabled: false diff --git a/charts/cluster/examples/custom-queries.yaml b/charts/cluster/examples/custom-queries.yaml new file mode 100644 index 000000000..1e6ef16f6 --- /dev/null +++ b/charts/cluster/examples/custom-queries.yaml @@ -0,0 +1,23 @@ +type: postgresql +mode: standalone + +cluster: + instances: 1 + monitoring: + customQueries: + - name: "pg_cache_hit" + query: | + SELECT + current_database() as datname, + sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio + FROM pg_statio_user_tables; + metrics: + - datname: + usage: "LABEL" + description: "Name of the database database" + - ratio: + usage: GAUGE + description: "Cache hit ratio" + +backups: + enabled: false \ No newline at end of file diff --git a/charts/cluster/examples/postgis.yaml b/charts/cluster/examples/postgis.yaml new file mode 100644 index 000000000..6c686dc62 --- /dev/null +++ b/charts/cluster/examples/postgis.yaml @@ -0,0 +1,6 @@ +type: postgis +mode: standalone +cluster: + instances: 1 +backups: + enabled: false \ No newline at end of file diff --git a/charts/cluster/examples/recovery-backup.yaml b/charts/cluster/examples/recovery-backup.yaml new file mode 100644 index 000000000..d11187f5c --- /dev/null +++ b/charts/cluster/examples/recovery-backup.yaml @@ -0,0 +1,22 @@ +mode: recovery + +recovery: + method: backup + backupName: "database-clustermarket-database-daily-backup-1683244800" + +cluster: + instances: 1 + +backups: + provider: s3 + s3: + region: "eu-west-1" + bucket: "db-backups" + path: "/v1-restore" + accessKey: "AWS_S3_ACCESS_KEY" + secretKey: "AWS_S3_SECRET_KEY" + scheduledBackups: + - name: daily-backup # Daily at midnight + schedule: "0 0 0 * * *" # Daily at midnight + backupOwnerReference: self + retentionPolicy: "30d" \ No newline at end of file diff --git a/charts/cluster/examples/recovery-object_store.yaml b/charts/cluster/examples/recovery-object_store.yaml new file mode 100644 index 000000000..92722a159 --- /dev/null +++ b/charts/cluster/examples/recovery-object_store.yaml @@ -0,0 +1,30 @@ +mode: recovery + +recovery: + method: object_store + serverName: "cluster-name-to-recover-from" + provider: s3 + s3: + region: "eu-west-1" + bucket: "db-backups" + path: "/v1-restore" + accessKey: "AWS_S3_ACCESS_KEY" + secretKey: "AWS_S3_SECRET_KEY" + +cluster: + instances: 1 + +backups: + endpointURL: "https://cm-db-chart-test.ams3.digitaloceanspaces.com" + provider: s3 + s3: + region: "eu-west-1" + bucket: "db-backups" + path: "/v1-restore" + accessKey: "AWS_S3_ACCESS_KEY" + secretKey: "AWS_S3_SECRET_KEY" + scheduledBackups: + - name: daily-backup # Daily at midnight + schedule: "0 0 0 * * *" # Daily at midnight + backupOwnerReference: self + retentionPolicy: "30d" \ No newline at end of file diff --git a/charts/cluster/examples/standalone-s3.yaml b/charts/cluster/examples/standalone-s3.yaml new file mode 100644 index 000000000..bf1794d06 --- /dev/null +++ b/charts/cluster/examples/standalone-s3.yaml @@ -0,0 +1,19 @@ +mode: standalone + +cluster: + instances: 1 + +backups: + enabled: true + provider: s3 + s3: + region: "eu-west-1" + bucket: "db-backups" + path: "/v1" + accessKey: "AWS_S3_ACCESS_KEY" + secretKey: "AWS_S3_SECRET_KEY" + scheduledBackups: + - name: daily-backup # Daily at midnight + schedule: "0 0 0 * * *" # Daily at 
midnight + backupOwnerReference: self + retentionPolicy: "30d" diff --git a/charts/cluster/templates/NOTES.txt b/charts/cluster/templates/NOTES.txt new file mode 100644 index 000000000..28c0e6172 --- /dev/null +++ b/charts/cluster/templates/NOTES.txt @@ -0,0 +1,68 @@ +{{ if .Release.IsInstall }} +The {{ include "cluster.color-info" (include "cluster.fullname" .) }} has been installed successfully. +{{ else if .Release.IsUpgrade }} +The {{ include "cluster.color-info" (include "cluster.fullname" .) }} has been upgraded successfully. +{{ end }} + + ██████ ██ ██ ████ ██ ██ ██ ███████ ████████ + ██░░░░██ ░██ ░██░██░██ ░██ ░██ ░░ ░██░░░░██ ██░░░░░░██ + ██ ░░ ░██ ██████ ██ ██ ░██░██░░██ ░██ ██████ ██████ ██ ██ ██ █████ ░██ ░██ ██ ░░ +░██ ░██ ██░░░░██░██ ░██ ██████░██ ░░██ ░██ ░░░░░░██ ░░░██░ ░██░██ ░██ ██░░░██░███████ ░██ +░██ ░██░██ ░██░██ ░██ ██░░░██░██ ░░██░██ ███████ ░██ ░██░░██ ░██ ░███████░██░░░░ ░██ █████ +░░██ ██ ░██░██ ░██░██ ░██░██ ░██░██ ░░████ ██░░░░██ ░██ ░██ ░░████ ░██░░░░ ░██ ░░██ ░░░░██ + ░░██████ ███░░██████ ░░██████░░██████░██ ░░███░░████████ ░░██ ░██ ░░██ ░░██████░██ ░░████████ + ░░░░░░ ░░░ ░░░░░░ ░░░░░░ ░░░░░░ ░░ ░░░ ░░░░░░░░ ░░ ░░ ░░ ░░░░░░ ░░ ░░░░░░░░ + +Cheatsheet +---------- + +Run Helm Tests: +{{ include "cluster.color-info" (printf "helm test --namespace %s %s" .Release.Namespace .Release.Name) }} + +Get a list of all base backups: +{{ include "cluster.color-info" (printf "kubectl --namespace %s get backups --selector cnpg.io/cluster=%s" .Release.Namespace (include "cluster.fullname" .)) }} + +Connect to the cluster's primary instance: +{{ include "cluster.color-info" (printf "kubectl --namespace %s exec --stdin --tty services/%s-rw -- bash" .Release.Namespace (include "cluster.fullname" .)) }} + +Configuration +------------- + +{{- $redundancyColor := "" -}} +{{- if lt (int .Values.cluster.instances) 2 }} + {{- $redundancyColor = "error" -}} +{{- else if lt (int .Values.cluster.instances) 3 -}} + {{- $redundancyColor = "warning" -}} +{{- else -}} + {{- $redundancyColor = "ok" -}} +{{- end }} + +{{ $scheduledBackups := (first .Values.backups.scheduledBackups).name }} +{{- range (rest .Values.backups.scheduledBackups) -}} + {{ $scheduledBackups = printf "%s, %s" $scheduledBackups .name }} +{{- end -}} + +╭───────────────────┬────────────────────────────────────────────╮ +│ Configuration │ Value │ +┝━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┥ +│ Cluster mode │ {{ (printf "%-42s" .Values.mode) }} │ +│ Type │ {{ (printf "%-42s" .Values.type) }} │ +│ Image │ {{ include "cluster.color-info" (printf "%-42s" (include "cluster.imageName" .)) }} │ +│ Instances │ {{ include (printf "%s%s" "cluster.color-" $redundancyColor) (printf "%-42s" (toString .Values.cluster.instances)) }} │ +│ Backups │ {{ include (printf "%s%s" "cluster.color-" (ternary "ok" "error" .Values.backups.enabled)) (printf "%-42s" (ternary "Enabled" "Disabled" .Values.backups.enabled)) }} │ +│ Backup Provider │ {{ (printf "%-42s" (title .Values.backups.provider)) }} │ +│ Scheduled Backups │ {{ (printf "%-42s" $scheduledBackups) }} │ +│ Storage │ {{ (printf "%-42s" .Values.cluster.storage.size) }} │ +│ Storage Class │ {{ (printf "%-42s" (default "Default" .Values.cluster.storage.storageClass)) }} │ +│ PGBouncer │ {{ (printf "%-42s" (ternary "Enabled" "Disabled" .Values.pooler.enabled)) }} │ +╰───────────────────┴────────────────────────────────────────────╯ + +{{ if not .Values.backups.enabled }} + {{- include "cluster.color-error" "Warning! Backups not enabled. Recovery will not be possible! 
Do not use this configuration in production.\n" }} +{{ end -}} + +{{ if lt (int .Values.cluster.instances) 2 }} + {{- include "cluster.color-error" "Warning! Instance failure will lead to downtime and/or data loss!\n" }} +{{- else if lt (int .Values.cluster.instances) 3 -}} + {{- include "cluster.color-warning" "Warning! Single instance redundancy available only. Instance failure will put the cluster at risk.\n" }} +{{ end -}} diff --git a/charts/cluster/templates/_backup.tpl b/charts/cluster/templates/_backup.tpl new file mode 100644 index 000000000..cb76d9b74 --- /dev/null +++ b/charts/cluster/templates/_backup.tpl @@ -0,0 +1,18 @@ +{{- define "cluster.backup" -}} +backup: +{{- if .Values.backups.enabled }} + target: "prefer-standby" + retentionPolicy: {{ .Values.backups.retentionPolicy }} + barmanObjectStore: + wal: + compression: gzip + encryption: AES256 + data: + compression: gzip + encryption: AES256 + jobs: 2 + + {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.backups }} + {{- include "cluster.barmanObjectStoreConfig" $d | nindent 2 }} +{{- end }} +{{- end }} diff --git a/charts/cluster/templates/_barman_object_store.tpl b/charts/cluster/templates/_barman_object_store.tpl new file mode 100644 index 000000000..96278f11a --- /dev/null +++ b/charts/cluster/templates/_barman_object_store.tpl @@ -0,0 +1,58 @@ +{{- define "cluster.barmanObjectStoreConfig" -}} + +{{- if .scope.endpointURL }} + endpointURL: {{ .scope.endpointURL }} +{{- end }} + +{{- if .scope.destinationPath }} + destinationPath: {{ .scope.destinationPath }} +{{- end }} + +{{- if eq .scope.provider "s3" }} + {{- if empty .scope.endpointURL }} + endpointURL: "https://s3.{{ required "You need to specify S3 region if endpointURL is not specified." .scope.s3.region }}.amazonaws.com" + {{- end }} + {{- if empty .scope.destinationPath }} + destinationPath: "s3://{{ required "You need to specify S3 bucket if destinationPath is not specified." .scope.s3.bucket }}{{ .scope.s3.path }}" + {{- end }} + s3Credentials: + accessKeyId: + name: {{ .chartFullname }}-backup-s3{{ .secretSuffix }}-creds + key: ACCESS_KEY_ID + secretAccessKey: + name: {{ .chartFullname }}-backup-s3{{ .secretSuffix }}-creds + key: ACCESS_SECRET_KEY +{{- else if eq .scope.provider "azure" }} + {{- if empty .scope.destinationPath }} + destinationPath: "https://{{ required "You need to specify Azure storageAccount if destinationPath is not specified." .scope.azure.storageAccount }}.{{ .scope.azure.serviceName }}.core.windows.net/{{ .scope.azure.containerName }}{{ .scope.azure.path }}" + {{- end }} + azureCredentials: + {{- if .scope.azure.connectionString }} + connectionString: + name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + key: AZURE_CONNECTION_STRING + {{- else }} + storageAccount: + name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + key: AZURE_STORAGE_ACCOUNT + {{- if .scope.azure.storageKey }} + storageKey: + name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + key: AZURE_STORAGE_KEY + {{- else }} + storageSasToken: + name: {{ .chartFullname }}-backup-azure{{ .secretSuffix }}-creds + key: AZURE_STORAGE_SAS_TOKEN + {{- end }} + {{- end }} +{{- else if eq .scope.provider "google" }} + {{- if empty .scope.destinationPath }} + destinationPath: "gs://{{ required "You need to specify Google storage bucket if destinationPath is not specified." 
.scope.google.bucket }}{{ .scope.google.path }}" + {{- end }} + googleCredentials: + gkeEnvironment: {{ .scope.google.gkeEnvironment }} + applicationCredentials: + name: {{ .chartFullname }}-backup-google{{ .secretSuffix }}-creds + key: APPLICATION_CREDENTIALS +{{- end -}} +{{- end -}} diff --git a/charts/cluster/templates/_bootstrap.tpl b/charts/cluster/templates/_bootstrap.tpl new file mode 100644 index 000000000..214ad391b --- /dev/null +++ b/charts/cluster/templates/_bootstrap.tpl @@ -0,0 +1,46 @@ +{{- define "cluster.bootstrap" -}} +bootstrap: +{{- if eq .Values.mode "standalone" }} + initdb: + {{- with .Values.cluster.initdb }} + {{- with (omit . "postInitApplicationSQL") }} + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- end }} + postInitApplicationSQL: + {{- if eq .Values.type "postgis" }} + - CREATE EXTENSION IF NOT EXISTS postgis; + - CREATE EXTENSION IF NOT EXISTS postgis_topology; + - CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; + - CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder; + {{- else if eq .Values.type "timescaledb" }} + - CREATE EXTENSION IF NOT EXISTS timescaledb; + {{- end }} + {{- with .Values.cluster.initdb }} + {{- range .postInitApplicationSQL }} + {{- printf "- %s" . | nindent 6 }} + {{- end -}} + {{- end -}} +{{- else if eq .Values.mode "recovery" }} + recovery: + {{- with .Values.recovery.pitrTarget.time }} + recoveryTarget: + targetTime: {{ . }} + {{- end }} + {{- if eq .Values.recovery.method "backup" }} + backup: + name: {{ .Values.recovery.backupName }} + {{- else if eq .Values.recovery.method "object_store" }} + source: objectStoreRecoveryCluster + {{- end }} + +externalClusters: + - name: objectStoreRecoveryCluster + barmanObjectStore: + serverName: {{ .Values.recovery.serverName }} + {{- $d := dict "chartFullname" (include "cluster.fullname" .) "scope" .Values.recovery "secretSuffix" "-recovery" -}} + {{- include "cluster.barmanObjectStoreConfig" $d | nindent 4 }} +{{- else }} + {{ fail "Invalid cluster mode!" }} +{{- end }} +{{- end }} diff --git a/charts/cluster/templates/_colorize.tpl b/charts/cluster/templates/_colorize.tpl new file mode 100644 index 000000000..35b2ba1cd --- /dev/null +++ b/charts/cluster/templates/_colorize.tpl @@ -0,0 +1,12 @@ +{{- define "cluster.color-error" }} + {{- printf "\033[0;31m%s\033[0m" . -}} +{{- end }} +{{- define "cluster.color-ok" }} + {{- printf "\033[0;32m%s\033[0m" . -}} +{{- end }} +{{- define "cluster.color-warning" }} + {{- printf "\033[0;33m%s\033[0m" . -}} +{{- end }} +{{- define "cluster.color-info" }} + {{- printf "\033[0;34m%s\033[0m" . -}} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/_helpers.tpl b/charts/cluster/templates/_helpers.tpl new file mode 100644 index 000000000..b00846d60 --- /dev/null +++ b/charts/cluster/templates/_helpers.tpl @@ -0,0 +1,69 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "cluster.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cluster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "cluster.labels" -}} +helm.sh/chart: {{ include "cluster.chart" . }} +{{ include "cluster.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "cluster.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cluster.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Cluster Image Name +If a custom imageName is available, use it, otherwise use the defaults based on the .Values.type +*/}} +{{- define "cluster.imageName" -}} + {{- if .Values.cluster.imageName -}} + {{- .Values.cluster.imageName -}} + {{- else if eq .Values.type "postgresql" -}} + {{- "ghcr.io/cloudnative-pg/postgresql:15.2" -}} + {{- else if eq .Values.type "postgis" -}} + {{- "ghcr.io/cloudnative-pg/postgis:14" -}} + {{- else if eq .Values.type "timescaledb" -}} + {{ fail "You need to provide your own cluster.imageName as an official timescaledb image doesn't exist yet." }} + {{- else -}} + {{ fail "Invalid cluster type!" }} + {{- end }} +{{- end -}} diff --git a/charts/cluster/templates/backup-azure-creds.yaml b/charts/cluster/templates/backup-azure-creds.yaml new file mode 100644 index 000000000..19a651eb3 --- /dev/null +++ b/charts/cluster/templates/backup-azure-creds.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.backups.enabled (eq .Values.backups.provider "azure") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . }}-backup-azure-creds +data: + AZURE_CONNECTION_STRING: {{ .Values.backups.azure.connectionString | b64enc | quote }} + AZURE_STORAGE_ACCOUNT: {{ .Values.backups.azure.storageAccount | b64enc | quote }} + AZURE_STORAGE_KEY: {{ .Values.backups.azure.storageKey | b64enc | quote }} + AZURE_STORAGE_SAS_TOKEN: {{ .Values.backups.azure.storageSasToken | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/backup-azure-recovery-creds.yaml b/charts/cluster/templates/backup-azure-recovery-creds.yaml new file mode 100644 index 000000000..b4aecb558 --- /dev/null +++ b/charts/cluster/templates/backup-azure-recovery-creds.yaml @@ -0,0 +1,11 @@ +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "azure") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . 
}}-backup-azure-recovery-creds +data: + AZURE_CONNECTION_STRING: {{ .Values.recovery.azure.connectionString | b64enc | quote }} + AZURE_STORAGE_ACCOUNT: {{ .Values.recovery.azure.storageAccount | b64enc | quote }} + AZURE_STORAGE_KEY: {{ .Values.recovery.azure.storageKey | b64enc | quote }} + AZURE_STORAGE_SAS_TOKEN: {{ .Values.recovery.azure.storageSasToken | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/backup-google-creds.yaml b/charts/cluster/templates/backup-google-creds.yaml new file mode 100644 index 000000000..252a27064 --- /dev/null +++ b/charts/cluster/templates/backup-google-creds.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.backups.enabled (eq .Values.backups.provider "google") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . }}-backup-google-creds +data: + APPLICATION_CREDENTIALS: {{ .Values.backups.google.applicationCredentials | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/backup-google-recovery-creds.yaml b/charts/cluster/templates/backup-google-recovery-creds.yaml new file mode 100644 index 000000000..942bb897b --- /dev/null +++ b/charts/cluster/templates/backup-google-recovery-creds.yaml @@ -0,0 +1,8 @@ +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "google") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . }}-backup-google-recovery-creds +data: + APPLICATION_CREDENTIALS: {{ .Values.recovery.google.applicationCredentials | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/backup-s3-creds.yaml b/charts/cluster/templates/backup-s3-creds.yaml new file mode 100644 index 000000000..b906d2453 --- /dev/null +++ b/charts/cluster/templates/backup-s3-creds.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.backups.enabled (eq .Values.backups.provider "s3") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . }}-backup-s3-creds +data: + ACCESS_KEY_ID: {{ required ".Values.backups.s3.accessKey is required, but not specified." .Values.backups.s3.accessKey | b64enc | quote }} + ACCESS_SECRET_KEY: {{ required ".Values.backups.s3.secretKey is required, but not specified." .Values.backups.s3.secretKey | b64enc | quote }} +{{- end }} diff --git a/charts/cluster/templates/backup-s3-recovery-creds.yaml b/charts/cluster/templates/backup-s3-recovery-creds.yaml new file mode 100644 index 000000000..9cc615fcd --- /dev/null +++ b/charts/cluster/templates/backup-s3-recovery-creds.yaml @@ -0,0 +1,9 @@ +{{- if and (eq .Values.mode "recovery" ) (eq .Values.recovery.method "object_store") (eq .Values.recovery.provider "s3") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "cluster.fullname" . }}-backup-s3-recovery-creds +data: + ACCESS_KEY_ID: {{ required ".Values.recovery.s3.accessKey is required, but not specified." .Values.recovery.s3.accessKey | b64enc | quote }} + ACCESS_SECRET_KEY: {{ required ".Values.recovery.s3.secretKey is required, but not specified." .Values.recovery.s3.secretKey | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/templates/cluster.yaml b/charts/cluster/templates/cluster.yaml new file mode 100644 index 000000000..4ec251698 --- /dev/null +++ b/charts/cluster/templates/cluster.yaml @@ -0,0 +1,64 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: {{ include "cluster.fullname" . 
}} + {{- with .Values.cluster.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "cluster.labels" . | nindent 4 }} + {{- with .Values.cluster.additionalLabels }} + {{ toYaml . | nindent 4 }} + {{- end }} +spec: + instances: {{ .Values.cluster.instances }} + imageName: {{ include "cluster.imageName" . }} + imagePullPolicy: {{ .Values.cluster.imagePullPolicy }} + {{- with .Values.cluster.imagePullSecrets}} + imagePullSecrets: + {{- . | toYaml | nindent 4 }} + {{- end }} + storage: + size: {{ .Values.cluster.storage.size }} + storageClass: {{ .Values.cluster.storage.storageClass }} + + {{- with .Values.cluster.resources }} + resources: + {{- toYaml . | nindent 4 }} + {{ end }} + {{- with .Values.cluster.affinity }} + affinity: + {{- toYaml . | nindent 4 }} + {{- end }} + priorityClassName: {{ .Values.cluster.priorityClassName }} + + primaryUpdateMethod: {{ .Values.cluster.primaryUpdateMethod }} + primaryUpdateStrategy: {{ .Values.cluster.primaryUpdateStrategy }} + logLevel: {{ .Values.cluster.logLevel }} + {{- with .Values.cluster.certificates }} + certificates: + {{- toYaml . | nindent 4 }} + {{ end }} + enableSuperuserAccess: {{ .Values.cluster.enableSuperuserAccess }} + superuserSecret: {{ .Values.cluster.superuserSecret }} + + postgresql: + shared_preload_libraries: + {{- if eq .Values.type "timescaledb" }} + - timescaledb + {{- end }} + {{- with .Values.cluster.postgresql }} + parameters: + {{- toYaml . | nindent 6 }} + {{ end }} + + monitoring: + enablePodMonitor: {{ .Values.cluster.monitoring.enablePodMonitor }} + {{- if not (empty .Values.cluster.monitoring.customQueries) }} + customQueriesConfigMap: + - name: {{ include "cluster.fullname" . }}-monitoring + key: custom-queries + {{- end }} + {{ include "cluster.bootstrap" . | nindent 2 }} + {{ include "cluster.backup" . | nindent 2 }} diff --git a/charts/cluster/templates/pooler.yaml b/charts/cluster/templates/pooler.yaml new file mode 100644 index 000000000..5a606d96d --- /dev/null +++ b/charts/cluster/templates/pooler.yaml @@ -0,0 +1,15 @@ +{{ if .Values.pooler.enabled }} +apiVersion: postgresql.cnpg.io/v1 +kind: Pooler +metadata: + name: {{ include "cluster.fullname" . }}-pooler-rw +spec: + cluster: + name: {{ include "cluster.fullname" . }} + instances: {{ .Values.pooler.instances }} + type: rw + pgbouncer: + poolMode: {{ .Values.pooler.poolMode }} + parameters: + {{- .Values.pooler.parameters | toYaml | nindent 6 }} +{{ end }} \ No newline at end of file diff --git a/charts/cluster/templates/scheduled-backups.yaml b/charts/cluster/templates/scheduled-backups.yaml new file mode 100644 index 000000000..36fbc4471 --- /dev/null +++ b/charts/cluster/templates/scheduled-backups.yaml @@ -0,0 +1,17 @@ +{{ if .Values.backups.enabled }} +{{ $context := . -}} +{{ range .Values.backups.scheduledBackups -}} +--- +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: {{ include "cluster.fullname" $context }}-{{ .name }} + labels: {{ include "cluster.labels" $context | nindent 4 }} +spec: + immediate: true + schedule: {{ .schedule }} + backupOwnerReference: {{ .backupOwnerReference }} + cluster: + name: {{ include "cluster.fullname" $context }} +{{ end -}} +{{ end }} diff --git a/charts/cluster/templates/tests/ping.yaml b/charts/cluster/templates/tests/ping.yaml new file mode 100644 index 000000000..95a474630 --- /dev/null +++ b/charts/cluster/templates/tests/ping.yaml @@ -0,0 +1,37 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "cluster.fullname" . 
}}-ping-test + labels: + app.kubernetes.io/component: database-ping-test + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + metadata: + name: {{ include "cluster.fullname" . }}-ping-test + labels: + app.kubernetes.io/component: database-ping-test + spec: + restartPolicy: Never + containers: + - name: alpine + image: alpine:3.17 + command: [ 'sh' ] + env: + - name: PGUSER + valueFrom: + secretKeyRef: + name: {{ include "cluster.fullname" . }}-app + key: username + - name: PGPASS + valueFrom: + secretKeyRef: + name: {{ include "cluster.fullname" . }}-app + key: password + args: + - "-c" + - >- + apk add postgresql-client && + psql "postgresql://$PGUSER:$PGPASS@{{ include "cluster.fullname" . }}-rw.{{ .Release.Namespace }}.svc.cluster.local:5432" -c 'SELECT 1' diff --git a/charts/cluster/templates/user-metrics.yaml b/charts/cluster/templates/user-metrics.yaml new file mode 100644 index 000000000..e01039661 --- /dev/null +++ b/charts/cluster/templates/user-metrics.yaml @@ -0,0 +1,17 @@ +{{- if not (empty .Values.cluster.monitoring.customQueries) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "cluster.fullname" . }}-monitoring + labels: + cnpg.io/reload: "" + {{- include "cluster.labels" . | nindent 4 }} +data: + custom-queries: | + {{- range .Values.cluster.monitoring.customQueries }} + {{ .name }}: + query: {{ .query | quote }} + metrics: + {{- .metrics | toYaml | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/cluster/values.yaml b/charts/cluster/values.yaml new file mode 100644 index 000000000..dec1fc9b4 --- /dev/null +++ b/charts/cluster/values.yaml @@ -0,0 +1,223 @@ +# -- Override the name of the chart +nameOverride: "" +# -- Override the full name of the chart +fullnameOverride: "" + +### +# -- Type of the CNPG database. Available types: +# * `postgresql` +# * `postgis` +type: postgresql + +### +# -- Cluster mode of operation. Available modes: +# * `standalone` - default mode. Creates new or updates an existing CNPG cluster. +# * `replica` - Creates a replica cluster from an existing CNPG cluster. # TODO +# * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup. +mode: standalone + +recovery: + ## + # -- Available recovery methods: + # * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. + # * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). + # * `pg_basebackup` - Recovers a CNPG cluster viaa streaming replication protocol. Useful if you want to + # migrate databases to CloudNativePG, even from outside Kubernetes. # TODO + method: backup + + ## -- Point in time recovery target. Specify one of the following: + pitrTarget: + # -- Time in RFC3339 format + time: "" + + ## + # -- Backup Recovery Method + backupName: "" # Name of the backup to recover from. Required if method is `backup`. + + ## + # -- Object Store Recovery Method + clusterName: "" + # -- Overrides the provider specific default endpoint. Defaults to: + # S3: https://s3..amazonaws.com" + # Leave empty if using the default S3 endpoint + endpointURL: "" + # -- Overrides the provider specific default path. 
diff --git a/charts/cluster/values.yaml b/charts/cluster/values.yaml
new file mode 100644
index 000000000..dec1fc9b4
--- /dev/null
+++ b/charts/cluster/values.yaml
@@ -0,0 +1,223 @@
+# -- Override the name of the chart
+nameOverride: ""
+# -- Override the full name of the chart
+fullnameOverride: ""
+
+###
+# -- Type of the CNPG database. Available types:
+# * `postgresql`
+# * `postgis`
+type: postgresql
+
+###
+# -- Cluster mode of operation. Available modes:
+# * `standalone` - default mode. Creates new or updates an existing CNPG cluster.
+# * `replica` - Creates a replica cluster from an existing CNPG cluster. # TODO
+# * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup.
+mode: standalone
+
+recovery:
+  ##
+  # -- Available recovery methods:
+  # * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported). Needs to be on the same cluster in the same namespace.
+  # * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported).
+  # * `pg_basebackup` - Recovers a CNPG cluster via the streaming replication protocol. Useful if you want to
+  #   migrate databases to CloudNativePG, even from outside Kubernetes. # TODO
+  method: backup
+
+  ## -- Point in time recovery target. Specify one of the following:
+  pitrTarget:
+    # -- Time in RFC3339 format
+    time: ""
+
+  ##
+  # -- Backup Recovery Method
+  backupName: ""  # Name of the backup to recover from. Required if method is `backup`.
+
+  ##
+  # -- Object Store Recovery Method
+  clusterName: ""
+  # -- Overrides the provider specific default endpoint. Defaults to:
+  # S3: https://s3.<region>.amazonaws.com
+  # Leave empty if using the default S3 endpoint
+  endpointURL: ""
+  # -- Overrides the provider specific default path. Defaults to:
+  # S3: s3://<bucket><path>
+  # Azure: https://<storageAccount>.<serviceName>.core.windows.net/<containerName><path>
+  # Google: gs://<bucket><path>
+  destinationPath: ""
+  # -- One of `s3`, `azure` or `google`
+  provider: s3
+  s3:
+    region: ""
+    bucket: ""
+    path: "/"
+    accessKey: ""
+    secretKey: ""
+  azure:
+    path: "/"
+    connectionString: ""
+    storageAccount: ""
+    storageKey: ""
+    storageSasToken: ""
+    containerName: ""
+    serviceName: blob
+    inheritFromAzureAD: false
+  google:
+    path: "/"
+    bucket: ""
+    gkeEnvironment: false
+    applicationCredentials: ""
+
+
+cluster:
+  # -- Number of instances
+  instances: 3
+
+  # -- Name of the container image, supporting both tags (`<image>:<tag>`) and digests for deterministic and repeatable deployments:
+  # `<image>:<tag>@sha256:<digestValue>`
+  imageName: ""  # Default value depends on type (postgresql/postgis/timescaledb)
+
+  # -- Image pull policy. One of Always, Never or IfNotPresent. If not defined, it defaults to IfNotPresent. Cannot be updated.
+  # More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+  imagePullPolicy: IfNotPresent
+
+  # -- The list of pull secrets to be used to pull the images
+  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-LocalObjectReference
+  imagePullSecrets: []
+
+  storage:
+    size: 8Gi
+    storageClass: ""
+
+  # -- Resource requirements of every generated Pod.
+  # Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information.
+  # We strongly advise you use the same setting for limits and requests so that your cluster pods are given a Guaranteed QoS.
+  # See: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/
+  resources:
+    # limits:
+    #   cpu: 2000m
+    #   memory: 8Gi
+    # requests:
+    #   cpu: 2000m
+    #   memory: 8Gi
+
+  priorityClassName: ""
+
+  # -- Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+  # successfully updated. It can be switchover (default) or in-place (restart).
+  primaryUpdateMethod: switchover
+
+  # -- Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+  # successfully updated: it can be automated (unsupervised - default) or manual (supervised).
+  primaryUpdateStrategy: unsupervised
+
+  # -- The instances' log level, one of the following values: error, warning, info (default), debug, trace
+  logLevel: "info"
+
+  # -- Affinity/Anti-affinity rules for Pods
+  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
+  affinity:
+    topologyKey: topology.kubernetes.io/zone
+
+  # -- The configuration for the CA and related certificates
+  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-CertificatesConfiguration
+  certificates:
+
+  # -- When this option is enabled, the operator will use the SuperuserSecret to update the postgres user password.
+  # If the secret is not present, the operator will automatically create one.
+  # When this option is disabled, the operator will ignore the SuperuserSecret content, delete it when automatically created,
+  # and then blank the password of the postgres user by setting it to NULL.
+  enableSuperuserAccess: true
+  superuserSecret: ""
+
+  monitoring:
+    enablePodMonitor: false
+    customQueries: []
+    # - name: "pg_cache_hit_ratio"
+    #   query: "SELECT current_database() as datname, sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM pg_statio_user_tables;"
+    #   metrics:
+    #     - datname:
+    #         usage: "LABEL"
+    #         description: "Name of the database"
+    #     - ratio:
+    #         usage: GAUGE
+    #         description: "Cache hit ratio"
+
+  # -- Configuration of the PostgreSQL server
+  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration
+  postgresql:
+
+  # -- BootstrapInitDB is the configuration of the bootstrap process when initdb is used
+  # See: https://cloudnative-pg.io/documentation/current/bootstrap/
+  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb
+  initdb: {}
+  # database: app
+  # owner: ""  # Defaults to the database name
+  # secret: ""  # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch
+  # postInitSQL:
+  #   - CREATE EXTENSION IF NOT EXISTS vector;
+
+  additionalLabels: {}
+  annotations: {}
+
+
+backups:
+  # -- You need to configure backups manually, so backups are disabled by default.
+  enabled: false
+
+  # -- Overrides the provider specific default endpoint. Defaults to:
+  # S3: https://s3.<region>.amazonaws.com
+  endpointURL: ""  # Leave empty if using the default S3 endpoint
+
+  # -- Overrides the provider specific default path. Defaults to:
+  # S3: s3://<bucket><path>
+  # Azure: https://<storageAccount>.<serviceName>.core.windows.net/<containerName><path>
+  # Google: gs://<bucket><path>
+  destinationPath: ""
+  # -- One of `s3`, `azure` or `google`
+  provider: s3
+  s3:
+    region: ""
+    bucket: ""
+    path: "/"
+    accessKey: ""
+    secretKey: ""
+  azure:
+    path: "/"
+    connectionString: ""
+    storageAccount: ""
+    storageKey: ""
+    storageSasToken: ""
+    containerName: ""
+    serviceName: blob
+    inheritFromAzureAD: false
+  google:
+    path: "/"
+    bucket: ""
+    gkeEnvironment: false
+    applicationCredentials: ""
+
+  scheduledBackups:
+    -
+      # -- Scheduled backup name
+      name: daily-backup
+      # -- Schedule in cron format (six fields, including seconds)
+      schedule: "0 0 0 * * *"
+      # -- Backup owner reference
+      backupOwnerReference: self
+
+  # -- Retention policy for backups
+  retentionPolicy: "30d"
+
+pooler:
+  # -- Whether to enable PgBouncer
+  enabled: false
+  # -- PgBouncer pooling mode
+  poolMode: transaction
+  # -- Number of PgBouncer instances
+  instances: 3
+  # -- PgBouncer configuration parameters
+  parameters:
+    max_client_conn: "1000"
+    default_pool_size: "25"
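Tying the values above together: a minimal override file for a standalone cluster with S3 backups and the PgBouncer pooler enabled could look roughly like the sketch below. The file name, region, bucket and credentials are placeholders, and it would be applied with something like `helm upgrade --install database charts/cluster --values my-cluster.values.yaml`.

```yaml
# my-cluster.values.yaml -- illustrative override file, not part of the chart
type: postgresql
mode: standalone

cluster:
  instances: 3
  storage:
    size: 20Gi

backups:
  enabled: true
  provider: s3
  s3:
    region: "eu-central-1"        # placeholder
    bucket: "my-backup-bucket"    # placeholder
    path: "/my-cluster"
    accessKey: "<access-key>"     # placeholder credentials
    secretKey: "<secret-key>"

pooler:
  enabled: true
  poolMode: transaction
  instances: 2
```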