From 1b638d9d17a26793a6592c572f269e2859419f3a Mon Sep 17 00:00:00 2001
From: Itay Grudev
Date: Tue, 10 Sep 2024 02:10:32 +0300
Subject: [PATCH 1/8] ParadeDB Support (#1)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>
Co-authored-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

ci: AWS EKS LocalStack Tests (#14)

Co-authored-by: Itay Grudev
Co-authored-by: Itay Grudev

Updated documentation workflows and cleanup (#18)

Revert "Add postgresql default to tests"

This reverts commit a9f340d28944ce6a194c23d36c75f4125a902409.

Add repository_dispatch

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

chore: Configure repository for ParadeDB (#15)

Co-authored-by: Itay Grudev

chore: Create artifacthub-repo.yml (#20)

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

Put artifacthub-repo.yml in the right location

Bug Fix: Tests (#21)

Renamed chart to `paradedb-cluster` (#22)

Co-authored-by: Philippe Noël

Rm .DS_Store

Remove extra HTML tag

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

Remove repository_dispatch

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

chore: Rename paradedb-cluster to paradedb (#28)

chore: Try with adding missing -cluster (#33)

chore: Final Cleanup (#34)

chore: Remove PostGIS and Timescale (#35)

Using the default UID/GID 999 from the postgres docker image (#26)

Co-authored-by: Philippe Noël

chore: Update README and Schema (#37)

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

chore(deps): pin sigstore/cosign-installer action to 4959ce0 (#403)

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

Rm pgvectorscale (#39)

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

feat: Add GitHub Actions Workflow to check for typos (#40)

Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>
Co-authored-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com>

chore(deps): update kyverno/action-install-chainsaw action to v0.2.11 (#410)

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

Rename the default database from app to paradedb (#41)

Co-authored-by: Philippe Noël

docs: Clarify the docs (#42)

Update Depot

Revert "Update Depot"

This reverts commit cb85b818e72793888b1a3a1e799a18cfaf8b04b3.
feat: Add `pg_cron` and Postgis (#44) Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com> --- .codespellignore | 1 + .github/CODEOWNERS | 4 + .github/FUNDING.yml | 12 + .github/ISSUE_TEMPLATE/bug_report.md | 16 + .github/ISSUE_TEMPLATE/config.yml | 1 + .github/ISSUE_TEMPLATE/feature_request.md | 16 + .github/PULL_REQUEST_TEMPLATE.md | 11 + .github/actions/deploy-operator/action.yml | 4 +- .../actions/verify-cluster-ready/action.yml | 32 - .github/config/cr.yaml | 12 + .github/dependabot.yml | 14 + .github/renovate.json5 | 27 - .github/workflows/check-typo.yml | 31 + .github/workflows/paradedb-publish-chart.yml | 109 +++ .github/workflows/paradedb-test-eks.yml | 118 +++ .github/workflows/release-pr.yml | 31 - .github/workflows/release-publish.yml | 88 -- .github/workflows/tests-cluster-chainsaw.yaml | 27 +- .github/workflows/tests-operator.yml | 41 - .gitignore | 5 +- .markdownlint.yaml | 4 + .pre-commit-config.yaml | 44 + .prettierignore | 5 + CODEOWNERS | 7 - CODE-OF-CONDUCT.md => CODE_OF_CONDUCT.md | 49 +- CONTRIBUTING.md | 43 +- LICENSE | 863 ++++++++++++++---- Makefile | 8 +- README.md | 113 ++- RELEASE.md | 150 --- SECURITY.md | 21 + artifacthub-repo.yml | 15 + charts/cloudnative-pg/Chart.lock | 6 - charts/cloudnative-pg/LICENSE | 202 ---- .../monitoring/grafana-dashboard.json | 3 - charts/cloudnative-pg/templates/NOTES.txt | 18 - charts/cloudnative-pg/templates/_helpers.tpl | 62 -- charts/cloudnative-pg/templates/config.yaml | 45 - .../cloudnative-pg/templates/deployment.yaml | 147 --- .../templates/monitoring-configmap.yaml | 29 - .../mutatingwebhookconfiguration.yaml | 92 -- .../cloudnative-pg/templates/podmonitor.yaml | 29 - charts/cloudnative-pg/templates/service.yaml | 34 - .../validatingwebhookconfiguration.yaml | 113 --- charts/cloudnative-pg/values.schema.json | 281 ------ charts/cloudnative-pg/values.yaml | 628 ------------- charts/cluster/.helmignore | 23 - charts/cluster/examples/timescaledb.yaml | 9 - charts/{cluster => paradedb}/.gitignore | 0 .../{cloudnative-pg => paradedb}/.helmignore | 0 charts/{cluster => paradedb}/Chart.yaml | 24 +- charts/{cluster => paradedb}/README.md | 142 +-- charts/{cluster => paradedb}/README.md.gotmpl | 82 +- .../docs/Getting Started.md | 6 +- charts/{cluster => paradedb}/docs/Recovery.md | 0 .../docs/runbooks/CNPGClusterHACritical.md | 0 .../docs/runbooks/CNPGClusterHAWarning.md | 0 .../CNPGClusterHighConnectionsCritical.md | 0 .../CNPGClusterHighConnectionsWarning.md | 0 .../runbooks/CNPGClusterHighReplicationLag.md | 0 .../CNPGClusterInstancesOnSameNode.md | 0 .../CNPGClusterLowDiskSpaceCritical.md | 0 .../CNPGClusterLowDiskSpaceWarning.md | 0 .../docs/runbooks/CNPGClusterOffline.md | 0 .../runbooks/CNPGClusterZoneSpreadWarning.md | 0 .../{cluster => paradedb}/examples/basic.yaml | 0 .../examples/custom-queries.yaml | 0 .../examples/image-catalog-ref.yaml | 2 +- .../examples/image-catalog.yaml | 2 +- .../examples/paradedb.yaml} | 6 +- .../examples/pgbouncer.yaml | 0 .../examples/recovery-backup.yaml | 0 .../examples/recovery-object_store.yaml | 0 .../examples/recovery-pg_basebackup.yaml | 0 .../examples/standalone-s3.yaml | 0 .../prometheus_rules/cluster-ha-critical.yaml | 6 +- .../prometheus_rules/cluster-ha-warning.yaml | 6 +- .../cluster-high_connection-critical.yaml | 6 +- .../cluster-high_connection-warning.yaml | 6 +- .../cluster-high_replication_lag.yaml | 6 +- .../cluster-instances_on_same_node.yaml | 6 +- .../cluster-low_disk_space-critical.yaml | 6 +- .../cluster-low_disk_space-warning.yaml | 6 +- 
.../prometheus_rules/cluster-offline.yaml | 6 +- .../cluster-zone_spread-warning.yaml | 6 +- .../{cluster => paradedb}/templates/NOTES.txt | 4 +- .../templates/_backup.tpl | 0 .../templates/_barman_object_store.tpl | 0 .../templates/_bootstrap.tpl | 43 +- .../templates/_colorize.tpl | 0 .../templates/_helpers.tpl | 27 +- .../templates/backup-azure-creds.yaml | 0 .../templates/backup-google-creds.yaml | 0 .../templates/backup-s3-creds.yaml | 0 .../templates/ca-bundle.yaml | 0 .../templates/cluster.yaml | 6 +- .../templates/image-catalog.yaml | 0 .../templates/pooler.yaml | 0 .../templates/prometheus-rule.yaml | 0 .../templates/recovery-azure-creds.yaml | 0 .../templates/recovery-google-creds.yaml | 0 .../recovery-pg_basebackup-password.yaml | 0 .../templates/recovery-s3-creds.yaml | 0 .../templates/scheduled-backups.yaml | 0 .../templates/tests/ping.yaml | 0 .../templates/user-metrics.yaml | 0 .../01-monitoring_cluster-assert.yaml | 16 +- .../monitoring/01-monitoring_cluster.yaml | 0 .../test/monitoring/chainsaw-test.yaml | 0 .../00-minio_cleanup-assert.yaml | 0 .../00-minio_cleanup.yaml | 2 +- .../01-paradedb_cluster-assert.yaml} | 2 +- .../01-paradedb_cluster.yaml} | 5 +- .../02-paradedb_write-assert.yaml} | 2 +- .../02-paradedb_write.yaml | 33 + .../03-paradedb_test-assert.yaml | 6 + .../03-paradedb_test.yaml} | 11 +- .../04-data_write-assert.yaml | 0 .../04-data_write.yaml | 2 +- .../05-backup.yaml | 2 +- .../05-backup_completed-assert.yaml | 2 +- .../05-backup_running-assert.yaml | 2 +- .../05-checkpoint.yaml | 2 +- .../06-post_backup_data_write-assert.yaml | 0 .../06-post_backup_data_write.yaml | 2 +- ...7-recovery_backup_pitr_cluster-assert.yaml | 2 +- .../07-recovery_backup_pitr_cluster.yaml | 6 +- .../08-data_test-assert.yaml | 0 .../08-data_test.yaml | 2 +- .../chainsaw-test.yaml | 41 +- .../test/pooler/01-pooler_cluster-assert.yaml | 6 +- .../test/pooler/01-pooler_cluster.yaml | 0 .../test/pooler/chainsaw-test.yaml | 0 ..._default_configuration_cluster-assert.yaml | 5 +- .../01-non_default_configuration_cluster.yaml | 3 + .../chainsaw-test.yaml | 0 .../00-minio_cleanup-assert.yaml | 0 .../00-minio_cleanup.yaml | 0 .../01-standalone_cluster-assert.yaml | 2 +- .../01-standalone_cluster.yaml | 0 .../02-data_write-assert.yaml | 0 .../02-data_write.yaml | 2 +- .../03-backup.yaml | 2 +- .../03-backup_completed-assert.yaml | 2 +- .../03-backup_running-assert.yaml | 2 +- .../03-checkpoint.yaml | 2 +- .../04-post_backup_data_write-assert.yaml | 0 .../04-post_backup_data_write.yaml | 2 +- .../05-recovery_backup_cluster-assert.yaml | 2 +- .../05-recovery_backup_cluster.yaml | 0 .../06-data_test-assert.yaml | 0 .../06-data_test.yaml | 2 +- ...-recovery_object_store_cluster-assert.yaml | 2 +- .../07-recovery_object_store_cluster.yaml | 2 +- .../08-data_test-assert.yaml | 0 .../08-data_test.yaml | 2 +- ...9-recovery_backup_pitr_cluster-assert.yaml | 2 +- .../09-recovery_backup_pitr_cluster.yaml | 0 .../10-data_test-assert.yaml | 0 .../10-data_test.yaml | 2 +- .../chainsaw-test.yaml | 0 .../00-source-cluster-assert.yaml | 2 +- .../00-source-cluster.yaml | 0 .../01-data_write-assert.yaml | 0 .../01-data_write.yaml | 6 +- .../02-pg_basebackup-cluster-assert.yaml | 2 +- .../02-pg_basebackup-cluster.yaml | 6 +- .../03-data_test-assert.yaml | 0 .../03-data_test.yaml | 2 +- .../chainsaw-test.yaml | 0 .../{cluster => paradedb}/values.schema.json | 12 +- charts/{cluster => paradedb}/values.yaml | 32 +- provenance.gpg | 83 -- 173 files changed, 1616 insertions(+), 2758 deletions(-) create mode 100644 
.codespellignore create mode 100644 .github/CODEOWNERS create mode 100644 .github/FUNDING.yml create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 .github/actions/verify-cluster-ready/action.yml create mode 100644 .github/config/cr.yaml create mode 100644 .github/dependabot.yml delete mode 100644 .github/renovate.json5 create mode 100644 .github/workflows/check-typo.yml create mode 100644 .github/workflows/paradedb-publish-chart.yml create mode 100644 .github/workflows/paradedb-test-eks.yml delete mode 100644 .github/workflows/release-pr.yml delete mode 100644 .github/workflows/release-publish.yml delete mode 100644 .github/workflows/tests-operator.yml create mode 100644 .markdownlint.yaml create mode 100644 .pre-commit-config.yaml create mode 100644 .prettierignore delete mode 100644 CODEOWNERS rename CODE-OF-CONDUCT.md => CODE_OF_CONDUCT.md (72%) delete mode 100644 RELEASE.md create mode 100644 SECURITY.md create mode 100644 artifacthub-repo.yml delete mode 100644 charts/cloudnative-pg/Chart.lock delete mode 100644 charts/cloudnative-pg/LICENSE delete mode 100644 charts/cloudnative-pg/monitoring/grafana-dashboard.json delete mode 100644 charts/cloudnative-pg/templates/NOTES.txt delete mode 100644 charts/cloudnative-pg/templates/_helpers.tpl delete mode 100644 charts/cloudnative-pg/templates/config.yaml delete mode 100644 charts/cloudnative-pg/templates/deployment.yaml delete mode 100644 charts/cloudnative-pg/templates/monitoring-configmap.yaml delete mode 100644 charts/cloudnative-pg/templates/mutatingwebhookconfiguration.yaml delete mode 100644 charts/cloudnative-pg/templates/podmonitor.yaml delete mode 100644 charts/cloudnative-pg/templates/service.yaml delete mode 100644 charts/cloudnative-pg/templates/validatingwebhookconfiguration.yaml delete mode 100644 charts/cloudnative-pg/values.schema.json delete mode 100644 charts/cloudnative-pg/values.yaml delete mode 100644 charts/cluster/.helmignore delete mode 100644 charts/cluster/examples/timescaledb.yaml rename charts/{cluster => paradedb}/.gitignore (100%) rename charts/{cloudnative-pg => paradedb}/.helmignore (100%) rename charts/{cluster => paradedb}/Chart.yaml (56%) rename charts/{cluster => paradedb}/README.md (61%) rename charts/{cluster => paradedb}/README.md.gotmpl (82%) rename charts/{cluster => paradedb}/docs/Getting Started.md (96%) rename charts/{cluster => paradedb}/docs/Recovery.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterHACritical.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterHAWarning.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterHighConnectionsCritical.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterHighConnectionsWarning.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterHighReplicationLag.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterInstancesOnSameNode.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterOffline.md (100%) rename charts/{cluster => paradedb}/docs/runbooks/CNPGClusterZoneSpreadWarning.md (100%) rename charts/{cluster => paradedb}/examples/basic.yaml (100%) rename 
charts/{cluster => paradedb}/examples/custom-queries.yaml (100%) rename charts/{cluster => paradedb}/examples/image-catalog-ref.yaml (88%) rename charts/{cluster => paradedb}/examples/image-catalog.yaml (90%) rename charts/{cluster/examples/postgis.yaml => paradedb/examples/paradedb.yaml} (57%) rename charts/{cluster => paradedb}/examples/pgbouncer.yaml (100%) rename charts/{cluster => paradedb}/examples/recovery-backup.yaml (100%) rename charts/{cluster => paradedb}/examples/recovery-object_store.yaml (100%) rename charts/{cluster => paradedb}/examples/recovery-pg_basebackup.yaml (100%) rename charts/{cluster => paradedb}/examples/standalone-s3.yaml (100%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-ha-critical.yaml (82%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-ha-warning.yaml (79%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-high_connection-critical.yaml (68%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-high_connection-warning.yaml (68%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-high_replication_lag.yaml (68%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-instances_on_same_node.yaml (67%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-low_disk_space-critical.yaml (84%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-low_disk_space-warning.yaml (84%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-offline.yaml (66%) rename charts/{cluster => paradedb}/prometheus_rules/cluster-zone_spread-warning.yaml (72%) rename charts/{cluster => paradedb}/templates/NOTES.txt (98%) rename charts/{cluster => paradedb}/templates/_backup.tpl (100%) rename charts/{cluster => paradedb}/templates/_barman_object_store.tpl (100%) rename charts/{cluster => paradedb}/templates/_bootstrap.tpl (67%) rename charts/{cluster => paradedb}/templates/_colorize.tpl (100%) rename charts/{cluster => paradedb}/templates/_helpers.tpl (75%) rename charts/{cluster => paradedb}/templates/backup-azure-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/backup-google-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/backup-s3-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/ca-bundle.yaml (100%) rename charts/{cluster => paradedb}/templates/cluster.yaml (97%) rename charts/{cluster => paradedb}/templates/image-catalog.yaml (100%) rename charts/{cluster => paradedb}/templates/pooler.yaml (100%) rename charts/{cluster => paradedb}/templates/prometheus-rule.yaml (100%) rename charts/{cluster => paradedb}/templates/recovery-azure-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/recovery-google-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/recovery-pg_basebackup-password.yaml (100%) rename charts/{cluster => paradedb}/templates/recovery-s3-creds.yaml (100%) rename charts/{cluster => paradedb}/templates/scheduled-backups.yaml (100%) rename charts/{cluster => paradedb}/templates/tests/ping.yaml (100%) rename charts/{cluster => paradedb}/templates/user-metrics.yaml (100%) rename charts/{cluster => paradedb}/test/monitoring/01-monitoring_cluster-assert.yaml (89%) rename charts/{cluster => paradedb}/test/monitoring/01-monitoring_cluster.yaml (100%) rename charts/{cluster => paradedb}/test/monitoring/chainsaw-test.yaml (100%) rename charts/{cluster/test/postgresql-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/00-minio_cleanup-assert.yaml (100%) rename charts/{cluster/test/timescale-minio-backup-restore 
=> paradedb/test/paradedb-minio-backup-restore}/00-minio_cleanup.yaml (83%) rename charts/{cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml => paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster-assert.yaml} (76%) rename charts/{cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml => paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster.yaml} (90%) rename charts/{cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml => paradedb/test/paradedb-minio-backup-restore/02-paradedb_write-assert.yaml} (73%) create mode 100644 charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml create mode 100644 charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test-assert.yaml rename charts/{cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml => paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml} (55%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/04-data_write-assert.yaml (100%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/04-data_write.yaml (96%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/05-backup.yaml (81%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/05-backup_completed-assert.yaml (84%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/05-backup_running-assert.yaml (84%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/05-checkpoint.yaml (93%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/06-post_backup_data_write-assert.yaml (100%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/06-post_backup_data_write.yaml (93%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/07-recovery_backup_pitr_cluster-assert.yaml (69%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/07-recovery_backup_pitr_cluster.yaml (92%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/08-data_test-assert.yaml (100%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/08-data_test.yaml (93%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/paradedb-minio-backup-restore}/chainsaw-test.yaml (76%) rename charts/{cluster => paradedb}/test/pooler/01-pooler_cluster-assert.yaml (84%) rename charts/{cluster => paradedb}/test/pooler/01-pooler_cluster.yaml (100%) rename charts/{cluster => paradedb}/test/pooler/chainsaw-test.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml (93%) rename charts/{cluster => paradedb}/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml (95%) rename charts/{cluster => paradedb}/test/postgresql-cluster-configuration/chainsaw-test.yaml (100%) rename charts/{cluster/test/timescale-minio-backup-restore => paradedb/test/postgresql-minio-backup-restore}/00-minio_cleanup-assert.yaml (100%) rename charts/{cluster => 
paradedb}/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml (75%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/02-data_write-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/02-data_write.yaml (91%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/03-backup.yaml (80%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml (83%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml (83%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/03-checkpoint.yaml (92%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml (96%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml (72%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/06-data_test-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/06-data_test.yaml (91%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml (68%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml (96%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/08-data_test-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/08-data_test.yaml (90%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml (69%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/10-data_test-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/10-data_test.yaml (93%) rename charts/{cluster => paradedb}/test/postgresql-minio-backup-restore/chainsaw-test.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml (78%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/00-source-cluster.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/01-data_write-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/01-data_write.yaml (80%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml (73%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml (73%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/03-data_test-assert.yaml (100%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/03-data_test.yaml (91%) rename charts/{cluster => paradedb}/test/postgresql-pg_basebackup/chainsaw-test.yaml (100%) rename charts/{cluster => paradedb}/values.schema.json (98%) rename charts/{cluster => paradedb}/values.yaml (96%) 
delete mode 100644 provenance.gpg diff --git a/.codespellignore b/.codespellignore new file mode 100644 index 000000000..4719f383d --- /dev/null +++ b/.codespellignore @@ -0,0 +1 @@ +socio-economic diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..e8a17c3d8 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,4 @@ +# Order is important. The last matching pattern has the most precedence. In each subsection folders are ordered first by depth, then alphabetically + +/.github/ @philippemnoel +/charts/ @philippemnoel diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..afa87dc2d --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: [paradedb] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..3cdee547e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,16 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "" +labels: "" +assignees: "" +--- + +**Bug Description** +Please describe the bug. + +**How To Reproduce** +Please describe how to reproduce the bug. + +**Proposed Fix** +Please describe how you think this bug could be fixed. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..3ba13e0ce --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..ea75ee063 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,16 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "" +labels: "" +assignees: "" +--- + +**What** +Please describe the feature. + +**Why** +Please describe why this feature is important. + +**How** +Please describe how you'd implement this feature. 
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..8c45227cc --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,11 @@ +# Ticket(s) Closed + +- Closes # + +## What + +## Why + +## How + +## Tests diff --git a/.github/actions/deploy-operator/action.yml b/.github/actions/deploy-operator/action.yml index a1fea523d..65d486b33 100644 --- a/.github/actions/deploy-operator/action.yml +++ b/.github/actions/deploy-operator/action.yml @@ -6,11 +6,11 @@ runs: - name: Deploy the operator shell: bash run: - helm dependency update charts/cloudnative-pg + helm repo add cnpg https://cloudnative-pg.github.io/charts helm upgrade --install --namespace cnpg-system --create-namespace --wait - cnpg charts/cloudnative-pg + cnpg cnpg/cloudnative-pg diff --git a/.github/actions/verify-cluster-ready/action.yml b/.github/actions/verify-cluster-ready/action.yml deleted file mode 100644 index 7e3a522b8..000000000 --- a/.github/actions/verify-cluster-ready/action.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Verifies that a CNPG cluster has a certain amount of ready instances -description: Verifies that a CNPG cluster has a certain amount of ready instances -inputs: - cluster-name: - description: The name of the cluster to verify - required: true - default: database-cluster - ready-instances: - description: The amount of ready instances to wait for - required: true - default: "3" - -runs: - using: composite - steps: - - name: Wait for the cluster to become ready - shell: bash - run: | - ITER=0 - while true; do - if [[ $ITER -ge 300 ]]; then - echo "Cluster not ready" - exit 1 - fi - READY_INSTANCES=$(kubectl get clusters.postgresql.cnpg.io ${INPUT_CLUSTER_NAME} -o jsonpath='{.status.readyInstances}') - if [[ "$READY_INSTANCES" == ${INPUT_READY_INSTANCES} ]]; then - echo "Cluster up and running" - break - fi - sleep 1 - (( ++ITER )) - done diff --git a/.github/config/cr.yaml b/.github/config/cr.yaml new file mode 100644 index 000000000..e5d939a3d --- /dev/null +++ b/.github/config/cr.yaml @@ -0,0 +1,12 @@ +## Reference: https://github.com/helm/chart-releaser +index-path: "./index.yaml" + +# PGP signing +sign: true +key: ParadeDB +# keyring: # Set via env variable CR_KEYRING +# passphrase-file: # Set via env variable CR_PASSPHRASE_FILE + +# Enable automatic generation of release notes using GitHub's release notes generator. 
+# see: https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes +generate-release-notes: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..e5034579f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 + +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] + groups: + github-actions-dependencies: + patterns: + - "*" diff --git a/.github/renovate.json5 b/.github/renovate.json5 deleted file mode 100644 index 1fb395251..000000000 --- a/.github/renovate.json5 +++ /dev/null @@ -1,27 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:base" - ], - "prConcurrentLimit": 5, - "semanticCommits": "enabled", - "regexManagers": [ - { - "fileMatch": ["charts\\/cloudnative-pg\\/Chart\\.yaml$"], - "matchStrings": [ - "appVersion: \"(?.*?)\"", - ], - "datasourceTemplate": "docker", - "depNameTemplate": "ghcr.io/cloudnative-pg/cloudnative-pg", - "versioningTemplate": "loose" - }, - ], - "packageRules": [ - { - "matchDepTypes": [ - "action" - ], - "pinDigests": true - }, - ] -} diff --git a/.github/workflows/check-typo.yml b/.github/workflows/check-typo.yml new file mode 100644 index 000000000..8b64424aa --- /dev/null +++ b/.github/workflows/check-typo.yml @@ -0,0 +1,31 @@ +# workflows/check-typo.yml +# +# Check Typo +# Check Typo using codespell. + +name: Check Typo + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + workflow_dispatch: + +concurrency: + group: check-typo-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + check-typo: + name: Check Typo using codespell + runs-on: depot-ubuntu-latest-2 + if: github.event.pull_request.draft == false + + steps: + - name: Checkout Git Repository + uses: actions/checkout@v4 + + - name: Check Typo using codespell + uses: codespell-project/actions-codespell@v2 + with: + check_filenames: true + ignore_words_file: .codespellignore diff --git a/.github/workflows/paradedb-publish-chart.yml b/.github/workflows/paradedb-publish-chart.yml new file mode 100644 index 000000000..1b3574f86 --- /dev/null +++ b/.github/workflows/paradedb-publish-chart.yml @@ -0,0 +1,109 @@ +# workflows/paradedb-publish-chart.yml +# +# ParadeDB Publish Chart +# Publish the ParadeDB Helm chart to paradedb.github.io via GitHub Pages. This workflow also +# triggers the creation of a GitHub Release. It only runs on pushes to `main` or when we trigger +# a workflow_dispatch event, either manually or via creating a release in `paradedb/paradedb`. + +name: ParadeDB Publish Chart + +on: + push: + branches: + - main + workflow_dispatch: + inputs: + appVersion: + description: "The ParadeDB version to publish in the Helm Chart (e.g. 
0.1.0)" + required: true + default: "" + +concurrency: + group: paradedb-publish-chart-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + paradedb-publish-chart: + name: Publish ParadeDB Helm Charts to GitHub Pages + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Set Helm Chart Release Versions + id: set_versions + working-directory: charts/paradedb/ + env: + GH_TOKEN: ${{ secrets.GHA_CREATE_RELEASE_PAT }} + run: | + # If no appVersion is provided, we use the latest ParadeDB version + if [ -z "${{ github.event.inputs.appVersion }}" ]; then + LATEST_TAG=$(curl -s https://api.github.com/repos/paradedb/paradedb/tags | jq -r '.[0].name') + APP_VERSION=${LATEST_TAG#v} + else + APP_VERSION=${{ github.event.inputs.appVersion }} + fi + # Update appVersion to the GitHub Release version and version to the Helm Chart version + sed -i "s/^[[:space:]]*paradedb: .*/ paradedb: \"$APP_VERSION\"/" values.yaml + sed -i "s/^version: .*/version: ${{ vars.CHART_VERSION_MAJOR }}.${{ vars.CHART_VERSION_MINOR }}.${{ vars.CHART_VERSION_PATCH }}/" Chart.yaml + echo "values.yaml:" + cat values.yaml + echo "----------------------------------------" + echo "Chart.yaml:" + cat Chart.yaml + + # Set output to update post-release, increasing the Helm Chart version patch number by one to update in GitHub Actions Variables + echo "new_chart_version_patch=$(( ${{ vars.CHART_VERSION_PATCH }} + 1 ))" >> $GITHUB_OUTPUT + + # The GitHub repository secret `PARADEDB_PGP_PRIVATE_KEY` contains the private key + # in ASCII-armored format. To export a (new) key, run this command: + # `gpg --armor --export-secret-key ` + - name: Prepare ParadeDB PGP Key + env: + PGP_PRIVATE_KEY: "${{ secrets.PARADEDB_PGP_PRIVATE_KEY }}" + PGP_PASSPHRASE: "${{ secrets.PARADEDB_PGP_PASSPHRASE }}" + run: | + IFS="" + echo "$PGP_PRIVATE_KEY" | gpg --dearmor --verbose > /tmp/secring.gpg + echo "$PGP_PASSPHRASE" > /tmp/passphrase.txt + + # Tell chart-releaser-action where to find the key and its passphrase + echo "CR_KEYRING=/tmp/secring.gpg" >> "$GITHUB_ENV" + echo "CR_PASSPHRASE_FILE=/tmp/passphrase.txt" >> "$GITHUB_ENV" + + - name: Add Grafana Chart Dependencies + run: helm repo add cnpg-grafana-dashboard https://cloudnative-pg.github.io/grafana-dashboards + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + with: + config: "./.github/config/cr.yaml" + env: + CR_TOKEN: "${{ secrets.GHA_CREATE_RELEASE_PAT }}" + + # We have a separate version for our Helm Chart, since it needs to always increment by + # one for every production release, independently of the ParadeDB version. Any non-patch + # version increment should be done manually in GitHub Actions Variables. 
+ - name: Increment Helm Chart Version Number in GitHub Actions Variables + env: + GH_TOKEN: ${{ secrets.GHA_CREATE_RELEASE_PAT }} + run: | + gh api \ + --method PATCH \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/paradedb/charts/actions/variables/CHART_VERSION_PATCH \ + -f name='CHART_VERSION_PATCH' \ + -f value='${{ steps.set_versions.outputs.new_chart_version_patch }}' + + - name: Securely Delete the PGP Key and Passphrase + if: always() + run: shred --remove=wipesync /tmp/secring.gpg /tmp/passphrase.txt diff --git a/.github/workflows/paradedb-test-eks.yml b/.github/workflows/paradedb-test-eks.yml new file mode 100644 index 000000000..8148e3f86 --- /dev/null +++ b/.github/workflows/paradedb-test-eks.yml @@ -0,0 +1,118 @@ +# workflows/paradedb-test-eks.yml +# +# ParadeDB Test EKS +# Test the ParadeDB Helm chart against a local AWS EKS cluster via LocalStack. This test workflow is +# specific to the ParadeDB cloudnative-pg/charts fork. + +name: ParadeDB Test EKS + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + paths: + - "charts/paradedb/**" + - ".github/workflows/paradedb-test-eks.yml" + workflow_dispatch: + +concurrency: + group: paradedb-test-eks-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + paradedb-test-eks: + name: Test ParadeDB Helm Chart on AWS EKS via LocalStack + runs-on: ubuntu-22.04 # As of October 2024, the LocalStack GitHub Action is not compatible with Ubuntu 24.04 + if: github.event.pull_request.draft == false + + steps: + - name: Checkout Git Repository + uses: actions/checkout@v4 + + - name: Set up Kubectl + uses: azure/setup-kubectl@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4 + + - name: Start LocalStack + uses: LocalStack/setup-localstack@v0.2.3 + with: + image-tag: "latest" + install-awslocal: "true" + configuration: DEBUG=1 + use-pro: "true" + env: + LOCALSTACK_AUTH_TOKEN: ${{ secrets.LOCALSTACK_AUTH_TOKEN }} + + - name: Configure AWS CLI for LocalStack + run: | + awslocal configure set aws_secret_access_key test + awslocal configure set aws_access_key_id test + awslocal configure set region us-east-1 + + # As of writing, the latest Kubernetes version available on LocalStack EKS + # is 1.29. CloudNativePG requires version 1.25+ + - name: Create the LocalStack AWS EKS Cluster + run: | + awslocal --endpoint-url=http://localhost:4566 eks create-cluster \ + --name paradedb-eks \ + --role-arn arn:aws:iam::000000000000:role/eks-service-role \ + --resources-vpc-config subnetIds=subnet-12345 \ + --kubernetes-version 1.29 + + - name: Wait for LocalStack AWS EKS Cluster to be Active + run: | + for i in {1..10}; do + STATUS=$(awslocal --endpoint-url=http://localhost:4566 --region us-east-1 eks describe-cluster --name paradedb-eks --query 'cluster.status' --output text) + if [ "$STATUS" == "ACTIVE" ]; then + echo "Cluster is ACTIVE" + break + else + echo "Cluster status is $STATUS. Waiting..." 
+ sleep 10 + fi + done + + - name: Update Kubeconfig to Use the LocalStack AWS EKS Cluster + run: awslocal --endpoint-url=http://localhost:4566 eks update-kubeconfig --name paradedb-eks + + - name: Wait for the LocalStack AWS EKS Cluster to be Ready + run: | + nodes=$(kubectl get nodes --no-headers -o custom-columns=NAME:.metadata.name) + for node in $nodes; do + kubectl wait --for=condition=ready node/$node --timeout=120s + done + + - name: Install the CloudNativePG Operator + run: | + helm repo add cnpg https://cloudnative-pg.github.io/charts + helm upgrade --install cnpg --namespace cnpg-system --create-namespace cnpg/cloudnative-pg + + - name: Wait for CNPG Webhook Service to be Ready + run: | + kubectl wait --namespace cnpg-system --for=condition=available --timeout=120s deployment/cnpg-cloudnative-pg + kubectl get svc -n cnpg-system cnpg-webhook-service + + - name: Test Helm Dependency Update + working-directory: charts/paradedb/ + run: helm dependency update . --debug + + - name: Fetch the latest ParadeDB release tag + id: paradedb-version + run: | + # Fetch the latest release tag and strip the 'v' prefix + LATEST_TAG=$(curl -s https://api.github.com/repos/paradedb/paradedb/releases/latest | jq -r '.tag_name') + CLEANED_TAG=${LATEST_TAG#v} + echo $CLEANED_TAG + echo "version=$CLEANED_TAG" >> $GITHUB_OUTPUT + + - name: Test Helm Install + working-directory: charts/paradedb/ + run: helm install paradedb . --namespace paradedb --create-namespace --set version.paradedb=${{ steps.paradedb-version.outputs.version }} --debug + + - name: Test Helm Upgrade + working-directory: charts/paradedb/ + run: helm upgrade paradedb . --namespace paradedb --reuse-values --wait --debug + + - name: Test PostgreSQL Connection + run: helm test paradedb --namespace paradedb diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml deleted file mode 100644 index aec68975c..000000000 --- a/.github/workflows/release-pr.yml +++ /dev/null @@ -1,31 +0,0 @@ -## -# Create a PR for a release when a commit is pushed on a release/*-v* branch to support the releases of both the -# operator and cluster charts -name: release-pr - -on: - push: - branches: - - release/*-v* - -permissions: - pull-requests: write - -jobs: - create-pull-request: - runs-on: ubuntu-24.04 - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Create Pull Request - id: create-pr - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - TAG="${GITHUB_REF##*/}" - TITLE="Release ${TAG}" - BODY="Automated PR. Will trigger the ${TAG} release when approved." 
- LABEL=release - ASSIGNEE=${{ github.actor }} - gh pr create --title "${TITLE}" --body "${BODY}" --label "${LABEL}" --assignee "${ASSIGNEE}" || - gh pr edit --title "${TITLE}" --body "${BODY}" --add-label "${LABEL}" diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml deleted file mode 100644 index 4d9a58568..000000000 --- a/.github/workflows/release-publish.yml +++ /dev/null @@ -1,88 +0,0 @@ -name: release-publish - -on: - push: - branches: - - main - -permissions: - contents: write # Required for pushing the Helm charts to the gh-pages branch - packages: write # Required for GHCR access - id-token: write # Required for signing - -jobs: - release: - runs-on: ubuntu-24.04 - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 # important for fetching all history to run comparison against - - - name: Fetch history - run: git fetch --prune - - - name: Configure Git - run: | - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - - - name: Import PGP Private Key - run: | - echo "${{ secrets.PGP_PRIVATE_KEY }}" | gpg --dearmor --output /tmp/keyring.gpg - echo "${{ secrets.PGP_KEY_PASSPHRASE }}" > /tmp/passphrase-file.txt - - - name: Set up Helm - uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 - with: - version: v3.16.2 - - - name: Add chart dependencies - run: | - helm repo add cnpg-grafana-dashboard https://cloudnative-pg.github.io/grafana-dashboards - - - name: Run chart-releaser - uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0 - env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - CR_KEY: helm-charts+no-reply@cloudnative-pg.io - CR_KEYRING: /tmp/keyring.gpg - CR_PASSPHRASE_FILE: /tmp/passphrase-file.txt - CR_SIGN: true - CR_SKIP_EXISTING: true - CR_GENERATE_RELEASE_NOTES: true - CR_RELEASE_NAME_TEMPLATE: "{{ .Name }}-v{{ .Version }}" - - - name: Securely delete the PGP key and passphrase - if: always() - run: shred --remove=wipesync /tmp/keyring.gpg /tmp/passphrase-file.txt - - - name: Login to GitHub Container Registry - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Install sigstore/cosign - uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0 - - - name: Push charts to GHCR - env: - COSIGN_EXPERIMENTAL: 1 - # when filling gaps with previously released charts, cr would create - # nothing in .cr-release-packages/, and the original globbing character - # would be preserved, causing a non-zero exit. 
Set nullglob to fix this - run: | - shopt -s nullglob - for pkg in .cr-release-packages/*.tgz; do - if [ -z "${pkg:-}" ]; then - break - fi - helm push "${pkg}" oci://ghcr.io/"${GITHUB_REPOSITORY}" - file=${pkg##*/} - name=${file%-*} - version=${file%.*} - version=${version##*-} - cosign sign --yes ghcr.io/"${GITHUB_REPOSITORY}"/"${name}":"${version}" - done diff --git a/.github/workflows/tests-cluster-chainsaw.yaml b/.github/workflows/tests-cluster-chainsaw.yaml index 2d619b24a..d7f9e6bdb 100644 --- a/.github/workflows/tests-cluster-chainsaw.yaml +++ b/.github/workflows/tests-cluster-chainsaw.yaml @@ -6,8 +6,8 @@ on: - 'gh-pages' jobs: - test-cluster-standalone: - runs-on: ubuntu-24.04 + test-cluster-chainsaw: + runs-on: depot-ubuntu-latest-8 steps: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -17,6 +17,29 @@ jobs: - name: Install Cosign uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0 + # Added by ParadeDB: Authenticate to Docker Hub to avoid rate limits + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN }} + + # Added by ParadeDB: Always pull the latest version of paradedb/paradedb + - name: Set ParadeDB Version to Latest + working-directory: charts/paradedb/ + env: + GH_TOKEN: ${{ secrets.GHA_CREATE_RELEASE_PAT }} + run: | + LATEST_TAG=$(curl -s https://api.github.com/repos/paradedb/paradedb/tags | jq -r '.[0].name') + APP_VERSION=${LATEST_TAG#v} + sed -i "s/^[[:space:]]*paradedb: .*/ paradedb: \"$APP_VERSION\"/" values.yaml + sed -i "s/^version: .*/version: ${{ vars.CHART_VERSION_MAJOR }}.${{ vars.CHART_VERSION_MINOR }}.${{ vars.CHART_VERSION_PATCH }}/" Chart.yaml + echo "values.yaml:" + cat values.yaml + echo "----------------------------------------" + echo "Chart.yaml:" + cat Chart.yaml + - name: Setup kind uses: ./.github/actions/setup-kind diff --git a/.github/workflows/tests-operator.yml b/.github/workflows/tests-operator.yml deleted file mode 100644 index ec7052ed5..000000000 --- a/.github/workflows/tests-operator.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: tests-operator - -on: - pull_request: - branches-ignore: - - 'gh-pages' - -jobs: - deploy_operator: - runs-on: ubuntu-24.04 - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - - name: Setup kind - uses: ./.github/actions/setup-kind - - - name: Deploy the operator - uses: ./.github/actions/deploy-operator - - - name: Deploy a cluster - run: | - cat < + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. 
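The Makefile diff that follows drops the operator-chart schema target and now regenerates docs and the values schema for `charts/paradedb` only. As a minimal sketch of the contributor workflow, using only the tools the Makefile itself references (the plugin URL is taken from the Makefile's error hint; `helm-docs` must be installed separately):

```bash
# One-time setup: the schema target requires the helm-schema-gen Helm plugin.
helm plugin install https://github.com/karuppiah7890/helm-schema-gen.git

# Regenerate the chart docs and charts/paradedb/values.schema.json.
make docs schema
```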
diff --git a/Makefile b/Makefile index ac2030a88..bacdce965 100644 --- a/Makefile +++ b/Makefile @@ -12,12 +12,8 @@ docs: ## Generate charts' docs using helm-docs (echo "Please, install https://github.com/norwoodj/helm-docs first" && exit 1) .PHONY: schema -schema: cloudnative-pg-schema cluster-schema ## Generate charts' schema using helm-schema-gen - -cloudnative-pg-schema: - @helm schema-gen charts/cloudnative-pg/values.yaml | cat > charts/cloudnative-pg/values.schema.json || \ - (echo "Please, run: helm plugin install https://github.com/karuppiah7890/helm-schema-gen.git" && exit 1) +schema: cluster-schema ## Generate charts' schema using helm-schema-gen cluster-schema: - @helm schema-gen charts/cluster/values.yaml | cat > charts/cluster/values.schema.json || \ + @helm schema-gen charts/paradedb/values.yaml | cat > charts/paradedb/values.schema.json || \ (echo "Please, run: helm plugin install https://github.com/karuppiah7890/helm-schema-gen.git" && exit 1) diff --git a/README.md b/README.md index dda335965..ceef6acf0 100644 --- a/README.md +++ b/README.md @@ -1,53 +1,104 @@ -# CloudNativePG Helm Charts +

+  ParadeDB
+
-[![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow]
-[![GitHub License](https://img.shields.io/github/license/cloudnative-pg/charts)][license]
+
+  Postgres for Search and Analytics
+
+  Website • Docs • Community • Blog • Changelog
+
-[![GitHub Release](https://img.shields.io/github/v/release/cloudnative-pg/charts?filter=cloudnative-pg-*)](https://github.com/cloudnative-pg/charts/tree/main/charts/cloudnative-pg) -[![GitHub Release](https://img.shields.io/github/v/release/cloudnative-pg/charts?filter=cluster-*)](https://github.com/cloudnative-pg/charts/tree/main/charts/cluster) +--- +[![Publish Helm Chart](https://github.com/paradedb/charts/actions/workflows/paradedb-publish-chart.yml/badge.svg)](https://github.com/paradedb/charts/actions/workflows/paradedb-publish-chart.yml) +[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/paradedb)](https://artifacthub.io/packages/search?repo=paradedb) +[![Docker Pulls](https://img.shields.io/docker/pulls/paradedb/paradedb)](https://hub.docker.com/r/paradedb/paradedb) +[![License](https://img.shields.io/github/license/paradedb/paradedb?color=blue)](https://github.com/paradedb/paradedb?tab=AGPL-3.0-1-ov-file#readme) +[![Slack URL](https://img.shields.io/badge/Join%20Slack-purple?logo=slack&link=https%3A%2F%2Fjoin.slack.com%2Ft%2Fparadedbcommunity%2Fshared_invite%2Fzt-2lkzdsetw-OiIgbyFeiibd1DG~6wFgTQ)](https://join.slack.com/t/paradedbcommunity/shared_invite/zt-2lkzdsetw-OiIgbyFeiibd1DG~6wFgTQ) +[![X URL](https://img.shields.io/twitter/url?url=https%3A%2F%2Ftwitter.com%2Fparadedb&label=Follow%20%40paradedb)](https://x.com/paradedb) -## Operator chart +# ParadeDB Helm Chart -Helm chart to install the -[CloudNativePG operator](https://cloudnative-pg.io), originally created and sponsored by -[EDB](https://www.enterprisedb.com/) to manage PostgreSQL workloads on any supported Kubernetes cluster -running in private, public, or hybrid cloud environments. +The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication. + +Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. + +The chart is also available on [ArtifactHub](https://artifacthub.io/packages/helm/paradedb/paradedb). + +## Getting Started + +First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). + +### Installing the CloudNativePG Operator + +Skip this step if the CNPG operator is already installed in your cluster. -**NOTE**: supports only the latest point release of the CloudNativePG operator. ```console helm repo add cnpg https://cloudnative-pg.github.io/charts helm upgrade --install cnpg \ - --namespace cnpg-system \ - --create-namespace \ - cnpg/cloudnative-pg +--namespace cnpg-system \ +--create-namespace \ +cnpg/cloudnative-pg ``` -Refer to the [Operator Chart documentation](charts/cloudnative-pg/README.md) for advanced configuration and monitoring. +### Setting up a ParadeDB CNPG Cluster -## Cluster chart +Create a `values.yaml` and configure it to your requirements. 
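Before writing the file, it can help to inspect every key the chart accepts. A minimal sketch, assuming the `paradedb` Helm repository has already been added as shown in the installation step further below:

```bash
# Dump the chart's default values to use as a starting point for your own values.yaml.
helm show values paradedb/paradedb > values.yaml
```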
Here is a basic example: -Helm chart to install a CloudNativePG database cluster. +```yaml +type: paradedb +mode: standalone -```console -helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install database \ - --namespace database \ - --create-namespace \ - cnpg/cluster +cluster: + instances: 2 + storage: + size: 256Mi +``` + +Then, launch the ParadeDB cluster. + +```bash +helm repo add paradedb https://paradedb.github.io/charts +helm upgrade --install paradedb \ +--namespace paradedb-database \ +--create-namespace \ +--values values.yaml \ +paradedb/paradedb +``` + +If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). + +A more detailed guide on launching the cluster can be found in the [Getting Started docs](<./charts/paradedb/docs/Getting Started.md>). To get started with ParadeDB, we suggest you follow the [quickstart guide](/documentation/getting-started/quickstart). + +### Connecting to a ParadeDB CNPG Cluster + +The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be: + +```bash +kubectl --namespace paradedb-database exec --stdin --tty services/paradedb-rw -- bash ``` -Refer to the [Cluster Chart documentation](charts/cluster/README.md) for advanced configuration options. +This will launch a shell inside the instance. You can connect via `psql` with: -## Contributing +```bash +psql -d paradedb +``` -Please read the [code of conduct](CODE-OF-CONDUCT.md) and the -[guidelines](CONTRIBUTING.md) to contribute to the project. +## Development -## Copyright +To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. + +```bash +helm upgrade --install paradedb --namespace paradedb-database --create-namespace ./charts/paradedb +``` -Helm charts for CloudNativePG are distributed under [Apache License 2.0](LICENSE). +## License -[stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg -[license]: https://github.com/cloudnative-pg/charts?tab=Apache-2.0-1-ov-file +ParadeDB is licensed under the [GNU Affero General Public License v3.0](LICENSE) and as commercial software. For commercial licensing, please contact us at [sales@paradedb.com](mailto:sales@paradedb.com). diff --git a/RELEASE.md b/RELEASE.md deleted file mode 100644 index 118f2ac8c..000000000 --- a/RELEASE.md +++ /dev/null @@ -1,150 +0,0 @@ -Release Process -=============== - -This repo contains two helm charts: [cloudnative-pg](./charts/cloudnative-pg) -and [cluster](./charts/cluster). Both the charts are available -through a single [repository](https://cloudnative-pg.github.io/charts), but -should be released separately as their versioning might be unlinked, and the -latter depends on the former. - -**IMPORTANT** we should run the below procedures against the latest point -release of the CloudNativePG operator. I.e. even if we have several release -branches in CNPG, we will only target the most advanced point -release (e.g. 1.17.1) - -## Charts - -1. 
[Releasing the `cloudnative-pg` chart](#releasing-the-cloudnative-pg-chart) -2. [Releasing `cluster` chart](#releasing-the-cluster-chart) - -## Releasing the `cloudnative-pg` chart - -In order to create a new release of the `cloudnative-pg` chart, follow these steps: - -1. Take note of the current value of the release: see `.version` in `charts/cloudnative-pg/Chart.yaml` - ```bash - yq -r '.version' charts/cloudnative-pg/Chart.yaml - ``` -2. Decide which version to create, depending on the kind of jump of the CloudNativePG release, following semver - semantics. For this document, let's call it `X.Y.Z` - ```bash - NEW_VERSION="X.Y.Z" - ``` -3. Create a branch named `release/cloudnative-pg-vX.Y.Z` and switch to it: - ```bash - git switch --create release/cloudnative-pg-v$NEW_VERSION - ``` -4. Update the `.version` in the [Chart.yaml](./charts/cloudnative-pg/Chart.yaml) file to `"X.Y.Z"` - ```bash - sed -i -E "s/^version: \"([0-9]+.?)+\"/version: \"$NEW_VERSION\"/" charts/cloudnative-pg/Chart.yaml - ``` -5. Update everything else as required, e.g. if releasing due to a new `cloudnative-pg` version being released, you might - want to update the following: - 1. `.appVersion` in the [Chart.yaml](./charts/cloudnative-pg/Chart.yaml) file - 2. [crds.yaml](./charts/cloudnative-pg/templates/crds/crds.yaml), which can be built using - [kustomize](https://kustomize.io/) from the `cloudnative-pg` repo using kustomize - [remoteBuild](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/remoteBuild.md) - running: - ```bash - VERSION=v1.16.0 - kustomize build https://github.com/cloudnative-pg/cloudnative-pg/tree/release-1.16/config/helm/\?ref=v1.16.0 - ``` - It might be easier to run `kustomize build config/helm` from the `cloudnative-pg` repo, with the desired release - branch checked out, and copy the result to `./charts/cloudnative-pg/templates/crds/crds.yaml`. - 3. NOTE: please keep the guards for `.Values.crds.create`, i.e. - `{{- if .Values.crds.create }}` and `{{- end }}` after you copy the CRD into `templates/crds/crds.yaml`. - 4. To update the files in the [templates](./charts/cloudnative-pg/templates) directory, you can diff the previous - CNPG release yaml against the new one, to find what should be updated (e.g. - ```bash - OLD_VERSION=1.15.0 - NEW_VERSION=1.15.1 - vimdiff \ - "https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-${OLD_VERSION}.yaml" \ - "https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-${NEW_VERSION}.yaml" - ``` - Or from the `cloudnative-pg` repo, with the desired release branch checked out: - ```bash - vimdiff releases/cnpg-1.15.0.yaml releases/cnpg-1.15.1.yaml - ``` - 5. Update [values.yaml](./charts/cloudnative-pg/values.yaml) if needed - 6. NOTE: updating `values.yaml` just for the CNPG version may not be necessary, as the value should default to the - `appVersion` in `Chart.yaml` -6. Run `make docs schema` to regenerate the docs and the values schema in case it is needed - ```bash - make docs schema - ``` -7. Commit and add the relevant information you wish in the commit message. - ```bash - git add . - git commit -S -s -m "Release cloudnative-pg-v$NEW_VERSION" --edit - ``` -8. Push the new branch - ```bash - git push --set-upstream origin release/cloudnative-pg-v$NEW_VERSION - ``` -9. A PR named `Release cloudnative-pg-vX.Y.Z` should be automatically created -10. Wait for all the checks to pass -11. 
Two approvals are required in order to merge the PR, if you are a maintainer approve the PR yourself and ask for - another approval, otherwise ask for two approvals directly. -12. Merge the PR squashing all commits and **taking care to keep the commit message to be - `Release cloudnative-pg-vX.Y.Z`** -13. A release `cloudnative-pg-vX.Y.Z` should be automatically created by an action, which will then trigger the release - action. Verify they both are successful. -14. Once done you should be able to run: - ```bash - helm repo add cnpg https://cloudnative-pg.github.io/charts - helm repo update - helm search repo cnpg - ``` - and be able to see the new version `X.Y.Z` as `CHART VERSION` for `cloudnative-pg` - -## Releasing the `cluster` chart - -In order to create a new release of the `cluster` chart, follow these steps: - -1. Take note of the current value of the release: see `.version` in `charts/cluster/Chart.yaml` - ```bash - yq -r '.version' charts/cluster/Chart.yaml - ``` -2. Decide which version to create, depending on the kind of changes and backwards compatibility, following semver - semantics. For this document, let's call it `X.Y.Z` - ```bash - NEW_VERSION="X.Y.Z" - ``` -3. Create a branch: named `release/cluster-vX.Y.Z` and switch to it - ```bash - git switch --create release/cluster-v$NEW_VERSION - ``` -4. Update the `.version` in the [Chart.yaml](./charts/cluster/Chart.yaml) file to `"X.Y.Z"` - ```bash - sed -i -E "s/^version: ([0-9]+.?)+/version: $NEW_VERSION/" charts/cluster/Chart.yaml - ``` -5. Run `make docs schema` to regenerate the docs and the values schema in case it is needed - ```bash - make docs schema - ``` -6. Commit and add the relevant information you wish in the commit message. - ```bash - git add . - git commit -S -s -m "Release cluster-v$NEW_VERSION" --edit - ``` -7. Push the new branch - ```bash - git push --set-upstream origin release/cluster-v$NEW_VERSION - ``` -8. A PR should be automatically created -9. Wait for all the checks to pass -10. Two approvals are required in order to merge the PR, if you are a - maintainer approve the PR yourself and ask for another approval, otherwise - ask for two approvals directly. -11. Merge the PR squashing all commits and **taking care to keep the commit - message to be `Release cluster-vX.Y.Z`** -12. A release `cluster-vX.Y.Z` should be automatically created by an action, which will ten trigger the release action. - Verify they both are successful. -13. Once done you should be able to run: - ```bash - helm repo add cnpg https://cloudnative-pg.github.io/charts - helm repo update - helm search repo cnpg - ``` - and be able to see the new version `X.Y.Z` as `CHART VERSION` for `cluster` diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..e454bef28 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,21 @@ +# Security Policy + +## Supported Versions + +We release patches for security vulnerabilities on a regular cadence. Which versions +are eligible for receiving such patches can be found below: + +| Version | Supported | +| ------- | ------------------ | +| latest | :white_check_mark: | + +## Reporting a Vulnerability + +Please do NOT raise a GitHub Issue to report a security vulnerability. Please report +(suspected) security vulnerabilities to **[security@paradedb.com](mailto:security@paradedb.com)**, +preferably with a proof of concept. You will receive a response from us within 24 +hours. 
If the issue is confirmed, we will release a patch as quickly as +possible depending on complexity but historically within a few days. + +Non-vulnerability-related security issues such as new ideas for security features +are welcome on GitHub Issues. diff --git a/artifacthub-repo.yml b/artifacthub-repo.yml new file mode 100644 index 000000000..34c2c2346 --- /dev/null +++ b/artifacthub-repo.yml @@ -0,0 +1,15 @@ +# Artifact Hub repository metadata file +# +# Some settings like the verified publisher flag or the ignored packages won't +# be applied until the next time the repository is processed. Please keep in +# mind that the repository won't be processed if it has not changed since the +# last time it was processed. Depending on the repository kind, this is checked +# in a different way. For Helm http based repositories, we consider it has +# changed if the `index.yaml` file changes. For git based repositories, it does +# when the hash of the last commit in the branch you set up changes. This does +# NOT apply to ownership claim operations, which are processed immediately. +# +repositoryID: d7b5cc3f-1710-47b5-af0f-14855f44f77d +owners: + - name: ParadeDB Support + email: support@paradedb.com diff --git a/charts/cloudnative-pg/Chart.lock b/charts/cloudnative-pg/Chart.lock deleted file mode 100644 index 610070fb7..000000000 --- a/charts/cloudnative-pg/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: cluster - repository: https://cloudnative-pg.github.io/grafana-dashboards - version: 0.0.2 -digest: sha256:fcf16ad357c17be3dd79c138723e78e9e101fecc5d07d9371299c32b9f85dbd9 -generated: "2024-04-25T12:32:36.61779032-04:00" diff --git a/charts/cloudnative-pg/LICENSE b/charts/cloudnative-pg/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/charts/cloudnative-pg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/charts/cloudnative-pg/monitoring/grafana-dashboard.json b/charts/cloudnative-pg/monitoring/grafana-dashboard.json deleted file mode 100644 index 8c4813056..000000000 --- a/charts/cloudnative-pg/monitoring/grafana-dashboard.json +++ /dev/null @@ -1,3 +0,0 @@ -The JSON file has been moved to a dedicated repository for CloudNativePG dashboards located at: - -https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json diff --git a/charts/cloudnative-pg/templates/NOTES.txt b/charts/cloudnative-pg/templates/NOTES.txt deleted file mode 100644 index 0f79fe0dc..000000000 --- a/charts/cloudnative-pg/templates/NOTES.txt +++ /dev/null @@ -1,18 +0,0 @@ - -CloudNativePG operator should be installed in namespace "{{ .Release.Namespace }}". 
-You can now create a PostgreSQL cluster with 3 nodes in the current namespace as follows: - -cat <=17.0.0" - name: pg_stat_bgwriter - query: | - SELECT buffers_clean - , maxwritten_clean - , buffers_alloc - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_bgwriter - metrics: - - buffers_clean: - usage: "COUNTER" - description: "Number of buffers written by the background writer" - - maxwritten_clean: - usage: "COUNTER" - description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" - - buffers_alloc: - usage: "COUNTER" - description: "Number of buffers allocated" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_checkpointer: - runonserver: ">=17.0.0" - query: | - SELECT num_timed AS checkpoints_timed - , num_requested AS checkpoints_req - , restartpoints_timed - , restartpoints_req - , restartpoints_done - , write_time - , sync_time - , buffers_written - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_checkpointer - metrics: - - checkpoints_timed: - usage: "COUNTER" - description: "Number of scheduled checkpoints that have been performed" - - checkpoints_req: - usage: "COUNTER" - description: "Number of requested checkpoints that have been performed" - - restartpoints_timed: - usage: "COUNTER" - description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" - - restartpoints_req: - usage: "COUNTER" - description: "Number of requested restartpoints that have been performed" - - restartpoints_done: - usage: "COUNTER" - description: "Number of restartpoints that have been performed" - - write_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" - - sync_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" - - buffers_written: - usage: "COUNTER" - description: "Number of buffers written during checkpoints and restartpoints" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_database: - query: | - SELECT datname - , xact_commit - , xact_rollback - , blks_read - , blks_hit - , tup_returned - , tup_fetched - , tup_inserted - , tup_updated - , tup_deleted - , conflicts - , temp_files - , temp_bytes - , deadlocks - , blk_read_time - , blk_write_time - FROM pg_catalog.pg_stat_database - metrics: - - datname: - usage: "LABEL" - description: "Name of this database" - - xact_commit: - usage: "COUNTER" - description: "Number of transactions in this database that have been committed" - - xact_rollback: - usage: "COUNTER" - description: "Number of transactions in this database that have been rolled back" - - blks_read: - usage: "COUNTER" - description: "Number of disk blocks read in this database" - - blks_hit: - usage: "COUNTER" - description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" - - tup_returned: - usage: "COUNTER" - description: "Number of rows returned by queries in this database" - - tup_fetched: - usage: "COUNTER" - description: "Number of rows fetched by queries in this 
database" - - tup_inserted: - usage: "COUNTER" - description: "Number of rows inserted by queries in this database" - - tup_updated: - usage: "COUNTER" - description: "Number of rows updated by queries in this database" - - tup_deleted: - usage: "COUNTER" - description: "Number of rows deleted by queries in this database" - - conflicts: - usage: "COUNTER" - description: "Number of queries canceled due to conflicts with recovery in this database" - - temp_files: - usage: "COUNTER" - description: "Number of temporary files created by queries in this database" - - temp_bytes: - usage: "COUNTER" - description: "Total amount of data written to temporary files by queries in this database" - - deadlocks: - usage: "COUNTER" - description: "Number of deadlocks detected in this database" - - blk_read_time: - usage: "COUNTER" - description: "Time spent reading data file blocks by backends in this database, in milliseconds" - - blk_write_time: - usage: "COUNTER" - description: "Time spent writing data file blocks by backends in this database, in milliseconds" - - pg_stat_replication: - primary: true - query: | - SELECT usename - , COALESCE(application_name, '') AS application_name - , COALESCE(client_addr::text, '') AS client_addr - , COALESCE(client_port::text, '') AS client_port - , EXTRACT(EPOCH FROM backend_start) AS backend_start - , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes - , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes - , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds - FROM pg_catalog.pg_stat_replication - metrics: - - usename: - usage: "LABEL" - description: "Name of the replication user" - - application_name: - usage: "LABEL" - description: "Name of the application" - - client_addr: - usage: "LABEL" - description: "Client IP address" - - client_port: - usage: "LABEL" - description: "Client TCP port" - - backend_start: - usage: "COUNTER" - description: "Time when this process was started" - - backend_xmin_age: - usage: "COUNTER" - description: "The age of this standby's xmin horizon" - - sent_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location sent on this connection" - - write_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" - - flush_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" - - replay_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" - - write_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" - - flush_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" - - replay_lag_seconds: 
- usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" - - pg_settings: - query: | - SELECT name, - CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting - FROM pg_catalog.pg_settings - WHERE vartype IN ('integer', 'real', 'bool') - ORDER BY 1 - metrics: - - name: - usage: "LABEL" - description: "Name of the setting" - - setting: - usage: "GAUGE" - description: "Setting value" diff --git a/charts/cluster/.helmignore b/charts/cluster/.helmignore deleted file mode 100644 index 0e8a0eb36..000000000 --- a/charts/cluster/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/cluster/examples/timescaledb.yaml b/charts/cluster/examples/timescaledb.yaml deleted file mode 100644 index 328b6c1eb..000000000 --- a/charts/cluster/examples/timescaledb.yaml +++ /dev/null @@ -1,9 +0,0 @@ -type: timescaledb -mode: standalone -version: - postgresql: "15.7" - timescaledb: "2.15" -cluster: - instances: 1 -backups: - enabled: false diff --git a/charts/cluster/.gitignore b/charts/paradedb/.gitignore similarity index 100% rename from charts/cluster/.gitignore rename to charts/paradedb/.gitignore diff --git a/charts/cloudnative-pg/.helmignore b/charts/paradedb/.helmignore similarity index 100% rename from charts/cloudnative-pg/.helmignore rename to charts/paradedb/.helmignore diff --git a/charts/cluster/Chart.yaml b/charts/paradedb/Chart.yaml similarity index 56% rename from charts/cluster/Chart.yaml rename to charts/paradedb/Chart.yaml index cb3aff83b..8fc821776 100644 --- a/charts/cluster/Chart.yaml +++ b/charts/paradedb/Chart.yaml @@ -14,18 +14,26 @@ # limitations under the License. # apiVersion: v2 -name: cluster -description: Deploys and manages a CloudNativePG cluster and its associated resources. -icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg +name: paradedb +description: Deploys and manages a ParadeDB CloudNativePG cluster and its associated resources. 
+icon: https://raw.githubusercontent.com/paradedb/paradedb/main/docs/logo/light.svg type: application -version: 0.1.0 + +# The Chart version, set in the publish CI workflow from GitHub Actions Variables +# We default to v0.10.3 for testing and local development +version: 0.10.3 + sources: - - https://github.com/cloudnative-pg/charts + - https://github.com/paradedb/charts keywords: + - paradedb + - pg_search + - pg_analytics - postgresql - postgres - database -home: https://cloudnative-pg.io +home: https://paradedb.com maintainers: - - name: itay-grudev - email: itay+cloudnativepg-charts+github.com@grudev.com + - name: ParadeDB + email: support@paradedb.com + url: https://paradedb.com diff --git a/charts/cluster/README.md b/charts/paradedb/README.md similarity index 61% rename from charts/cluster/README.md rename to charts/paradedb/README.md index 99cc8c378..505a4c2b5 100644 --- a/charts/cluster/README.md +++ b/charts/paradedb/README.md @@ -1,35 +1,17 @@ -# cluster +# ParadeDB CloudNativePG Cluster -![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) +The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication. -> **Warning** -> ### This chart is under active development. -> ### Advised caution when using in production! +Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. -A note on the chart's purpose ------------------------------ +The chart is also available on [ArtifactHub](https://artifacthub.io/packages/helm/paradedb/paradedb). -This is an opinionated chart that is designed to provide a subset of simple, stable and safe configurations using the -CloudNativePG operator. It is designed to provide a simple way to perform recovery operations to decrease your RTO. +## Getting Started -It is not designed to be a one size fits all solution. If you need a more complicated setup we strongly recommend that -you either: +First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). -* use the operator directly -* create your own chart -* use Kustomize to modify the chart's resources +### Installing the CloudNativePG Operator -**_Note_** that the latter option carries it's own risks as the chart configuration may change, especially before it -reaches a stable release. - -That being said, we welcome PRs that improve the chart, but please keep in mind that we don't plan to support every -single configuration that the operator provides and we may reject PRs that add too much complexity and maintenance -difficulty to the chart. - -Getting Started ---------------- - -### Installing the Operator Skip this step if the CNPG operator is already installed in your cluster. 
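For reference, a full operator installation typically looks like the following sketch. The chart repository URL is the one used elsewhere in this document; the `cnpg-system` namespace is only the conventional choice and is shown here as an illustration, not as a value this chart requires:

```console
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm upgrade --install cnpg \
--namespace cnpg-system \
--create-namespace \
cnpg/cloudnative-pg
```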
```console @@ -40,29 +22,63 @@ helm upgrade --install cnpg \ cnpg/cloudnative-pg ``` -### Setting up a CNPG Cluster +### Setting up a ParadeDB CNPG Cluster -```console -helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install cnpg \ ---namespace cnpg-database \ +Create a `values.yaml` and configure it to your requirements. Here is a basic example: + +```yaml +type: paradedb +mode: standalone + +cluster: + instances: 2 + storage: + size: 256Mi +``` + +Then, launch the ParadeDB cluster. + +```bash +helm repo add paradedb https://paradedb.github.io/charts +helm upgrade --install paradedb \ +--namespace paradedb-database \ --create-namespace \ --values values.yaml \ -cnpg/cluster +paradedb/paradedb +``` + +If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). + +A more detailed guide on launching the cluster can be found in the [Getting Started docs](<./docs/Getting Started.md>). To get started with ParadeDB, we suggest you follow the [quickstart guide](/documentation/getting-started/quickstart). + +### Connecting to a ParadeDB CNPG Cluster + +The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be: + +```bash +kubectl --namespace paradedb-database exec --stdin --tty services/paradedb-rw -- bash +``` + +This will launch a shell inside the instance. You can connect via `psql` with: + +```bash +psql -d paradedb ``` -A more detailed guide can be found in the [Getting Started docs](<./docs/Getting Started.md>). +## Development + +To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. + +```bash +helm upgrade --install paradedb --namespace paradedb-database --create-namespace ./charts/paradedb +``` Cluster Configuration --------------------- ### Database types -Currently the chart supports two database types. These are configured via the `type` parameter. These are: -* `postgresql` - A standard PostgreSQL database. -* `postgis` - A PostgreSQL database with the PostGIS extension installed. - -Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. +To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter. ### Modes of operation @@ -157,7 +173,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.imageName | string | `""` | Name of the container image, supporting both tags (:) and digests for deterministic and repeatable deployments: :@sha256: | | cluster.imagePullPolicy | string | `"IfNotPresent"` | Image pull policy. One of Always, Never or IfNotPresent. If not defined, it defaults to IfNotPresent. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | | cluster.imagePullSecrets | list | `[]` | The list of pull secrets to be used to pull the images. 
See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-LocalObjectReference | -| cluster.initdb | object | `{}` | BootstrapInitDB is the configuration of the bootstrap process when initdb is used. See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb | +| cluster.initdb | object | `{"database":"paradedb"}` | BootstrapInitDB is the configuration of the bootstrap process when initdb is used. See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb | | cluster.instances | int | `3` | Number of instances | | cluster.logLevel | string | `"info"` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | | cluster.monitoring.customQueries | list | `[]` | Custom Prometheus metrics Will be stored in the ConfigMap | @@ -211,10 +227,10 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.google.gkeEnvironment | bool | `false` | | | recovery.google.path | string | `"/"` | | | recovery.method | string | `"backup"` | Available recovery methods: * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). * `pg_basebackup` - Recovers a CNPG cluster viaa streaming replication protocol. Useful if you want to migrate databases to CloudNativePG, even from outside Kubernetes. # TODO | -| recovery.pgBaseBackup.database | string | `"app"` | Name of the database used by the application. Default: `app`. | +| recovery.pgBaseBackup.database | string | `"paradedb"` | Name of the database used by the application. Default: `paradedb`. | | recovery.pgBaseBackup.owner | string | `""` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | | recovery.pgBaseBackup.secret | string | `""` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. | -| recovery.pgBaseBackup.source.database | string | `"app"` | | +| recovery.pgBaseBackup.source.database | string | `"paradedb"` | | | recovery.pgBaseBackup.source.host | string | `""` | | | recovery.pgBaseBackup.source.passwordSecret.create | bool | `false` | Whether to create a secret for the password | | recovery.pgBaseBackup.source.passwordSecret.key | string | `"password"` | The key in the secret containing the password | @@ -238,40 +254,28 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.s3.secretKey | string | `""` | | | recovery.secret.create | bool | `true` | Whether to create a secret for the backup credentials | | recovery.secret.name | string | `""` | Name of the backup credentials secret | -| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` * `timescaledb` | -| version.postgis | string | `"3.4"` | If using PostGIS, specify the version | +| type | string | `"paradedb"` | Type of the CNPG database. 
Available types: * `paradedb` | +| version.paradedb | string | `"0.10.3"` | We default to v0.10.3 for testing and local development | | version.postgresql | string | `"16"` | PostgreSQL major version to use | -| version.timescaledb | string | `"2.15"` | If using TimescaleDB, specify the version | -| poolers[].name | string | `` | Name of the pooler resource | -| poolers[].instances | number | `1` | The number of replicas we want | -| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | -| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | -| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | -| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | -| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | -| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | -| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | -| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | +| poolers[].name | string | `` | Name of the pooler resource | +| poolers[].instances | number | `1` | The number of replicas we want | +| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | +| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | +| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | +| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | +| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | +| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | +| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | ## Maintainers | Name | Email | Url | | ---- | ------ | --- | -| itay-grudev | | | - -Features that require feedback ------------------------------- - -Please raise a ticket tested any of the following features and they have worked. -Alternatively a ticket and a PR if you have found that something needs a change to work properly. 
- -- [ ] Google Cloud Storage Backups -- [ ] Google Cloud Storage Recovery +| ParadeDB | | | -TODO ----- -* IAM Role for S3 Service Account -* Automatic provisioning of a Alert Manager configuration +## License +ParadeDB is licensed under the [GNU Affero General Public License v3.0](LICENSE) and as commercial software. For commercial licensing, please contact us at [sales@paradedb.com](mailto:sales@paradedb.com). diff --git a/charts/cluster/README.md.gotmpl b/charts/paradedb/README.md.gotmpl similarity index 82% rename from charts/cluster/README.md.gotmpl rename to charts/paradedb/README.md.gotmpl index 1ca7bebaa..879b04577 100644 --- a/charts/cluster/README.md.gotmpl +++ b/charts/paradedb/README.md.gotmpl @@ -1,42 +1,17 @@ -{{ template "chart.header" . }} +# ParadeDB CloudNativePG Cluster {{ template "chart.deprecationWarning" . }} -{{ template "chart.badgesSection" . }} +This README documents the Helm chart for deploying and managing [ParadeDB](https://github.com/paradedb/paradedb) on Kubernetes via [CloudNativePG](https://cloudnative-pg.io/), including advanced settings. +Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. -> **Warning** -> ### This chart is under active development. -> ### Advised caution when using in production! - - -A note on the chart's purpose ------------------------------ - -This is an opinionated chart that is designed to provide a subset of simple, stable and safe configurations using the -CloudNativePG operator. It is designed to provide a simple way to perform recovery operations to decrease your RTO. - -It is not designed to be a one size fits all solution. If you need a more complicated setup we strongly recommend that -you either: - -* use the operator directly -* create your own chart -* use Kustomize to modify the chart's resources - -**_Note_** that the latter option carries it's own risks as the chart configuration may change, especially before it -reaches a stable release. - -That being said, we welcome PRs that improve the chart, but please keep in mind that we don't plan to support every -single configuration that the operator provides and we may reject PRs that add too much complexity and maintenance -difficulty to the chart. - - -Getting Started ---------------- +## Getting Started ### Installing the Operator + Skip this step if the CNPG operator is already installed in your cluster. ```console @@ -47,30 +22,39 @@ helm upgrade --install cnpg \ cnpg/cloudnative-pg ``` -### Setting up a CNPG Cluster +### Setting up a ParadeDB CNPG Cluster + +Create a `values.yaml` and configure it to your requirements. Here is a basic example: + +```yaml +type: paradedb +mode: standalone + +cluster: + instances: 2 + storage: + size: 256Mi +``` + +You can refer to the other examples in the [`charts/paradedb/examples`](https://github.com/paradedb/charts/tree/main/charts/paradedb/examples) directory. 
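For instance, a PgBouncer connection pooler can be layered on top of the cluster through the `poolers` list described in the values reference. The snippet below is only a sketch: the pooler name and the PgBouncer parameters are illustrative, and `examples/pgbouncer.yaml` remains the canonical example.

```yaml
poolers:
  - name: rw                # pooler name; the rendered resource is typically <cluster>-pooler-<name>
    type: rw                # forward traffic to the read-write service
    poolMode: transaction   # PgBouncer pool mode (session or transaction)
    instances: 2            # number of pooler replicas
    parameters:             # additional PgBouncer settings, passed through as strings
      max_client_conn: "200"
      default_pool_size: "20"
    monitoring:
      enabled: true         # create a PodMonitor for the pooler
```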
```console -helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install cnpg \ ---namespace cnpg-database \ +helm repo add paradedb https://paradedb.github.io/charts +helm upgrade --install paradedb \ +--namespace paradedb-database \ --create-namespace \ --values values.yaml \ -cnpg/cluster +paradedb/paradedb ``` A more detailed guide can be found in the [Getting Started docs](<./docs/Getting Started.md>). - Cluster Configuration --------------------- ### Database types -Currently the chart supports two database types. These are configured via the `type` parameter. These are: -* `postgresql` - A standard PostgreSQL database. -* `postgis` - A PostgreSQL database with the PostGIS extension installed. - -Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. +To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter. ### Modes of operation @@ -139,20 +123,4 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat {{ template "chart.maintainersSection" . }} -Features that require feedback ------------------------------- - -Please raise a ticket tested any of the following features and they have worked. -Alternatively a ticket and a PR if you have found that something needs a change to work properly. - -- [ ] Google Cloud Storage Backups -- [ ] Google Cloud Storage Recovery - - -TODO ----- -* IAM Role for S3 Service Account -* Automatic provisioning of a Alert Manager configuration - - {{ template "helm-docs.versionFooter" . }} diff --git a/charts/cluster/docs/Getting Started.md b/charts/paradedb/docs/Getting Started.md similarity index 96% rename from charts/cluster/docs/Getting Started.md rename to charts/paradedb/docs/Getting Started.md index 54dad419d..67feb8891 100644 --- a/charts/cluster/docs/Getting Started.md +++ b/charts/paradedb/docs/Getting Started.md @@ -1,6 +1,6 @@ # Getting Started -The CNPG cluster chart follows a convention over configuration approach. This means that the chart will create a reasonable +The CNPG cluster chart follows a convention over configuration approach. This means that the chart will create a reasonable CNPG setup with sensible defaults. However, you can override these defaults to create a more customized setup. Note that you still need to configure backups and monitoring separately. The chart will not install a Prometheus stack for you. @@ -23,14 +23,14 @@ helm upgrade --install cnpg \ ## Creating a cluster configuration -Once you have the operator installed, the next step is to prepare the cluster configuration. Whether this will be manged +Once you have the operator installed, the next step is to prepare the cluster configuration. Whether this will be managed via a GitOps solution or directly via Helm is up to you. The following sections outlines the important steps in both cases. ### Choosing the database type Currently the chart supports two database types. These are configured via the `type` parameter. These are: * `postgresql` - A standard PostgreSQL database. -* `postgis` - A PostgreSQL database with the PostGIS extension installed. +* `paradedb` - Postgres for Search and Analytics. Depending on the type the chart will use a different Docker image and fill in some initial setup, like extension installation. 
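For the `paradedb` type, that initial setup creates the ParadeDB extensions (`pg_search`, `pg_analytics`, `pg_cron`, and the others listed in the chart's bootstrap template). One way to confirm this on a running cluster, assuming the default namespace, service, and database names used in the README quick start, is:

```bash
# List installed extensions in the default database (names assume the README defaults).
kubectl --namespace paradedb-database exec --stdin --tty services/paradedb-rw -- \
  psql -d paradedb -c "SELECT extname, extversion FROM pg_extension ORDER BY extname;"
```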
diff --git a/charts/cluster/docs/Recovery.md b/charts/paradedb/docs/Recovery.md similarity index 100% rename from charts/cluster/docs/Recovery.md rename to charts/paradedb/docs/Recovery.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterHACritical.md b/charts/paradedb/docs/runbooks/CNPGClusterHACritical.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterHACritical.md rename to charts/paradedb/docs/runbooks/CNPGClusterHACritical.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md b/charts/paradedb/docs/runbooks/CNPGClusterHAWarning.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterHAWarning.md rename to charts/paradedb/docs/runbooks/CNPGClusterHAWarning.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md b/charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsCritical.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md rename to charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsCritical.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md b/charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsWarning.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md rename to charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsWarning.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md b/charts/paradedb/docs/runbooks/CNPGClusterHighReplicationLag.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md rename to charts/paradedb/docs/runbooks/CNPGClusterHighReplicationLag.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md b/charts/paradedb/docs/runbooks/CNPGClusterInstancesOnSameNode.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md rename to charts/paradedb/docs/runbooks/CNPGClusterInstancesOnSameNode.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md b/charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md rename to charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md b/charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md rename to charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterOffline.md b/charts/paradedb/docs/runbooks/CNPGClusterOffline.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterOffline.md rename to charts/paradedb/docs/runbooks/CNPGClusterOffline.md diff --git a/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md b/charts/paradedb/docs/runbooks/CNPGClusterZoneSpreadWarning.md similarity index 100% rename from charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md rename to charts/paradedb/docs/runbooks/CNPGClusterZoneSpreadWarning.md diff --git a/charts/cluster/examples/basic.yaml b/charts/paradedb/examples/basic.yaml similarity index 100% rename from charts/cluster/examples/basic.yaml rename to charts/paradedb/examples/basic.yaml diff --git a/charts/cluster/examples/custom-queries.yaml b/charts/paradedb/examples/custom-queries.yaml similarity index 100% rename from 
charts/cluster/examples/custom-queries.yaml rename to charts/paradedb/examples/custom-queries.yaml diff --git a/charts/cluster/examples/image-catalog-ref.yaml b/charts/paradedb/examples/image-catalog-ref.yaml similarity index 88% rename from charts/cluster/examples/image-catalog-ref.yaml rename to charts/paradedb/examples/image-catalog-ref.yaml index e4833a3b6..a512ddd45 100644 --- a/charts/cluster/examples/image-catalog-ref.yaml +++ b/charts/paradedb/examples/image-catalog-ref.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - timescaledb: "2.15" + paradedb: "0.10.3" cluster: instances: 1 imageCatalogRef: diff --git a/charts/cluster/examples/image-catalog.yaml b/charts/paradedb/examples/image-catalog.yaml similarity index 90% rename from charts/cluster/examples/image-catalog.yaml rename to charts/paradedb/examples/image-catalog.yaml index c610229b0..e82f99546 100644 --- a/charts/cluster/examples/image-catalog.yaml +++ b/charts/paradedb/examples/image-catalog.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - timescaledb: "2.15" + paradedb: "0.10.3" cluster: instances: 1 backups: diff --git a/charts/cluster/examples/postgis.yaml b/charts/paradedb/examples/paradedb.yaml similarity index 57% rename from charts/cluster/examples/postgis.yaml rename to charts/paradedb/examples/paradedb.yaml index 168ac9fbf..1c2c4f75e 100644 --- a/charts/cluster/examples/postgis.yaml +++ b/charts/paradedb/examples/paradedb.yaml @@ -1,8 +1,8 @@ -type: postgis +type: paradedb mode: standalone version: - postgresql: "16" - postgis: "3.4" + postgresql: "16.3" + paradedb: "0.10.3" cluster: instances: 1 backups: diff --git a/charts/cluster/examples/pgbouncer.yaml b/charts/paradedb/examples/pgbouncer.yaml similarity index 100% rename from charts/cluster/examples/pgbouncer.yaml rename to charts/paradedb/examples/pgbouncer.yaml diff --git a/charts/cluster/examples/recovery-backup.yaml b/charts/paradedb/examples/recovery-backup.yaml similarity index 100% rename from charts/cluster/examples/recovery-backup.yaml rename to charts/paradedb/examples/recovery-backup.yaml diff --git a/charts/cluster/examples/recovery-object_store.yaml b/charts/paradedb/examples/recovery-object_store.yaml similarity index 100% rename from charts/cluster/examples/recovery-object_store.yaml rename to charts/paradedb/examples/recovery-object_store.yaml diff --git a/charts/cluster/examples/recovery-pg_basebackup.yaml b/charts/paradedb/examples/recovery-pg_basebackup.yaml similarity index 100% rename from charts/cluster/examples/recovery-pg_basebackup.yaml rename to charts/paradedb/examples/recovery-pg_basebackup.yaml diff --git a/charts/cluster/examples/standalone-s3.yaml b/charts/paradedb/examples/standalone-s3.yaml similarity index 100% rename from charts/cluster/examples/standalone-s3.yaml rename to charts/paradedb/examples/standalone-s3.yaml diff --git a/charts/cluster/prometheus_rules/cluster-ha-critical.yaml b/charts/paradedb/prometheus_rules/cluster-ha-critical.yaml similarity index 82% rename from charts/cluster/prometheus_rules/cluster-ha-critical.yaml rename to charts/paradedb/prometheus_rules/cluster-ha-critical.yaml index 246a5af6b..d24e06be2 100644 --- a/charts/cluster/prometheus_rules/cluster-ha-critical.yaml +++ b/charts/paradedb/prometheus_rules/cluster-ha-critical.yaml @@ -2,9 +2,9 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster has no standby replicas! + summary: ParadeDB CNPG Cluster has no standby replicas! 
description: |- - CloudNativePG Cluster "{{ .labels.job }}" has no ready standby replicas. Your cluster at a severe + ParadeDB CNPG Cluster "{{ .labels.job }}" has no ready standby replicas. Your cluster at a severe risk of data loss and downtime if the primary instance fails. The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint @@ -15,7 +15,7 @@ annotations: This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this case you may want to silence it. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterHACritical.md expr: | max by (job) (cnpg_pg_replication_streaming_replicas{namespace="{{ .namespace }}"} - cnpg_pg_replication_is_wal_receiver_up{namespace="{{ .namespace }}"}) < 1 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-ha-warning.yaml b/charts/paradedb/prometheus_rules/cluster-ha-warning.yaml similarity index 79% rename from charts/cluster/prometheus_rules/cluster-ha-warning.yaml rename to charts/paradedb/prometheus_rules/cluster-ha-warning.yaml index 736ddf393..950642d31 100644 --- a/charts/cluster/prometheus_rules/cluster-ha-warning.yaml +++ b/charts/paradedb/prometheus_rules/cluster-ha-warning.yaml @@ -2,9 +2,9 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster less than 2 standby replicas. + summary: ParadeDB CNPG Cluster less than 2 standby replicas. description: |- - CloudNativePG Cluster "{{ .labels.job }}" has only {{ .value }} standby replicas, putting + ParadeDB CNPG Cluster "{{ .labels.job }}" has only {{ .value }} standby replicas, putting your cluster at risk if another instance fails. The cluster is still able to operate normally, although the `-ro` and `-r` endpoints operate at reduced capacity. @@ -13,7 +13,7 @@ annotations: This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances. In this case you may want to silence it. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterHAWarning.md expr: | max by (job) (cnpg_pg_replication_streaming_replicas{namespace="{{ .namespace }}"} - cnpg_pg_replication_is_wal_receiver_up{namespace="{{ .namespace }}"}) < 2 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml b/charts/paradedb/prometheus_rules/cluster-high_connection-critical.yaml similarity index 68% rename from charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml rename to charts/paradedb/prometheus_rules/cluster-high_connection-critical.yaml index df13ce3b3..089b4abbb 100644 --- a/charts/cluster/prometheus_rules/cluster-high_connection-critical.yaml +++ b/charts/paradedb/prometheus_rules/cluster-high_connection-critical.yaml @@ -2,11 +2,11 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Instance maximum number of connections critical! + summary: ParadeDB Instance maximum number of connections critical! 
description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" instance {{ .labels.pod }} is using {{ .value }}% of + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" instance {{ .labels.pod }} is using {{ .value }}% of the maximum number of connections. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsCritical.md expr: | sum by (pod) (cnpg_backends_total{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 95 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml b/charts/paradedb/prometheus_rules/cluster-high_connection-warning.yaml similarity index 68% rename from charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml rename to charts/paradedb/prometheus_rules/cluster-high_connection-warning.yaml index 73cc78392..d8189f726 100644 --- a/charts/cluster/prometheus_rules/cluster-high_connection-warning.yaml +++ b/charts/paradedb/prometheus_rules/cluster-high_connection-warning.yaml @@ -2,11 +2,11 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Instance is approaching the maximum number of connections. + summary: ParadeDB Instance is approaching the maximum number of connections. description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" instance {{ .labels.pod }} is using {{ .value }}% of + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" instance {{ .labels.pod }} is using {{ .value }}% of the maximum number of connections. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterHighConnectionsWarning.md expr: | sum by (pod) (cnpg_backends_total{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) * 100 > 80 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml b/charts/paradedb/prometheus_rules/cluster-high_replication_lag.yaml similarity index 68% rename from charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml rename to charts/paradedb/prometheus_rules/cluster-high_replication_lag.yaml index 660db254f..7e0304aaf 100644 --- a/charts/cluster/prometheus_rules/cluster-high_replication_lag.yaml +++ b/charts/paradedb/prometheus_rules/cluster-high_replication_lag.yaml @@ -2,13 +2,13 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster high replication lag + summary: ParadeDB CNPG Cluster high replication lag description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" is experiencing a high replication lag of + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" is experiencing a high replication lag of {{ .value }}ms. High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration. 
- runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterHighReplicationLag.md expr: | max(cnpg_pg_replication_lag{namespace="{{ .namespace }}",pod=~"{{ .podSelector }}"}) * 1000 > 1000 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml b/charts/paradedb/prometheus_rules/cluster-instances_on_same_node.yaml similarity index 67% rename from charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml rename to charts/paradedb/prometheus_rules/cluster-instances_on_same_node.yaml index aafcfab1e..43b1f5215 100644 --- a/charts/cluster/prometheus_rules/cluster-instances_on_same_node.yaml +++ b/charts/paradedb/prometheus_rules/cluster-instances_on_same_node.yaml @@ -2,13 +2,13 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster instances are located on the same node. + summary: ParadeDB CNPG Cluster instances are located on the same node. description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" has {{ .value }} + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" has {{ .value }} instances on the same node {{ .labels.node }}. A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterInstancesOnSameNode.md expr: | count by (node) (kube_pod_info{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"}) > 1 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-low_disk_space-critical.yaml b/charts/paradedb/prometheus_rules/cluster-low_disk_space-critical.yaml similarity index 84% rename from charts/cluster/prometheus_rules/cluster-low_disk_space-critical.yaml rename to charts/paradedb/prometheus_rules/cluster-low_disk_space-critical.yaml index c211bc61a..62fabbd1b 100644 --- a/charts/cluster/prometheus_rules/cluster-low_disk_space-critical.yaml +++ b/charts/paradedb/prometheus_rules/cluster-low_disk_space-critical.yaml @@ -2,10 +2,10 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Instance is running out of disk space! + summary: ParadeDB Instance is running out of disk space! description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" is running extremely low on disk space. Check attached PVCs! - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" is running extremely low on disk space. Check attached PVCs! 
+ runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md expr: | max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}"} / kubelet_volume_stats_capacity_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}"})) > 0.9 OR max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}-wal"} / kubelet_volume_stats_capacity_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}-wal"})) > 0.9 OR diff --git a/charts/cluster/prometheus_rules/cluster-low_disk_space-warning.yaml b/charts/paradedb/prometheus_rules/cluster-low_disk_space-warning.yaml similarity index 84% rename from charts/cluster/prometheus_rules/cluster-low_disk_space-warning.yaml rename to charts/paradedb/prometheus_rules/cluster-low_disk_space-warning.yaml index 5ed3653aa..093b93b87 100644 --- a/charts/cluster/prometheus_rules/cluster-low_disk_space-warning.yaml +++ b/charts/paradedb/prometheus_rules/cluster-low_disk_space-warning.yaml @@ -2,10 +2,10 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Instance is running out of disk space. + summary: ParadeDB Instance is running out of disk space. description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" is running low on disk space. Check attached PVCs. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" is running low on disk space. Check attached PVCs. + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md expr: | max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}"} / kubelet_volume_stats_capacity_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}"})) > 0.7 OR max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}-wal"} / kubelet_volume_stats_capacity_bytes{namespace="{{ .namespace }}", persistentvolumeclaim=~"{{ .podSelector }}-wal"})) > 0.7 OR diff --git a/charts/cluster/prometheus_rules/cluster-offline.yaml b/charts/paradedb/prometheus_rules/cluster-offline.yaml similarity index 66% rename from charts/cluster/prometheus_rules/cluster-offline.yaml rename to charts/paradedb/prometheus_rules/cluster-offline.yaml index 4206c02f3..cba2833b5 100644 --- a/charts/cluster/prometheus_rules/cluster-offline.yaml +++ b/charts/paradedb/prometheus_rules/cluster-offline.yaml @@ -2,13 +2,13 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster has no running instances! + summary: ParadeDB CNPG Cluster has no running instances! description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" has no ready instances. + ParadeDB CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" has no ready instances. Having an offline cluster means your applications will not be able to access the database, leading to potential service disruption and/or data loss. 
- runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterOffline.md expr: | (count(cnpg_collector_up{namespace="{{ .namespace }}",pod=~"{{ .podSelector }}"}) OR on() vector(0)) == 0 for: 5m diff --git a/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml b/charts/paradedb/prometheus_rules/cluster-zone_spread-warning.yaml similarity index 72% rename from charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml rename to charts/paradedb/prometheus_rules/cluster-zone_spread-warning.yaml index 41fa4002a..ba41c55da 100644 --- a/charts/cluster/prometheus_rules/cluster-zone_spread-warning.yaml +++ b/charts/paradedb/prometheus_rules/cluster-zone_spread-warning.yaml @@ -2,12 +2,12 @@ {{- if not (has $alert .excludeRules) -}} alert: {{ $alert }} annotations: - summary: CNPG Cluster instances in the same zone. + summary: ParadeDB CNPG Cluster instances in the same zone. description: |- - CloudNativePG Cluster "{{ .namespace }}/{{ .cluster }}" has instances in the same availability zone. + ParadeDB CNPG Cluster "{{ .namespace }}/{{ .cluster }}" has instances in the same availability zone. A disaster in one availability zone will lead to a potential service disruption and/or data loss. - runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md + runbook_url: https://github.com/paradedb/charts/blob/main/charts/paradedb/docs/runbooks/CNPGClusterZoneSpreadWarning.md expr: | {{ .Values.cluster.instances }} > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="{{ .namespace }}", pod=~"{{ .podSelector }}"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3 for: 5m diff --git a/charts/cluster/templates/NOTES.txt b/charts/paradedb/templates/NOTES.txt similarity index 98% rename from charts/cluster/templates/NOTES.txt rename to charts/paradedb/templates/NOTES.txt index 5e96a74ea..5c27b2e75 100644 --- a/charts/cluster/templates/NOTES.txt +++ b/charts/paradedb/templates/NOTES.txt @@ -8,9 +8,9 @@ {{ if .Release.IsInstall }} -The {{ include "cluster.color-info" (include "cluster.fullname" .) }} has been installed successfully. +The {{ include "cluster.color-info" (include "cluster.fullname" .) }} cluster has been installed successfully. {{ else if .Release.IsUpgrade }} -The {{ include "cluster.color-info" (include "cluster.fullname" .) }} has been upgraded successfully. +The {{ include "cluster.color-info" (include "cluster.fullname" .) }} cluster has been upgraded successfully. 
{{ end }} ██████ ██ ██ ████ ██ ██ ██ ███████ ████████ diff --git a/charts/cluster/templates/_backup.tpl b/charts/paradedb/templates/_backup.tpl similarity index 100% rename from charts/cluster/templates/_backup.tpl rename to charts/paradedb/templates/_backup.tpl diff --git a/charts/cluster/templates/_barman_object_store.tpl b/charts/paradedb/templates/_barman_object_store.tpl similarity index 100% rename from charts/cluster/templates/_barman_object_store.tpl rename to charts/paradedb/templates/_barman_object_store.tpl diff --git a/charts/cluster/templates/_bootstrap.tpl b/charts/paradedb/templates/_bootstrap.tpl similarity index 67% rename from charts/cluster/templates/_bootstrap.tpl rename to charts/paradedb/templates/_bootstrap.tpl index aea7d9429..8aa1fa290 100644 --- a/charts/cluster/templates/_bootstrap.tpl +++ b/charts/paradedb/templates/_bootstrap.tpl @@ -3,26 +3,55 @@ bootstrap: initdb: {{- with .Values.cluster.initdb }} - {{- with (omit . "postInitApplicationSQL" "owner") }} + {{- with (omit . "postInitSQL" "postInitApplicationSQL" "postInitTemplateSQL" "owner") }} {{- . | toYaml | nindent 4 }} {{- end }} {{- end }} {{- if .Values.cluster.initdb.owner }} owner: {{ tpl .Values.cluster.initdb.owner . }} {{- end }} + postInitSQL: + {{- if eq .Values.type "paradedb" }} + - CREATE EXTENSION IF NOT EXISTS pg_cron; + {{- end }} + {{- with .Values.cluster.initdb }} + {{- range .postInitSQL }} + {{- printf "- %s" . | nindent 6 }} + {{- end -}} + {{- end }} postInitApplicationSQL: - {{- if eq .Values.type "postgis" }} + {{- if eq .Values.type "paradedb" }} + - CREATE EXTENSION IF NOT EXISTS pg_search; + - CREATE EXTENSION IF NOT EXISTS pg_analytics; + - CREATE EXTENSION IF NOT EXISTS pg_ivm; + - CREATE EXTENSION IF NOT EXISTS vector; + - CREATE EXTENSION IF NOT EXISTS postgis; + - CREATE EXTENSION IF NOT EXISTS postgis_topology; + - CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; + - CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder; + - ALTER DATABASE "{{ default "paradedb" .Values.cluster.initdb.database }}" SET search_path TO public,paradedb; + {{- end }} + {{- with .Values.cluster.initdb }} + {{- range .postInitApplicationSQL }} + {{- printf "- %s" . | nindent 6 }} + {{- end -}} + {{- end }} + postInitTemplateSQL: + {{- if eq .Values.type "paradedb" }} + - CREATE EXTENSION IF NOT EXISTS pg_search; + - CREATE EXTENSION IF NOT EXISTS pg_analytics; + - CREATE EXTENSION IF NOT EXISTS pg_ivm; + - CREATE EXTENSION IF NOT EXISTS vector; - CREATE EXTENSION IF NOT EXISTS postgis; - CREATE EXTENSION IF NOT EXISTS postgis_topology; - CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; - CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder; - {{- else if eq .Values.type "timescaledb" }} - - CREATE EXTENSION IF NOT EXISTS timescaledb; + - ALTER DATABASE template1 SET search_path TO public,paradedb; {{- end }} {{- with .Values.cluster.initdb }} - {{- range .postInitApplicationSQL }} - {{- printf "- %s" . | nindent 6 }} - {{- end -}} + {{- range .postInitTemplateSQL }} + {{- printf "- %s" . 
| nindent 6 }} + {{- end -}} {{- end -}} {{- else if eq .Values.mode "recovery" -}} bootstrap: diff --git a/charts/cluster/templates/_colorize.tpl b/charts/paradedb/templates/_colorize.tpl similarity index 100% rename from charts/cluster/templates/_colorize.tpl rename to charts/paradedb/templates/_colorize.tpl diff --git a/charts/cluster/templates/_helpers.tpl b/charts/paradedb/templates/_helpers.tpl similarity index 75% rename from charts/cluster/templates/_helpers.tpl rename to charts/paradedb/templates/_helpers.tpl index 96726fdfe..041e90353 100644 --- a/charts/cluster/templates/_helpers.tpl +++ b/charts/paradedb/templates/_helpers.tpl @@ -51,13 +51,6 @@ app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/part-of: cloudnative-pg {{- end }} -{{/* -Whether we need to use TimescaleDB defaults -*/}} -{{- define "cluster.useTimescaleDBDefaults" -}} -{{ and (eq .Values.type "timescaledb") .Values.imageCatalog.create (empty .Values.cluster.imageCatalogRef.name) (empty .Values.imageCatalog.images) (empty .Values.cluster.imageName) }} -{{- end -}} - {{/* Get the PostgreSQL major version from .Values.version.postgresql */}} @@ -74,8 +67,8 @@ If a custom imageName is available, use it, otherwise use the defaults based on {{- .Values.cluster.imageName -}} {{- else if eq .Values.type "postgresql" -}} {{- printf "ghcr.io/cloudnative-pg/postgresql:%s" .Values.version.postgresql -}} - {{- else if eq .Values.type "postgis" -}} - {{- printf "ghcr.io/cloudnative-pg/postgis:%s-%s" .Values.version.postgresql .Values.version.postgis -}} + {{- else if eq .Values.type "paradedb" -}} + {{- printf "paradedb/paradedb:%s-v%s" .Values.version.postgresql .Values.version.paradedb -}} {{- else -}} {{ fail "Invalid cluster type!" }} {{- end }} @@ -83,7 +76,7 @@ If a custom imageName is available, use it, otherwise use the defaults based on {{/* Cluster Image -If imageCatalogRef defined, use it, otherwice calculate ordinary imageName. +If imageCatalogRef defined, use it, otherwise calculate ordinary imageName. */}} {{- define "cluster.image" }} {{- if .Values.cluster.imageCatalogRef.name }} @@ -97,12 +90,6 @@ imageCatalogRef: kind: ImageCatalog name: {{ include "cluster.fullname" . }} major: {{ include "cluster.postgresqlMajor" . }} -{{- else if eq (include "cluster.useTimescaleDBDefaults" .) "true" -}} -imageCatalogRef: - apiGroup: postgresql.cnpg.io - kind: ImageCatalog - name: {{ include "cluster.fullname" . }}-timescaledb-ha - major: {{ include "cluster.postgresqlMajor" . }} {{- else }} imageName: {{ include "cluster.imageName" . }} {{- end }} @@ -114,8 +101,8 @@ Postgres UID {{- define "cluster.postgresUID" -}} {{- if ge (int .Values.cluster.postgresUID) 0 -}} {{- .Values.cluster.postgresUID }} - {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) "true") (eq .Values.type "timescaledb") -}} - {{- 1000 -}} + {{- else if eq .Values.type "paradedb" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} @@ -127,8 +114,8 @@ Postgres GID {{- define "cluster.postgresGID" -}} {{- if ge (int .Values.cluster.postgresGID) 0 -}} {{- .Values.cluster.postgresGID }} - {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) 
"true") (eq .Values.type "timescaledb") -}} - {{- 1000 -}} + {{- else if eq .Values.type "paradedb" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} diff --git a/charts/cluster/templates/backup-azure-creds.yaml b/charts/paradedb/templates/backup-azure-creds.yaml similarity index 100% rename from charts/cluster/templates/backup-azure-creds.yaml rename to charts/paradedb/templates/backup-azure-creds.yaml diff --git a/charts/cluster/templates/backup-google-creds.yaml b/charts/paradedb/templates/backup-google-creds.yaml similarity index 100% rename from charts/cluster/templates/backup-google-creds.yaml rename to charts/paradedb/templates/backup-google-creds.yaml diff --git a/charts/cluster/templates/backup-s3-creds.yaml b/charts/paradedb/templates/backup-s3-creds.yaml similarity index 100% rename from charts/cluster/templates/backup-s3-creds.yaml rename to charts/paradedb/templates/backup-s3-creds.yaml diff --git a/charts/cluster/templates/ca-bundle.yaml b/charts/paradedb/templates/ca-bundle.yaml similarity index 100% rename from charts/cluster/templates/ca-bundle.yaml rename to charts/paradedb/templates/ca-bundle.yaml diff --git a/charts/cluster/templates/cluster.yaml b/charts/paradedb/templates/cluster.yaml similarity index 97% rename from charts/cluster/templates/cluster.yaml rename to charts/paradedb/templates/cluster.yaml index 169683fef..c682546c5 100644 --- a/charts/cluster/templates/cluster.yaml +++ b/charts/paradedb/templates/cluster.yaml @@ -53,8 +53,10 @@ spec: {{ end }} postgresql: shared_preload_libraries: - {{- if eq .Values.type "timescaledb" }} - - timescaledb + {{- if eq .Values.type "paradedb" }} + - pg_search + - pg_analytics + - pg_cron {{- end }} {{- with .Values.cluster.postgresql.shared_preload_libraries }} {{- toYaml . | nindent 6 }} diff --git a/charts/cluster/templates/image-catalog.yaml b/charts/paradedb/templates/image-catalog.yaml similarity index 100% rename from charts/cluster/templates/image-catalog.yaml rename to charts/paradedb/templates/image-catalog.yaml diff --git a/charts/cluster/templates/pooler.yaml b/charts/paradedb/templates/pooler.yaml similarity index 100% rename from charts/cluster/templates/pooler.yaml rename to charts/paradedb/templates/pooler.yaml diff --git a/charts/cluster/templates/prometheus-rule.yaml b/charts/paradedb/templates/prometheus-rule.yaml similarity index 100% rename from charts/cluster/templates/prometheus-rule.yaml rename to charts/paradedb/templates/prometheus-rule.yaml diff --git a/charts/cluster/templates/recovery-azure-creds.yaml b/charts/paradedb/templates/recovery-azure-creds.yaml similarity index 100% rename from charts/cluster/templates/recovery-azure-creds.yaml rename to charts/paradedb/templates/recovery-azure-creds.yaml diff --git a/charts/cluster/templates/recovery-google-creds.yaml b/charts/paradedb/templates/recovery-google-creds.yaml similarity index 100% rename from charts/cluster/templates/recovery-google-creds.yaml rename to charts/paradedb/templates/recovery-google-creds.yaml diff --git a/charts/cluster/templates/recovery-pg_basebackup-password.yaml b/charts/paradedb/templates/recovery-pg_basebackup-password.yaml similarity index 100% rename from charts/cluster/templates/recovery-pg_basebackup-password.yaml rename to charts/paradedb/templates/recovery-pg_basebackup-password.yaml diff --git a/charts/cluster/templates/recovery-s3-creds.yaml b/charts/paradedb/templates/recovery-s3-creds.yaml similarity index 100% rename from charts/cluster/templates/recovery-s3-creds.yaml rename to 
charts/paradedb/templates/recovery-s3-creds.yaml diff --git a/charts/cluster/templates/scheduled-backups.yaml b/charts/paradedb/templates/scheduled-backups.yaml similarity index 100% rename from charts/cluster/templates/scheduled-backups.yaml rename to charts/paradedb/templates/scheduled-backups.yaml diff --git a/charts/cluster/templates/tests/ping.yaml b/charts/paradedb/templates/tests/ping.yaml similarity index 100% rename from charts/cluster/templates/tests/ping.yaml rename to charts/paradedb/templates/tests/ping.yaml diff --git a/charts/cluster/templates/user-metrics.yaml b/charts/paradedb/templates/user-metrics.yaml similarity index 100% rename from charts/cluster/templates/user-metrics.yaml rename to charts/paradedb/templates/user-metrics.yaml diff --git a/charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml b/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml similarity index 89% rename from charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml rename to charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml index ce6544e2e..575e02082 100644 --- a/charts/cluster/test/monitoring/01-monitoring_cluster-assert.yaml +++ b/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml @@ -35,11 +35,11 @@ spec: apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: - name: monitoring-cluster + name: monitoring-paradedb spec: selector: matchLabels: - cnpg.io/cluster: monitoring-cluster + cnpg.io/cluster: monitoring-paradedb podMetricsEndpoints: - bearerTokenSecret: key: '' @@ -60,11 +60,11 @@ spec: apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: - name: monitoring-cluster-pooler-rw + name: monitoring-paradedb-pooler-rw spec: selector: matchLabels: - cnpg.io/poolerName: monitoring-cluster-pooler-rw + cnpg.io/poolerName: monitoring-paradedb-pooler-rw podMetricsEndpoints: - bearerTokenSecret: key: '' @@ -87,11 +87,11 @@ spec: apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: - name: monitoring-cluster-pooler-ro + name: monitoring-paradedb-pooler-ro spec: selector: matchLabels: - cnpg.io/poolerName: monitoring-cluster-pooler-ro + cnpg.io/poolerName: monitoring-paradedb-pooler-ro podMetricsEndpoints: - bearerTokenSecret: key: '' @@ -114,12 +114,12 @@ spec: apiVersion: monitoring.coreos.com/v1 kind: PrometheusRule metadata: - name: monitoring-cluster-alert-rules + name: monitoring-paradedb-alert-rules --- apiVersion: v1 kind: ConfigMap metadata: - name: monitoring-cluster-monitoring + name: monitoring-paradedb-monitoring data: custom-queries: | pg_cache_hit_ratio: diff --git a/charts/cluster/test/monitoring/01-monitoring_cluster.yaml b/charts/paradedb/test/monitoring/01-monitoring_cluster.yaml similarity index 100% rename from charts/cluster/test/monitoring/01-monitoring_cluster.yaml rename to charts/paradedb/test/monitoring/01-monitoring_cluster.yaml diff --git a/charts/cluster/test/monitoring/chainsaw-test.yaml b/charts/paradedb/test/monitoring/chainsaw-test.yaml similarity index 100% rename from charts/cluster/test/monitoring/chainsaw-test.yaml rename to charts/paradedb/test/monitoring/chainsaw-test.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/00-minio_cleanup-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/00-minio_cleanup-assert.yaml diff --git 
a/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/00-minio_cleanup.yaml similarity index 83% rename from charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/00-minio_cleanup.yaml index ce71b1ef7..19d550162 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/00-minio_cleanup.yaml @@ -13,4 +13,4 @@ spec: args: - | mc alias set myminio https://minio.minio.svc.cluster.local minio minio123 - mc rm --recursive --force myminio/mybucket/timescale + mc rm --recursive --force myminio/mybucket/paradedb diff --git a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster-assert.yaml similarity index 76% rename from charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster-assert.yaml index 3bbd2f8fe..b72bd8548 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster-assert.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: timescale-cluster + name: paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster.yaml similarity index 90% rename from charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster.yaml index f84117fe0..3bae23341 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/01-timescale_cluster.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/01-paradedb_cluster.yaml @@ -1,4 +1,4 @@ -type: timescaledb +type: paradedb mode: standalone cluster: @@ -8,7 +8,6 @@ cluster: backups: enabled: true - provider: s3 endpointURL: "https://minio.minio.svc.cluster.local" endpointCA: @@ -20,7 +19,7 @@ backups: encryption: "" s3: bucket: "mybucket" - path: "/timescale/v1" + path: "/paradedb/v1" accessKey: "minio" secretKey: "minio123" region: "local" diff --git a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write-assert.yaml similarity index 73% rename from charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write-assert.yaml index aa63a21c9..3fac848be 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test-assert.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write-assert.yaml @@ -1,6 +1,6 @@ apiVersion: batch/v1 kind: Job metadata: - name: timescale-test + name: paradedb-write status: succeeded: 1 diff --git a/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml new file mode 100644 index 000000000..56d96ba64 --- /dev/null +++ b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml @@ -0,0 +1,33 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: paradedb-write +spec: + template: 
+ spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: paradedb-app + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + psql "$DB_URI" <<-EOSQL + CALL paradedb.create_bm25_test_table( schema_name => 'public', table_name => 'mock_items' ); + CALL paradedb.create_bm25( + index_name => 'search_idx', + schema_name => 'public', + table_name => 'mock_items', + key_field => 'id', + text_fields => paradedb.field('description', tokenizer => paradedb.tokenizer('en_stem')) || + paradedb.field('category'), + numeric_fields => paradedb.field('rating') + ); + EOSQL diff --git a/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test-assert.yaml new file mode 100644 index 000000000..678c11c9b --- /dev/null +++ b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: paradedb-test +status: + succeeded: 1 diff --git a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml similarity index 55% rename from charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml index 9b7581f96..3741db798 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/03-timescale_test.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: timescale-test + name: paradedb-test spec: template: spec: @@ -12,11 +12,16 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: timescale-cluster-app + name: paradedb-app key: uri image: alpine:3.19 command: ['sh', '-c'] args: - | apk --no-cache add postgresql-client - test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM pg_extension WHERE extname = '\''timescaledb'\'')' --csv -q 2>/dev/null)" = "t" \ No newline at end of file + RESULT=$(psql "$DB_URI" -t) <<-EOSQL + SELECT description + FROM search_idx.search('description:"bluetooth speaker"~1'); + EOSQL + echo -$RESULT- + test "$RESULT" = " Bluetooth-enabled speaker" diff --git a/charts/cluster/test/timescale-minio-backup-restore/04-data_write-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/04-data_write-assert.yaml similarity index 100% rename from charts/cluster/test/timescale-minio-backup-restore/04-data_write-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/04-data_write-assert.yaml diff --git a/charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/04-data_write.yaml similarity index 96% rename from charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/04-data_write.yaml index b827de143..c215d365f 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/04-data_write.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/04-data_write.yaml @@ -39,7 +39,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: timescale-cluster-superuser + name: paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml 
b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup.yaml similarity index 81% rename from charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/05-backup.yaml index be5e4b181..56933359e 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/05-backup.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup.yaml @@ -5,4 +5,4 @@ metadata: spec: method: barmanObjectStore cluster: - name: timescale-cluster + name: paradedb diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup_completed-assert.yaml similarity index 84% rename from charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/05-backup_completed-assert.yaml index 040b1a49e..cc179ed65 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/05-backup_completed-assert.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup_completed-assert.yaml @@ -4,7 +4,7 @@ metadata: name: post-init-backup spec: cluster: - name: timescale-cluster + name: paradedb method: barmanObjectStore status: phase: completed diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup_running-assert.yaml similarity index 84% rename from charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/05-backup_running-assert.yaml index dc35727a0..bc609eca5 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/05-backup_running-assert.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/05-backup_running-assert.yaml @@ -4,7 +4,7 @@ metadata: name: post-init-backup spec: cluster: - name: timescale-cluster + name: paradedb method: barmanObjectStore status: phase: running diff --git a/charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/05-checkpoint.yaml similarity index 93% rename from charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/05-checkpoint.yaml index 3ba7fc727..e59ff1f70 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/05-checkpoint.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/05-checkpoint.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: timescale-cluster-superuser + name: paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/06-post_backup_data_write-assert.yaml similarity index 100% rename from charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/06-post_backup_data_write-assert.yaml diff --git a/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/06-post_backup_data_write.yaml similarity index 93% rename from charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/06-post_backup_data_write.yaml index 
8585b247d..582104f7d 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/06-post_backup_data_write.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/06-post_backup_data_write.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: timescale-cluster-superuser + name: paradedb-superuser key: uri - name: NAMESPACE valueFrom: diff --git a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml similarity index 69% rename from charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml index 2b6b9651f..640223c16 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: recovery-backup-pitr-cluster + name: recovery-backup-pitr-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml similarity index 92% rename from charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml index 7e9c38f55..8031e3c2b 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml @@ -1,4 +1,4 @@ -type: timescaledb +type: paradedb mode: recovery cluster: @@ -20,7 +20,7 @@ recovery: encryption: "" s3: bucket: "mybucket" - path: "/timescale/v1" + path: "/paradedb/v1" accessKey: "minio" secretKey: "minio123" region: "local" @@ -40,7 +40,7 @@ backups: encryption: "" s3: bucket: "mybucket" - path: "/timescale/v2" + path: "/paradedb/v2" accessKey: "minio" secretKey: "minio123" region: "local" diff --git a/charts/cluster/test/timescale-minio-backup-restore/08-data_test-assert.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/08-data_test-assert.yaml similarity index 100% rename from charts/cluster/test/timescale-minio-backup-restore/08-data_test-assert.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/08-data_test-assert.yaml diff --git a/charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/08-data_test.yaml similarity index 93% rename from charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/08-data_test.yaml index 5fb4faf39..412852502 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/08-data_test.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/08-data_test.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: recovery-backup-pitr-cluster-superuser + name: recovery-backup-pitr-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/chainsaw-test.yaml similarity index 76% rename from 
charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml rename to charts/paradedb/test/paradedb-minio-backup-restore/chainsaw-test.yaml index 496153398..313b1285b 100644 --- a/charts/cluster/test/timescale-minio-backup-restore/chainsaw-test.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/chainsaw-test.yaml @@ -1,14 +1,14 @@ ## -# This test sets up a timescale cluster with MinIO backups and ensured that timescale extensions are installed and +# This test sets up a ParadeDB CNPG Cluster with MinIO backups and ensures that ParadeDB extensions are installed and # PITR recovery is enabled and working. apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: - name: timescale + name: paradedb spec: timeouts: apply: 1s - assert: 5m + assert: 7m cleanup: 1m steps: - name: Clear the MinIO bucket @@ -17,7 +17,7 @@ spec: file: ./00-minio_cleanup.yaml - assert: file: ./00-minio_cleanup-assert.yaml - - name: Install a standalone timescale cluster + - name: Install a standalone ParadeDB CNPG Cluster try: - script: content: | @@ -25,26 +25,41 @@ spec: helm upgrade \ --install \ --namespace $NAMESPACE \ - --values ./01-timescale_cluster.yaml \ + --values ./01-paradedb_cluster.yaml \ --wait \ - timescale ../../ + paradedb ../../ - assert: - file: ./01-timescale_cluster-assert.yaml + file: ./01-paradedb_cluster-assert.yaml catch: - describe: apiVersion: postgresql.cnpg.io/v1 kind: Cluster - podLogs: - selector: cnpg.io/cluster=timescale-cluster - - name: Verify timescale extensions are installed + selector: cnpg.io/cluster=paradedb-paradedb + - name: Initialize with ParadeDB sample data + timeouts: + apply: 1s + assert: 10s + try: + - apply: + file: ./02-paradedb_write.yaml + - assert: + file: ./02-paradedb_write-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-write + - name: Verify ParadeDB extensions are installed timeouts: apply: 1s assert: 30s try: - apply: - file: 03-timescale_test.yaml + file: 03-paradedb_test.yaml - assert: - file: 03-timescale_test-assert.yaml + file: 03-paradedb_test-assert.yaml catch: - describe: apiVersion: batch/v1 @@ -108,7 +123,7 @@ spec: apiVersion: postgresql.cnpg.io/v1 kind: Cluster - podLogs: - selector: cnpg.io/cluster=recovery-backup-pitr-cluster + selector: cnpg.io/cluster=recovery-backup-pitr-paradedb - name: Verify the pre-backup data on the recovery cluster exists but not the post-backup data try: - apply: @@ -126,4 +141,4 @@ spec: try: - script: content: | - helm uninstall --namespace $NAMESPACE timescale + helm uninstall --namespace $NAMESPACE paradedb diff --git a/charts/cluster/test/pooler/01-pooler_cluster-assert.yaml b/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml similarity index 84% rename from charts/cluster/test/pooler/01-pooler_cluster-assert.yaml rename to charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml index db23167c7..ee35bd9ba 100644 --- a/charts/cluster/test/pooler/01-pooler_cluster-assert.yaml +++ b/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: pooler-cluster-pooler-rw + name: pooler-paradedb-pooler-rw status: readyReplicas: 2 --- @@ -15,10 +15,10 @@ status: apiVersion: postgresql.cnpg.io/v1 kind: Pooler metadata: - name: pooler-cluster-pooler-rw + name: pooler-paradedb-pooler-rw spec: cluster: - name: pooler-cluster + name: pooler-paradedb instances: 2 pgbouncer: poolMode: transaction diff --git 
a/charts/cluster/test/pooler/01-pooler_cluster.yaml b/charts/paradedb/test/pooler/01-pooler_cluster.yaml similarity index 100% rename from charts/cluster/test/pooler/01-pooler_cluster.yaml rename to charts/paradedb/test/pooler/01-pooler_cluster.yaml diff --git a/charts/cluster/test/pooler/chainsaw-test.yaml b/charts/paradedb/test/pooler/chainsaw-test.yaml similarity index 100% rename from charts/cluster/test/pooler/chainsaw-test.yaml rename to charts/paradedb/test/pooler/chainsaw-test.yaml diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml b/charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml similarity index 93% rename from charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml rename to charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml index 5f5c62a68..9a2efc44e 100644 --- a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: non-default-configuration-cluster + name: non-default-configuration-paradedb labels: foo: bar annotations: @@ -15,12 +15,14 @@ spec: postgresql: parameters: max_connections: "42" + cron.database_name: "postgres" pg_hba: - host all 1.2.3.4/32 trust pg_ident: - mymap /^(.*)@mydomain\.com$ \1 shared_preload_libraries: - pgaudit + - pg_cron bootstrap: initdb: database: mydb @@ -32,6 +34,7 @@ spec: postInitTemplateSQL: - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); postInitSQL: + - CREATE EXTENSION IF NOT EXISTS pg_cron; - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); superuserSecret: name: supersecret-secret diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml b/charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml similarity index 95% rename from charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml rename to charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml index 570ea8409..53a534a89 100644 --- a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml +++ b/charts/paradedb/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml @@ -55,12 +55,14 @@ cluster: postgresql: parameters: max_connections: "42" + cron.database_name: "postgres" pg_hba: - host all 1.2.3.4/32 trust pg_ident: - mymap /^(.*)@mydomain\.com$ \1 shared_preload_libraries: - pgaudit + - pg_cron initdb: database: mydb owner: dante @@ -71,6 +73,7 @@ cluster: postInitTemplateSQL: - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); postInitSQL: + - CREATE EXTENSION IF NOT EXISTS pg_cron; - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255)); additionalLabels: foo: bar diff --git a/charts/cluster/test/postgresql-cluster-configuration/chainsaw-test.yaml b/charts/paradedb/test/postgresql-cluster-configuration/chainsaw-test.yaml similarity index 100% rename from charts/cluster/test/postgresql-cluster-configuration/chainsaw-test.yaml rename to charts/paradedb/test/postgresql-cluster-configuration/chainsaw-test.yaml diff --git 
a/charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml similarity index 100% rename from charts/cluster/test/timescale-minio-backup-restore/00-minio_cleanup-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/00-minio_cleanup-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/00-minio_cleanup.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml similarity index 75% rename from charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml index 0663e78c9..d759bfcdb 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/01-standalone_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: standalone-cluster + name: standalone-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/01-standalone_cluster.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/02-data_write-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/02-data_write-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/02-data_write-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/02-data_write-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/02-data_write.yaml similarity index 91% rename from charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/02-data_write.yaml index e674d8b53..34a3d2371 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/02-data_write.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/02-data_write.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: standalone-cluster-superuser + name: standalone-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup.yaml similarity index 80% rename from charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/03-backup.yaml index c3afd4676..8fe8c4686 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/03-backup.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup.yaml @@ -5,4 +5,4 @@ metadata: spec: method: barmanObjectStore cluster: 
- name: standalone-cluster + name: standalone-paradedb diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml similarity index 83% rename from charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml index 7b1e9e534..efffadd46 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup_completed-assert.yaml @@ -4,7 +4,7 @@ metadata: name: post-init-backup spec: cluster: - name: standalone-cluster + name: standalone-paradedb method: barmanObjectStore status: phase: completed diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml similarity index 83% rename from charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml index cbd9645c5..ab709ada9 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/03-backup_running-assert.yaml @@ -4,7 +4,7 @@ metadata: name: post-init-backup spec: cluster: - name: standalone-cluster + name: standalone-paradedb method: barmanObjectStore status: phase: running diff --git a/charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/03-checkpoint.yaml similarity index 92% rename from charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/03-checkpoint.yaml index 52862bf07..680ba1b2d 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/03-checkpoint.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/03-checkpoint.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: standalone-cluster-superuser + name: standalone-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/04-post_backup_data_write-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml similarity index 96% rename from charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml index 2e56595de..50c7ab6a2 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/04-post_backup_data_write.yaml @@ -39,7 +39,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: standalone-cluster-superuser + name: standalone-paradedb-superuser key: uri - name: NAMESPACE valueFrom: diff --git 
a/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml similarity index 72% rename from charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml index 90c4b24db..3d99b1357 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/05-recovery_backup_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: recovery-backup-cluster + name: recovery-backup-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/05-recovery_backup_cluster.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/06-data_test-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/06-data_test-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/06-data_test-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/06-data_test-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/06-data_test.yaml similarity index 91% rename from charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/06-data_test.yaml index 86a15439b..734b45f41 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/06-data_test.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/06-data_test.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: recovery-backup-cluster-superuser + name: recovery-backup-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml similarity index 68% rename from charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml index f8693036b..d9895f17c 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: recovery-object-store-cluster + name: recovery-object-store-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml similarity index 96% rename from charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml rename to 
charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml index 7f059e394..07f5dbc53 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/07-recovery_object_store_cluster.yaml @@ -8,7 +8,7 @@ cluster: recovery: method: object_store - clusterName: "standalone-cluster" + clusterName: "standalone-paradedb" provider: s3 endpointURL: "https://minio.minio.svc.cluster.local" endpointCA: diff --git a/charts/cluster/test/postgresql-minio-backup-restore/08-data_test-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/08-data_test-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/08-data_test-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/08-data_test-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/08-data_test.yaml similarity index 90% rename from charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/08-data_test.yaml index 94ac2c34e..402d243ad 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/08-data_test.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/08-data_test.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: recovery-object-store-cluster-superuser + name: recovery-object-store-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml similarity index 69% rename from charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml index 2b6b9651f..640223c16 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: recovery-backup-pitr-cluster + name: recovery-backup-pitr-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/09-recovery_backup_pitr_cluster.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/10-data_test-assert.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/10-data_test-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/10-data_test-assert.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/10-data_test-assert.yaml diff --git a/charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/10-data_test.yaml similarity index 93% rename from charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml rename to 
charts/paradedb/test/postgresql-minio-backup-restore/10-data_test.yaml index 5fb4faf39..412852502 100644 --- a/charts/cluster/test/postgresql-minio-backup-restore/10-data_test.yaml +++ b/charts/paradedb/test/postgresql-minio-backup-restore/10-data_test.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: recovery-backup-pitr-cluster-superuser + name: recovery-backup-pitr-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-minio-backup-restore/chainsaw-test.yaml b/charts/paradedb/test/postgresql-minio-backup-restore/chainsaw-test.yaml similarity index 100% rename from charts/cluster/test/postgresql-minio-backup-restore/chainsaw-test.yaml rename to charts/paradedb/test/postgresql-minio-backup-restore/chainsaw-test.yaml diff --git a/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml b/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml similarity index 78% rename from charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml index 90ea90fd5..f68d5419a 100644 --- a/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: source-cluster + name: source-paradedb status: readyInstances: 1 diff --git a/charts/cluster/test/postgresql-pg_basebackup/00-source-cluster.yaml b/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml similarity index 100% rename from charts/cluster/test/postgresql-pg_basebackup/00-source-cluster.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml diff --git a/charts/cluster/test/postgresql-pg_basebackup/01-data_write-assert.yaml b/charts/paradedb/test/postgresql-pg_basebackup/01-data_write-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-pg_basebackup/01-data_write-assert.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/01-data_write-assert.yaml diff --git a/charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml b/charts/paradedb/test/postgresql-pg_basebackup/01-data_write.yaml similarity index 80% rename from charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/01-data_write.yaml index cc5a743ad..17f92db89 100644 --- a/charts/cluster/test/postgresql-pg_basebackup/01-data_write.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/01-data_write.yaml @@ -12,15 +12,15 @@ spec: - name: DB_USER valueFrom: secretKeyRef: - name: source-cluster-superuser + name: source-paradedb-superuser key: username - name: DB_PASS valueFrom: secretKeyRef: - name: source-cluster-superuser + name: source-paradedb-superuser key: password - name: DB_URI - value: postgres://$(DB_USER):$(DB_PASS)@source-cluster-rw:5432 + value: postgres://$(DB_USER):$(DB_PASS)@source-paradedb-rw:5432 image: alpine:3.19 command: ['sh', '-c'] args: diff --git a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml similarity index 73% rename from charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml index 
9b953d44a..511de6837 100644 --- a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster-assert.yaml @@ -1,6 +1,6 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: pg-basebackup-cluster + name: pg-basebackup-paradedb status: readyInstances: 2 diff --git a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml similarity index 73% rename from charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml index d389200e8..0042bd629 100644 --- a/charts/cluster/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml @@ -4,15 +4,15 @@ recovery: method: "pg_basebackup" pgBaseBackup: source: - host: "source-cluster-rw" + host: "source-paradedb-rw" database: "mygooddb" username: "streaming_replica" sslMode: "require" sslKeySecret: - name: source-cluster-replication + name: source-paradedb-replication key: tls.key sslCertSecret: - name: source-cluster-replication + name: source-paradedb-replication key: tls.crt cluster: diff --git a/charts/cluster/test/postgresql-pg_basebackup/03-data_test-assert.yaml b/charts/paradedb/test/postgresql-pg_basebackup/03-data_test-assert.yaml similarity index 100% rename from charts/cluster/test/postgresql-pg_basebackup/03-data_test-assert.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/03-data_test-assert.yaml diff --git a/charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml b/charts/paradedb/test/postgresql-pg_basebackup/03-data_test.yaml similarity index 91% rename from charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/03-data_test.yaml index 40eb9029a..487e25bf2 100644 --- a/charts/cluster/test/postgresql-pg_basebackup/03-data_test.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/03-data_test.yaml @@ -12,7 +12,7 @@ spec: - name: DB_URI valueFrom: secretKeyRef: - name: pg-basebackup-cluster-superuser + name: pg-basebackup-paradedb-superuser key: uri image: alpine:3.19 command: ['sh', '-c'] diff --git a/charts/cluster/test/postgresql-pg_basebackup/chainsaw-test.yaml b/charts/paradedb/test/postgresql-pg_basebackup/chainsaw-test.yaml similarity index 100% rename from charts/cluster/test/postgresql-pg_basebackup/chainsaw-test.yaml rename to charts/paradedb/test/postgresql-pg_basebackup/chainsaw-test.yaml diff --git a/charts/cluster/values.schema.json b/charts/paradedb/values.schema.json similarity index 98% rename from charts/cluster/values.schema.json rename to charts/paradedb/values.schema.json index 81899f1cd..c9130b534 100644 --- a/charts/cluster/values.schema.json +++ b/charts/paradedb/values.schema.json @@ -200,7 +200,12 @@ "type": "array" }, "initdb": { - "type": "object" + "type": "object", + "properties": { + "database": { + "type": "string" + } + } }, "instances": { "type": "integer" @@ -556,14 +561,11 @@ "version": { "type": "object", "properties": { - "postgis": { + "paradedb": { "type": "string" }, "postgresql": { "type": "string" - }, - "timescaledb": { - "type": "string" } } } diff --git a/charts/cluster/values.yaml b/charts/paradedb/values.yaml similarity index 96% rename from charts/cluster/values.yaml rename to charts/paradedb/values.yaml index 
d45e5c1e9..c916c6dcd 100644 --- a/charts/cluster/values.yaml +++ b/charts/paradedb/values.yaml @@ -5,18 +5,15 @@ fullnameOverride: "" ### # -- Type of the CNPG database. Available types: -# * `postgresql` -# * `postgis` -# * `timescaledb` -type: postgresql +# * `paradedb` +type: paradedb version: # -- PostgreSQL major version to use postgresql: "16" - # -- If using TimescaleDB, specify the version - timescaledb: "2.15" - # -- If using PostGIS, specify the version - postgis: "3.4" + # -- The ParadeDB version, set in the publish CI workflow from the latest paradedb/paradedb GitHub tag + # -- We default to v0.10.3 for testing and local development + paradedb: "0.10.3" ### # -- Cluster mode of operation. Available modes: @@ -92,8 +89,8 @@ recovery: # See https://cloudnative-pg.io/documentation/1.22/bootstrap/#bootstrap-from-a-live-cluster-pg_basebackup pgBaseBackup: - # -- Name of the database used by the application. Default: `app`. - database: app + # -- Name of the database used by the application. Default: `paradedb`. + database: paradedb # -- Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. secret: "" # -- Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch @@ -102,7 +99,7 @@ recovery: host: "" port: 5432 username: "" - database: "app" + database: "paradedb" sslMode: "verify-full" passwordSecret: # -- Whether to create a secret for the password @@ -130,7 +127,7 @@ cluster: # -- Name of the container image, supporting both tags (:) and digests for deterministic and repeatable deployments: # :@sha256: - imageName: "" # Default value depends on type (postgresql/postgis/timescaledb) + imageName: "" # Default value depends on type (postgresql/paradedb) # -- Reference to `ImageCatalog` of `ClusterImageCatalog`, if specified takes precedence over `cluster.imageName` imageCatalogRef: {} @@ -254,7 +251,9 @@ cluster: postgresql: # -- PostgreSQL configuration options (postgresql.conf) - parameters: {} + parameters: + # Required by pg_cron + cron.database_name: postgres # max_connections: 300 # -- PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) pg_hba: [] @@ -269,15 +268,14 @@ cluster: # -- BootstrapInitDB is the configuration of the bootstrap process when initdb is used. # See: https://cloudnative-pg.io/documentation/current/bootstrap/ # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb - initdb: {} - # database: app + initdb: + database: paradedb # owner: "" # Defaults to the database name # secret: # name: "" # Name of the secret containing the initial credentials for the owner of the user database. 
If empty a new secret will be created from scratch # options: [] # encoding: UTF8 - # postInitSQL: - # - CREATE EXTENSION IF NOT EXISTS vector; + # postInitSQL: [] # postInitApplicationSQL: [] # postInitTemplateSQL: [] diff --git a/provenance.gpg b/provenance.gpg deleted file mode 100644 index 944a40b82..000000000 --- a/provenance.gpg +++ /dev/null @@ -1,83 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBGXXEuEBEADbLS7rJCSmZlrNmXvy0WPkfri4QEVZeGQQPcTCErAxm6b5dLnL -APZQfRRueiBtR784MPynsaz3358QMy54pEvgMoLruhWIZgSB6k+qQurmDj+i/W6f -inE5/Ekt7sa3C3CmPSQDYIL9MqkFBYtT8HMLCrDLJjsjU675/2SA47Dn63IHAMym -uEFuCWKwpWjP74+5F71AM9DYNLCZ/uS0Cqn/I7taOjhUQqBMPNl0BSzFnnrggMYg -W6uQDXWK3B6o7QBZR33SX9jknUQ3ZXCAW6wgGSxr8vHBhYnRyh8a6FNRdeGnWQEx -jYqg3r/4t8ObYus7hg/WEpEHd6QK4wujjqU578zsuruByWLpO/j7gKrpwVI7CrK9 -AOEm2hQrLsgLMi/dqmubVfcejgLhEoMnqzibKuGMK0v48nA0ab148UTgp8cWK5LB -1r66JDbgqVfUvN2PlgbnKkeNPX1aQVptRHQ+JU5DPEYjSau6dMn3i0IutJqePzoH -Wz6HrBULFOBwF/mIu38gQP7WB+YwMriz7sxYZjK6sl3Y3q2jpznG1tpObVYVki2p -sD3dila5AAY0hiu62kyVGA/JGaCAkS7HyEmEr3Y9lGnmeodCAOJy6SWJlJ2jTUlv -Xizw7U04w78XBDahMCcou3TmJzkQQ9hethC9QG+rpLQXJoVX92yZwtSC3QARAQAB -tHNDbG91ZE5hdGl2ZVBHIEhlbG0gQ2hhcnRzIChBdXRvbWF0aWNhbGx5IFNpZ25l -ZCBDaGFydHMgdmlhIEdpdEh1YiBBY3Rpb25zKSA8aGVsbS1jaGFydHMrbm8tcmVw -bHlAY2xvdWRuYXRpdmUtcGcuaW8+iQJUBBMBCgA+FiEEaZh15ou/yYAMvYau48aP -k7UMXsAFAmXXEuECGy8FCRLMAwAFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQ -48aPk7UMXsCfZA/7Bj6d/ZiXoKvz51V+l9TvDw4uuagiBTb+rKfwBlKfKuldd+Ld -p3ZTVqEJM/d+fCRg7+zatPLF5EQCdSa159NMw91s+HnsrJwcs6bfZN7tVR4OfVOe -7cqK/BwW+P+By8W9STI2xUZaSObA5S0mjvuLWCucq7vzQDtCqCPNkHCkcUN/D5q7 -Lv1NIUKyS/WSl2iGiaGEHpYprSakKhqxfj/wRHWO9bHpAKI2wvCgP6SGQ9u596X2 -hIDmLCRY58jZywWwUC99Ii0660284FChCdNdr1G+p/Wot0cUrX3RA0OPYs4hGiCx -N4WHZvMxbMo5ZQ72xSGdId3hT0w4JGeistbAbnhMpQJrI4pHE1Hh9Jd/nzKKBSrQ -3ZAqFM3Lkvy1LKcwv9o7SKhW3mB4+dyYqkGBTv8X9Uq1m/rYyM8zXeN/Q92d5B3d -lAuTrxyXMosUVQ59EdYvelhqyieGMZ/MBIMuv3R1/P+BRg7tDiPb4fq7MhU7nHve -ZC5jN1TAM2Z852iiQQonUE/gfSmD7l5Vqk48kqLKk3jNnbnxSDKuoEdxxzH7mXSU -Yc+uUSy85Age5iTcdZZ9lDJ3nOoj5xmgA60Vzl0CcP3l0YnOMfDwpc/fQ2Jj3Nd9 -zbgYvOKbyA5tQ2KtTOPNn3gALEj6Icwd/F/nCSrkpNwb4s0JB7WDX5O5eoWJATIE -EAEIAB0WIQRCKLl468bsh6Nzr4NYyuqPHMhQPgUCZdcVvQAKCRBYyuqPHMhQPlCt -B/IC5WhdcvQXrJtJ36XTdnwbx6uHF07PMzKm9aVhfmMcicLwnAsrolAkCXtjVng8 -UDPi89KcQDSPw4fcm1NIlyqs5ZyG5EncTr0WAFhrxGGgAs+NWiNFHB2pBkmKpt+p -PMr5CZgGqH4MgOtUpMXH1Vb21b8I1zST2tvqZ+34c6yPGbg/pz3yGuMyDmcwBmw7 -iyQahB9zkpYUI5hx+MVnvSqiQXFc6WZaO0eIJDwGv45WdL2g2DCYPf2KweFaVyWC -sY2cqf7vZfPujfkBFFZklU5GSkBZll+g+V4VD9XZQ/qQgTyPeuxP9wQhytDYnIOf -37lEfqcF794KCLV1oZAsMp6JAjMEEAEKAB0WIQQiaN4mLn+p64fo6pwKXEeyxVyu -tQUCZdcbfgAKCRAKXEeyxVyutSKuEACCrRMN14EJvc7hLs+LBn3ihhhqiu2brgw+ -BAAtpTlnGxc78laODj1vaRbNcnfpbl3gMeSD2CqRtj4jLoCg3Rl6WkVq993Nf3KV -zjXsaTTqagPnd+B+7QlTYfkceGgCjlLsZw6EkR40WqXuig7m0GUq0d5updWdtkID -/U6U7flcZA3n5vJQJVbZPGx1AQuCd7xjjyZFjI7ghQvBy4lIfdJPH8VQHjPtfssr -NDIycL/AlirqMjPEEOWYEXgqcpEX44nOluEdTuXRsOk9m4aouZPazWw3IzbYWfrh -0HRsW/QzIWHV1v1e1OKS9Vfbz24kuk+J89Ula76KslB31vR1y8Y1inL9YeDt0BEW -xNRdw6E15kWcpSjp6GmDBLPwBRgYG9UZ5MtQc7tg39m3DWD9NJCRxRR8hCtdANmF -SgWfELbrvt9OfzmgCq3BYTfRrKYuiMZu1dfN3+sv4BnC/iTMe2GtTBUDaWGXBOCF -3/CNgjaI4AfkiY8irgYJhxMhzednSqDnpwZQFB1RpHAouyKQ3gYsHiDds4lauCQT -PvPpa1yGN0HaySzbjsdQV/o+aI5g41t0YETC9CX5FzowKHj1r5ZEKRGDsxX4Ruqg -ZQ6GpEEkyaxOhYoGjOA6bG7G0evjBaGlLX1vRbq6Oy+6q3RJiKa0L2Fv7FD2hpo4 -JI7ot4OOXLkCDQRl1xLhARAA8hviIYBPp00JYc1ZEPNW7NqfN5JPSk0RMabV17sv -wggVfc/mgFsx9OrZ6LEphMZaeP4k1IIRilUGBuMzsvIiGu6QCgp6X27TeHaT2W/u -WxHA5tH3E+hBX053t1epdl3ZvviW0ylJCCwecEoZukbLVUqS4rt7MZNeDZI5SDhU 
-tHMqTlIA5xVCtJQFAuyn2IAW+SbSKx4fY05joXmcvPRLkLqUOJJyWecMUqdmYi9t -56yl33n+27nOVm1tJq1Jt0UpAPw4NXTaebxNAZZOciwjX14jphCKvVpbQsER6yg3 -swA4vrugf/Ig7RpuDqdi4bYqmwGjPUR6jq34XsId2KUn3Xxrme8uICHcdgycjIwx -vUWG6I9VqYv0qirgVU9JJ/ly9zf38LK28rxPkSefwW4gpcp+YKoKGDTGvjqzE28u -B8wPl0mzrViem+lnDgxRPFsRKm3+bLBL7Byk9i02pLxM+gEyrUexI5IGiYJ+zYEK -hJ1n3mAwz/pvoXw69UXNPf7CJ5ljeP860nwJWUaxspj7FLg7cBOCYt3Z31LCf9FX -Ty4EUUWAP5ikrgs8WlWAiV6DWNiUX4gIHOaUPvafY7QoMsDsajRolS70q8eTVz02 -Rta7UW4YP5WqocoJ1xFDLF43JyK5tX+l4Lqt35X8eGiawQnbXPbzBiPtUqy3ZycZ -cEkAEQEAAYkEcgQYAQoAJhYhBGmYdeaLv8mADL2GruPGj5O1DF7ABQJl1xLhAhsu -BQkSzAMAAkAJEOPGj5O1DF7AwXQgBBkBCgAdFiEE2/kVNvfboEsXjVCpNjzSe0Xa -3gAFAmXXEuEACgkQNjzSe0Xa3gB4nA/+L7CBpJvM1sbwk4HdKI/qhORtxbAlP5LY -QT6svWjUDZhDZwODPexlZ6PO957+4ClV/pa1vMnJ6C6c2jlI+V1wpiGXfKV3MdQU -L0yzOk8xB9CoJeGs9t9NxQaHOWrkFhW39odEb4cxeLYvE2vAQcb4VpK5BtYCbr/K -+pBWHDhHJbSKtufKfWJW4k0yJhMto0KcHYcLMsSiATHH84Zf3Mh94QE6Ib8qmhQv -N+W1XA/PzA6/7/5FmHIW/PFnUKTlf5cpwqzXWkV9SdGM5oHZFns1zev/0IdDBh5y -a2itEtB0qSx5zdjDQ6T0cE3oZnS8U3wIchlMaDAXEECdTKMB61Jb0MOoYOXTT/v6 -0t+j/Xh89G7N2M6JWXQu0mepnrryiOdh1J7s7EHhqsgLZQ68TFBaGlR7ja0ZEdK6 -u5csPI6+UJODx1tKskKHAovy/z5444j7TB6HWOR/3JZcgUPdQegL2+gEQNqyayWH -YrLuQxrmJsWCSCX6GX/4K0E//MgFTLNiHMZLMGOiYfBsbbnVS9A/swygY96Z63aY -DaR/VBp2Z6R8qh0ZJJBoaQzSkkbcGcHltQpI+wFZp4DMFpeVjaHFZCDVdag3CQfD -MZ4n7QcGPAoIvrQ5Te8Ftn9PWnTBA+h8U+l5ry+a+zKoSU5aOU+v9fAUFYGVXnfw -VDKknTCJi92oDQ/8DO095ePfqbagRp6v8FoR0vg7XgywSGhII88488OYZ++ErAme -h4rhYKKg8k6IjRj0mumGDtaFItAJx1U9+jwtqOAhCvYQbCKlUSsNj6+NWrdcU3Ic -LMBcb8Zb/MF5hs4ZpyrkixWKP35HnAqHs1nAGlRsfAVGJk6lLtuCZvMPEomUfUW4 -vUt9Pw0v8HFHXlq/OYk752XX4JDiReqa5Mz1MoeNbHJ9OgHGGyoUtKmeAp5Dh/FD -O6mU1ZMyWGkibZGtr7x87JBwuEMBlTldqs8e/O9Os4OSnx8VdDmmpeN84as+Xl/t -9gHYnd4HgSjH83oV+dXC7jNwjfucyFTyW9na78qxJkf31UrxHyq2WwSvDvS6CuhH -iSzJSx4/NOhEGjW+O0Cfazc1Jwpgx/1fcT6VijCsA7lv3uLfgF98la5Dv4QFBYA7 -oIRmJO+W1jfsyMwCc2j7va0iCkREjRY/8fsaT8ywQZLYLWzPHFreL/+JLSSMT7F2 -mAkr3qA+DLrudLxov+OYUwMoau12ImBvc1QSX05EAgaCZp/OgkKfqnhMQQUKMp+X -oAOFddo61el+ctOUHo0M4pVkew9MLkOd3rejeTP3eAmQLm8RzAcRbkd4yL3bNdiN -+gyAqqx+pNEQ7HAI/aqL1s+/vvXJHM25NF8uwkzPsrKbUHNSFiUWEmaxSts= -=ZWMG ------END PGP PUBLIC KEY BLOCK----- From f0eda4aaa3cfb0123faf47781ac03b4d71ae5ac1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20No=C3=ABl?= <21990816+philippemnoel@users.noreply.github.com> Date: Wed, 16 Oct 2024 13:24:23 -0400 Subject: [PATCH 2/8] chore: Use the new 0.11.0 release (#47) --- .github/workflows/check-typo.yml | 2 +- .github/workflows/tests-cluster-chainsaw.yaml | 2 +- charts/paradedb/Chart.yaml | 4 ++-- charts/paradedb/README.md | 2 +- charts/paradedb/examples/image-catalog-ref.yaml | 2 +- charts/paradedb/examples/image-catalog.yaml | 2 +- charts/paradedb/examples/paradedb.yaml | 2 +- .../02-paradedb_write.yaml | 15 ++++++++++----- .../03-paradedb_test.yaml | 4 +++- charts/paradedb/values.yaml | 4 ++-- 10 files changed, 23 insertions(+), 16 deletions(-) diff --git a/.github/workflows/check-typo.yml b/.github/workflows/check-typo.yml index 8b64424aa..134975b00 100644 --- a/.github/workflows/check-typo.yml +++ b/.github/workflows/check-typo.yml @@ -17,7 +17,7 @@ concurrency: jobs: check-typo: name: Check Typo using codespell - runs-on: depot-ubuntu-latest-2 + runs-on: ubuntu-latest if: github.event.pull_request.draft == false steps: diff --git a/.github/workflows/tests-cluster-chainsaw.yaml b/.github/workflows/tests-cluster-chainsaw.yaml index d7f9e6bdb..e471522fe 100644 --- a/.github/workflows/tests-cluster-chainsaw.yaml +++ b/.github/workflows/tests-cluster-chainsaw.yaml @@ -7,7 +7,7 @@ on: 
jobs: test-cluster-chainsaw: - runs-on: depot-ubuntu-latest-8 + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/charts/paradedb/Chart.yaml b/charts/paradedb/Chart.yaml index 8fc821776..f8458a5e4 100644 --- a/charts/paradedb/Chart.yaml +++ b/charts/paradedb/Chart.yaml @@ -20,8 +20,8 @@ icon: https://raw.githubusercontent.com/paradedb/paradedb/main/docs/logo/light.s type: application # The Chart version, set in the publish CI workflow from GitHub Actions Variables -# We default to v0.10.3 for testing and local development -version: 0.10.3 +# We default to v0.11.0 for testing and local development +version: 0.11.0 sources: - https://github.com/paradedb/charts diff --git a/charts/paradedb/README.md b/charts/paradedb/README.md index 505a4c2b5..fabac0b69 100644 --- a/charts/paradedb/README.md +++ b/charts/paradedb/README.md @@ -255,7 +255,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.secret.create | bool | `true` | Whether to create a secret for the backup credentials | | recovery.secret.name | string | `""` | Name of the backup credentials secret | | type | string | `"paradedb"` | Type of the CNPG database. Available types: * `paradedb` | -| version.paradedb | string | `"0.10.3"` | We default to v0.10.3 for testing and local development | +| version.paradedb | string | `"0.11.0"` | We default to v0.11.0 for testing and local development | | version.postgresql | string | `"16"` | PostgreSQL major version to use | | poolers[].name | string | `` | Name of the pooler resource | | poolers[].instances | number | `1` | The number of replicas we want | diff --git a/charts/paradedb/examples/image-catalog-ref.yaml b/charts/paradedb/examples/image-catalog-ref.yaml index a512ddd45..f3f6bd8f8 100644 --- a/charts/paradedb/examples/image-catalog-ref.yaml +++ b/charts/paradedb/examples/image-catalog-ref.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - paradedb: "0.10.3" + paradedb: "0.11.0" cluster: instances: 1 imageCatalogRef: diff --git a/charts/paradedb/examples/image-catalog.yaml b/charts/paradedb/examples/image-catalog.yaml index e82f99546..c4bbc2114 100644 --- a/charts/paradedb/examples/image-catalog.yaml +++ b/charts/paradedb/examples/image-catalog.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - paradedb: "0.10.3" + paradedb: "0.11.0" cluster: instances: 1 backups: diff --git a/charts/paradedb/examples/paradedb.yaml b/charts/paradedb/examples/paradedb.yaml index 1c2c4f75e..4241445da 100644 --- a/charts/paradedb/examples/paradedb.yaml +++ b/charts/paradedb/examples/paradedb.yaml @@ -2,7 +2,7 @@ type: paradedb mode: standalone version: postgresql: "16.3" - paradedb: "0.10.3" + paradedb: "0.11.0" cluster: instances: 1 backups: diff --git a/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml index 56d96ba64..2ae6607a7 100644 --- a/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/02-paradedb_write.yaml @@ -20,14 +20,19 @@ spec: - | apk --no-cache add postgresql-client psql "$DB_URI" <<-EOSQL - CALL paradedb.create_bm25_test_table( schema_name => 'public', table_name => 'mock_items' ); + CALL paradedb.create_bm25_test_table( + schema_name => 'public', + table_name => 'mock_items' + ); CALL paradedb.create_bm25( index_name => 'search_idx', - 
schema_name => 'public', table_name => 'mock_items', key_field => 'id', - text_fields => paradedb.field('description', tokenizer => paradedb.tokenizer('en_stem')) || - paradedb.field('category'), - numeric_fields => paradedb.field('rating') + text_fields => paradedb.field('description') || paradedb.field('category'), + numeric_fields => paradedb.field('rating'), + boolean_fields => paradedb.field('in_stock'), + datetime_fields => paradedb.field('created_at'), + json_fields => paradedb.field('metadata'), + range_fields => paradedb.field('weight_range') ); EOSQL diff --git a/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml index 3741db798..8840d9d7c 100644 --- a/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml +++ b/charts/paradedb/test/paradedb-minio-backup-restore/03-paradedb_test.yaml @@ -21,7 +21,9 @@ spec: apk --no-cache add postgresql-client RESULT=$(psql "$DB_URI" -t) <<-EOSQL SELECT description - FROM search_idx.search('description:"bluetooth speaker"~1'); + FROM mock_items + WHERE description @@@ '"bluetooth speaker"~1' + LIMIT 1; EOSQL echo -$RESULT- test "$RESULT" = " Bluetooth-enabled speaker" diff --git a/charts/paradedb/values.yaml b/charts/paradedb/values.yaml index c916c6dcd..7a93ff602 100644 --- a/charts/paradedb/values.yaml +++ b/charts/paradedb/values.yaml @@ -12,8 +12,8 @@ version: # -- PostgreSQL major version to use postgresql: "16" # -- The ParadeDB version, set in the publish CI workflow from the latest paradedb/paradedb GitHub tag - # -- We default to v0.10.3 for testing and local development - paradedb: "0.10.3" + # -- We default to v0.11.0 for testing and local development + paradedb: "0.11.0" ### # -- Cluster mode of operation. 
Available modes: From 426225ee206ff72e71c9382a0bbefeb79bc70543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20No=C3=ABl?= <21990816+philippemnoel@users.noreply.github.com> Date: Sat, 19 Oct 2024 08:38:30 -0400 Subject: [PATCH 3/8] feat: Enable monitoring via Prometheus and Grafana (rebased) (#48) Rm extra files Remove duplicate block in README --- .github/workflows/paradedb-test-eks.yml | 8 +- .github/workflows/tests-cluster-chainsaw.yaml | 2 +- README.md | 55 +- charts/cloudnative-pg/Chart.yaml | 44 - charts/cloudnative-pg/README.md | 77 - .../cloudnative-pg/templates/crds/crds.yaml | 15687 ---------------- charts/cloudnative-pg/templates/rbac.yaml | 314 - .../image-catalog-timescaledb-ha.yaml | 18 - charts/paradedb/README.md | 71 +- charts/paradedb/README.md.gotmpl | 109 +- .../01-monitoring_cluster-assert.yaml | 4 +- .../test/pooler/01-pooler_cluster-assert.yaml | 6 +- 12 files changed, 187 insertions(+), 16208 deletions(-) delete mode 100644 charts/cloudnative-pg/Chart.yaml delete mode 100644 charts/cloudnative-pg/README.md delete mode 100644 charts/cloudnative-pg/templates/crds/crds.yaml delete mode 100644 charts/cloudnative-pg/templates/rbac.yaml delete mode 100644 charts/cluster/templates/image-catalog-timescaledb-ha.yaml diff --git a/.github/workflows/paradedb-test-eks.yml b/.github/workflows/paradedb-test-eks.yml index 8148e3f86..76b6c3158 100644 --- a/.github/workflows/paradedb-test-eks.yml +++ b/.github/workflows/paradedb-test-eks.yml @@ -86,12 +86,12 @@ jobs: - name: Install the CloudNativePG Operator run: | helm repo add cnpg https://cloudnative-pg.github.io/charts - helm upgrade --install cnpg --namespace cnpg-system --create-namespace cnpg/cloudnative-pg + helm upgrade --atomic --install cnpg --namespace cnpg-system --create-namespace cnpg/cloudnative-pg - - name: Wait for CNPG Webhook Service to be Ready + - name: Install the Prometheus Operator run: | - kubectl wait --namespace cnpg-system --for=condition=available --timeout=120s deployment/cnpg-cloudnative-pg - kubectl get svc -n cnpg-system cnpg-webhook-service + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm upgrade --atomic --install prometheus-community --namespace prometheus-community --create-namespace prometheus-community/prometheus-operator-crds - name: Test Helm Dependency Update working-directory: charts/paradedb/ diff --git a/.github/workflows/tests-cluster-chainsaw.yaml b/.github/workflows/tests-cluster-chainsaw.yaml index e471522fe..809ad5bdb 100644 --- a/.github/workflows/tests-cluster-chainsaw.yaml +++ b/.github/workflows/tests-cluster-chainsaw.yaml @@ -6,7 +6,7 @@ on: - 'gh-pages' jobs: - test-cluster-chainsaw: + test-cluster-standalone: runs-on: ubuntu-latest steps: - name: Checkout diff --git a/README.md b/README.md index ceef6acf0..ea6587b07 100644 --- a/README.md +++ b/README.md @@ -30,21 +30,36 @@ The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. -The chart is also available on [ArtifactHub](https://artifacthub.io/packages/helm/paradedb/paradedb). +The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb). 
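A hedged aside on the monitoring and operator install steps shown above: before deploying the chart with monitoring enabled, it can be worth confirming that the CloudNativePG operator rollout has finished and that the Prometheus `PodMonitor` CRD is present. This is only a sketch; the `cnpg-system` namespace and `cnpg-cloudnative-pg` deployment name are taken from the workflow steps above, and the CRD name assumes the standard prometheus-operator CRDs are what got installed.

```bash
# Illustrative pre-flight checks (names assumed from the workflow steps above)
kubectl --namespace cnpg-system rollout status deployment/cnpg-cloudnative-pg --timeout=120s
kubectl get crd podmonitors.monitoring.coreos.com
```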
## Getting Started First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). +### Installing the Prometheus Stack + +The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install them with: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm upgrade --atomic --install prometheus-community \ +--create-namespace \ +--namespace prometheus-community \ +--values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \ +prometheus-community/kube-prometheus-stack +``` + ### Installing the CloudNativePG Operator -Skip this step if the CNPG operator is already installed in your cluster. +Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. -```console +```bash helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install cnpg \ ---namespace cnpg-system \ +helm upgrade --atomic --install cnpg \ --create-namespace \ +--namespace cnpg-system \ +--set monitoring.podMonitorEnabled=true \ +--set monitoring.grafanaDashboard.create=true \ cnpg/cloudnative-pg ``` @@ -57,46 +72,56 @@ type: paradedb mode: standalone cluster: - instances: 2 + instances: 3 storage: size: 256Mi ``` -Then, launch the ParadeDB cluster. +Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command. ```bash helm repo add paradedb https://paradedb.github.io/charts -helm upgrade --install paradedb \ ---namespace paradedb-database \ +helm upgrade --atomic --install paradedb \ +--namespace paradedb \ --create-namespace \ --values values.yaml \ +--set cluster.monitoring.enabled=true \ paradedb/paradedb ``` -If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). - -A more detailed guide on launching the cluster can be found in the [Getting Started docs](<./charts/paradedb/docs/Getting Started.md>). To get started with ParadeDB, we suggest you follow the [quickstart guide](/documentation/getting-started/quickstart). +If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). ### Connecting to a ParadeDB CNPG Cluster The command to connect to the primary instance of the cluster will be printed in your terminal.
If you do not modify any settings, it will be: ```bash -kubectl --namespace paradedb-database exec --stdin --tty services/paradedb-rw -- bash +kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash ``` -This will launch a shell inside the instance. You can connect via `psql` with: +This will launch a Bash shell inside the instance. You can connect to the ParadeDB database via `psql` with: ```bash psql -d paradedb ``` +### Connecting to the Grafana Dashboard + +To connect to the Grafana dashboard for your cluster, we suggest port-forwarding the Kubernetes service running Grafana to localhost: + +```bash +kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80 +``` + +You can then access the Grafana dashboard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are +defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus Stack](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file. + ## Development To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. ```bash -helm upgrade --install paradedb --namespace paradedb-database --create-namespace ./charts/paradedb +helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb ``` ## License diff --git a/charts/cloudnative-pg/Chart.yaml b/charts/cloudnative-pg/Chart.yaml deleted file mode 100644 index b34e3817a..000000000 --- a/charts/cloudnative-pg/Chart.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright The CloudNativePG Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -apiVersion: v2 -name: cloudnative-pg -description: CloudNativePG Operator Helm Chart -icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg -type: application -version: "0.22.1" -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning, they should reflect the version the application is using. -# It is recommended to use it with quotes.
-appVersion: "1.24.1" -sources: - - https://github.com/cloudnative-pg/charts -keywords: - - operator - - controller - - postgresql - - postgres - - database -home: https://cloudnative-pg.io -maintainers: - - name: phisco - email: p.scorsolini@gmail.com -dependencies: - - name: cluster - alias: monitoring - condition: monitoring.grafanaDashboard.create - version: "0.0" - repository: https://cloudnative-pg.github.io/grafana-dashboards diff --git a/charts/cloudnative-pg/README.md b/charts/cloudnative-pg/README.md deleted file mode 100644 index a9e569466..000000000 --- a/charts/cloudnative-pg/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# cloudnative-pg - -![Version: 0.22.1](https://img.shields.io/badge/Version-0.22.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.24.1](https://img.shields.io/badge/AppVersion-1.24.1-informational?style=flat-square) - -CloudNativePG Operator Helm Chart - -**Homepage:** - -## Maintainers - -| Name | Email | Url | -| ---- | ------ | --- | -| phisco | | | - -## Source Code - -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://cloudnative-pg.github.io/grafana-dashboards | monitoring(cluster) | 0.0 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| additionalArgs | list | `[]` | Additinal arguments to be added to the operator's args list. | -| additionalEnv | list | `[]` | Array containing extra environment variables which can be templated. For example: - name: RELEASE_NAME value: "{{ .Release.Name }}" - name: MY_VAR value: "mySpecialKey" | -| affinity | object | `{}` | Affinity for the operator to be installed. | -| commonAnnotations | object | `{}` | Annotations to be added to all other resources. | -| config | object | `{"create":true,"data":{},"name":"cnpg-controller-manager-config","secret":false}` | Operator configuration. | -| config.create | bool | `true` | Specifies whether the secret should be created. | -| config.data | object | `{}` | The content of the configmap/secret, see https://cloudnative-pg.io/documentation/current/operator_conf/#available-options for all the available options. | -| config.name | string | `"cnpg-controller-manager-config"` | The name of the configmap/secret to use. | -| config.secret | bool | `false` | Specifies whether it should be stored in a secret, instead of a configmap. | -| containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | Container Security Context. | -| crds.create | bool | `true` | Specifies whether the CRDs should be created when installing the chart. | -| dnsPolicy | string | `""` | | -| fullnameOverride | string | `""` | | -| hostNetwork | bool | `false` | | -| image.pullPolicy | string | `"IfNotPresent"` | | -| image.repository | string | `"ghcr.io/cloudnative-pg/cloudnative-pg"` | | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | -| imagePullSecrets | list | `[]` | | -| monitoring.grafanaDashboard.annotations | object | `{}` | Annotations that ConfigMaps can have to get configured in Grafana. | -| monitoring.grafanaDashboard.configMapName | string | `"cnpg-grafana-dashboard"` | The name of the ConfigMap containing the dashboard. 
| -| monitoring.grafanaDashboard.create | bool | `false` | | -| monitoring.grafanaDashboard.labels | object | `{}` | Labels that ConfigMaps should have to get configured in Grafana. | -| monitoring.grafanaDashboard.namespace | string | `""` | Allows overriding the namespace where the ConfigMap will be created, defaulting to the same one as the Release. | -| monitoring.grafanaDashboard.sidecarLabel | string | `"grafana_dashboard"` | Label that ConfigMaps should have to be loaded as dashboards. DEPRECATED: Use labels instead. | -| monitoring.grafanaDashboard.sidecarLabelValue | string | `"1"` | Label value that ConfigMaps should have to be loaded as dashboards. DEPRECATED: Use labels instead. | -| monitoring.podMonitorAdditionalLabels | object | `{}` | Additional labels for the podMonitor | -| monitoring.podMonitorEnabled | bool | `false` | Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. | -| monitoring.podMonitorMetricRelabelings | list | `[]` | Metrics relabel configurations to apply to samples before ingestion. | -| monitoring.podMonitorRelabelings | list | `[]` | Relabel configurations to apply to samples before scraping. | -| monitoringQueriesConfigMap.name | string | `"cnpg-default-monitoring"` | The name of the default monitoring configmap. | -| monitoringQueriesConfigMap.queries | string | `"backends:\n query: |\n SELECT sa.datname\n , sa.usename\n , sa.application_name\n , states.state\n , COALESCE(sa.count, 0) AS total\n , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds\n FROM ( VALUES ('active')\n , ('idle')\n , ('idle in transaction')\n , ('idle in transaction (aborted)')\n , ('fastpath function call')\n , ('disabled')\n ) AS states(state)\n LEFT JOIN (\n SELECT datname\n , state\n , usename\n , COALESCE(application_name, '') AS application_name\n , COUNT(*)\n , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs\n FROM pg_catalog.pg_stat_activity\n GROUP BY datname, state, usename, application_name\n ) sa ON states.state = sa.state\n WHERE sa.usename IS NOT NULL\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - usename:\n usage: \"LABEL\"\n description: \"Name of the user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - state:\n usage: \"LABEL\"\n description: \"State of the backend\"\n - total:\n usage: \"GAUGE\"\n description: \"Number of backends\"\n - max_tx_duration_seconds:\n usage: \"GAUGE\"\n description: \"Maximum duration of a transaction in seconds\"\n\nbackends_waiting:\n query: |\n SELECT count(*) AS total\n FROM pg_catalog.pg_locks blocked_locks\n JOIN pg_catalog.pg_locks blocking_locks\n ON blocking_locks.locktype = blocked_locks.locktype\n AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database\n AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation\n AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page\n AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple\n AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid\n AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid\n AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid\n AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid\n AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid\n AND blocking_locks.pid != blocked_locks.pid\n JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = 
blocking_locks.pid\n WHERE NOT blocked_locks.granted\n metrics:\n - total:\n usage: \"GAUGE\"\n description: \"Total number of backends that are currently waiting on other queries\"\n\npg_database:\n query: |\n SELECT datname\n , pg_catalog.pg_database_size(datname) AS size_bytes\n , pg_catalog.age(datfrozenxid) AS xid_age\n , pg_catalog.mxid_age(datminmxid) AS mxid_age\n FROM pg_catalog.pg_database\n WHERE datallowconn\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - size_bytes:\n usage: \"GAUGE\"\n description: \"Disk space used by the database\"\n - xid_age:\n usage: \"GAUGE\"\n description: \"Number of transactions from the frozen XID to the current one\"\n - mxid_age:\n usage: \"GAUGE\"\n description: \"Number of multiple transactions (Multixact) from the frozen XID to the current one\"\n\npg_postmaster:\n query: |\n SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time\n FROM pg_catalog.pg_postmaster_start_time()\n metrics:\n - start_time:\n usage: \"GAUGE\"\n description: \"Time at which postgres started (based on epoch)\"\n\npg_replication:\n query: \"SELECT CASE WHEN (\n NOT pg_catalog.pg_is_in_recovery()\n OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())\n THEN 0\n ELSE GREATEST (0,\n EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))\n END AS lag,\n pg_catalog.pg_is_in_recovery() AS in_recovery,\n EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,\n (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas\"\n metrics:\n - lag:\n usage: \"GAUGE\"\n description: \"Replication lag behind primary in seconds\"\n - in_recovery:\n usage: \"GAUGE\"\n description: \"Whether the instance is in recovery\"\n - is_wal_receiver_up:\n usage: \"GAUGE\"\n description: \"Whether the instance wal_receiver is up\"\n - streaming_replicas:\n usage: \"GAUGE\"\n description: \"Number of streaming replicas connected to the instance\"\n\npg_replication_slots:\n query: |\n SELECT slot_name,\n slot_type,\n database,\n active,\n (CASE pg_catalog.pg_is_in_recovery()\n WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)\n ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)\n END) as pg_wal_lsn_diff\n FROM pg_catalog.pg_replication_slots\n WHERE NOT temporary\n metrics:\n - slot_name:\n usage: \"LABEL\"\n description: \"Name of the replication slot\"\n - slot_type:\n usage: \"LABEL\"\n description: \"Type of the replication slot\"\n - database:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - active:\n usage: \"GAUGE\"\n description: \"Flag indicating whether the slot is active\"\n - pg_wal_lsn_diff:\n usage: \"GAUGE\"\n description: \"Replication lag in bytes\"\n\npg_stat_archiver:\n query: |\n SELECT archived_count\n , failed_count\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure\n , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time\n , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS 
last_failed_wal_start_lsn\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_archiver\n metrics:\n - archived_count:\n usage: \"COUNTER\"\n description: \"Number of WAL files that have been successfully archived\"\n - failed_count:\n usage: \"COUNTER\"\n description: \"Number of failed attempts for archiving WAL files\"\n - seconds_since_last_archival:\n usage: \"GAUGE\"\n description: \"Seconds since the last successful archival operation\"\n - seconds_since_last_failure:\n usage: \"GAUGE\"\n description: \"Seconds since the last failed archival operation\"\n - last_archived_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving succeeded\"\n - last_failed_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving failed\"\n - last_archived_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Archived WAL start LSN\"\n - last_failed_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Last failed WAL LSN\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_bgwriter:\n runonserver: \"<17.0.0\"\n query: |\n SELECT checkpoints_timed\n , checkpoints_req\n , checkpoint_write_time\n , checkpoint_sync_time\n , buffers_checkpoint\n , buffers_clean\n , maxwritten_clean\n , buffers_backend\n , buffers_backend_fsync\n , buffers_alloc\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been performed\"\n - checkpoint_write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds\"\n - checkpoint_sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds\"\n - buffers_checkpoint:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints\"\n - buffers_clean:\n usage: \"COUNTER\"\n description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_backend:\n usage: \"COUNTER\"\n description: \"Number of buffers written directly by a backend\"\n - buffers_backend_fsync:\n usage: \"COUNTER\"\n description: \"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n\npg_stat_bgwriter_17:\n runonserver: \">=17.0.0\"\n name: pg_stat_bgwriter\n query: |\n SELECT buffers_clean\n , maxwritten_clean\n , buffers_alloc\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - buffers_clean:\n usage: \"COUNTER\"\n description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these 
statistics were last reset\"\n\npg_stat_checkpointer:\n runonserver: \">=17.0.0\"\n query: |\n SELECT num_timed AS checkpoints_timed\n , num_requested AS checkpoints_req\n , restartpoints_timed\n , restartpoints_req\n , restartpoints_done\n , write_time\n , sync_time\n , buffers_written\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_checkpointer\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been performed\"\n - restartpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled restartpoints due to timeout or after a failed attempt to perform it\"\n - restartpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested restartpoints that have been performed\"\n - restartpoints_done:\n usage: \"COUNTER\"\n description: \"Number of restartpoints that have been performed\"\n - write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds\"\n - sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds\"\n - buffers_written:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints and restartpoints\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_database:\n query: |\n SELECT datname\n , xact_commit\n , xact_rollback\n , blks_read\n , blks_hit\n , tup_returned\n , tup_fetched\n , tup_inserted\n , tup_updated\n , tup_deleted\n , conflicts\n , temp_files\n , temp_bytes\n , deadlocks\n , blk_read_time\n , blk_write_time\n FROM pg_catalog.pg_stat_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of this database\"\n - xact_commit:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been committed\"\n - xact_rollback:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been rolled back\"\n - blks_read:\n usage: \"COUNTER\"\n description: \"Number of disk blocks read in this database\"\n - blks_hit:\n usage: \"COUNTER\"\n description: \"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)\"\n - tup_returned:\n usage: \"COUNTER\"\n description: \"Number of rows returned by queries in this database\"\n - tup_fetched:\n usage: \"COUNTER\"\n description: \"Number of rows fetched by queries in this database\"\n - tup_inserted:\n usage: \"COUNTER\"\n description: \"Number of rows inserted by queries in this database\"\n - tup_updated:\n usage: \"COUNTER\"\n description: \"Number of rows updated by queries in this database\"\n - tup_deleted:\n usage: \"COUNTER\"\n description: \"Number of rows deleted by queries in this database\"\n - conflicts:\n usage: \"COUNTER\"\n description: \"Number of queries canceled due to conflicts with recovery in this database\"\n - temp_files:\n usage: \"COUNTER\"\n description: \"Number of temporary files created by queries in this database\"\n - temp_bytes:\n usage: \"COUNTER\"\n description: \"Total amount of data written to 
temporary files by queries in this database\"\n - deadlocks:\n usage: \"COUNTER\"\n description: \"Number of deadlocks detected in this database\"\n - blk_read_time:\n usage: \"COUNTER\"\n description: \"Time spent reading data file blocks by backends in this database, in milliseconds\"\n - blk_write_time:\n usage: \"COUNTER\"\n description: \"Time spent writing data file blocks by backends in this database, in milliseconds\"\n\npg_stat_replication:\n primary: true\n query: |\n SELECT usename\n , COALESCE(application_name, '') AS application_name\n , COALESCE(client_addr::text, '') AS client_addr\n , COALESCE(client_port::text, '') AS client_port\n , EXTRACT(EPOCH FROM backend_start) AS backend_start\n , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes\n , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes\n , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds\n FROM pg_catalog.pg_stat_replication\n metrics:\n - usename:\n usage: \"LABEL\"\n description: \"Name of the replication user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - client_addr:\n usage: \"LABEL\"\n description: \"Client IP address\"\n - client_port:\n usage: \"LABEL\"\n description: \"Client TCP port\"\n - backend_start:\n usage: \"COUNTER\"\n description: \"Time when this process was started\"\n - backend_xmin_age:\n usage: \"COUNTER\"\n description: \"The age of this standby's xmin horizon\"\n - sent_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location sent on this connection\"\n - write_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location written to disk by this standby server\"\n - flush_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location flushed to disk by this standby server\"\n - replay_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location replayed into the database on this standby server\"\n - write_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it\"\n - flush_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it\"\n - replay_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it\"\n\npg_settings:\n query: |\n SELECT name,\n CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting\n FROM pg_catalog.pg_settings\n WHERE vartype IN ('integer', 'real', 'bool')\n ORDER BY 1\n metrics:\n - name:\n usage: \"LABEL\"\n description: \"Name of the setting\"\n - setting:\n usage: \"GAUGE\"\n description: \"Setting value\"\n"` | A string representation of a YAML defining 
monitoring queries. | -| nameOverride | string | `""` | | -| nodeSelector | object | `{}` | Nodeselector for the operator to be installed. | -| podAnnotations | object | `{}` | Annotations to be added to the pod. | -| podLabels | object | `{}` | Labels to be added to the pod. | -| podSecurityContext | object | `{"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security Context for the whole pod. | -| priorityClassName | string | `""` | Priority indicates the importance of a Pod relative to other Pods. | -| rbac.aggregateClusterRoles | bool | `false` | Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles | -| rbac.create | bool | `true` | Specifies whether ClusterRole and ClusterRoleBinding should be created. | -| replicaCount | int | `1` | | -| resources | object | `{}` | | -| service.name | string | `"cnpg-webhook-service"` | DO NOT CHANGE THE SERVICE NAME as it is currently used to generate the certificate and can not be configured | -| service.port | int | `443` | | -| service.type | string | `"ClusterIP"` | | -| serviceAccount.create | bool | `true` | Specifies whether the service account should be created. | -| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. | -| tolerations | list | `[]` | Tolerations for the operator to be installed. | -| webhook | object | `{"livenessProbe":{"initialDelaySeconds":3},"mutating":{"create":true,"failurePolicy":"Fail"},"port":9443,"readinessProbe":{"initialDelaySeconds":3},"validating":{"create":true,"failurePolicy":"Fail"}}` | The webhook configuration. | - diff --git a/charts/cloudnative-pg/templates/crds/crds.yaml b/charts/cloudnative-pg/templates/crds/crds.yaml deleted file mode 100644 index 5c5d98132..000000000 --- a/charts/cloudnative-pg/templates/crds/crds.yaml +++ /dev/null @@ -1,15687 +0,0 @@ -{{- if .Values.crds.create }} -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: backups.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.method - name: Method - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the backup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - cluster: - description: The cluster to backup - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - method: - default: barmanObjectStore - description: |- - The backup method to be used, possible options are `barmanObjectStore`, - `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. - enum: - - barmanObjectStore - - volumeSnapshot - - plugin - type: string - online: - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' - type: boolean - onlineConfiguration: - description: |- - Configuration parameters to control the online/hot backup with volume snapshots - Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - pluginConfiguration: - description: Configuration parameters passed to the plugin managing - this backup - properties: - name: - description: Name is the name of the plugin managing this backup - type: string - parameters: - additionalProperties: - type: string - description: |- - Parameters are the configuration parameters passed to the backup - plugin for this backup - type: object - required: - - name - type: object - target: - description: |- - The policy to decide which instance should perform this backup. If empty, - it defaults to `cluster.spec.backup.target`. - Available options are empty string, `primary` and `prefer-standby`. - `primary` to have backups run always on primary instances, - `prefer-standby` to have backups run preferably on the most updated - standby, if available. - enum: - - primary - - prefer-standby - type: string - required: - - cluster - type: object - status: - description: |- - Most recently observed status of the backup. This data may not be up to - date. Populated by the system. Read-only. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - azureCredentials: - description: The credentials to use to upload data to Azure Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without providing - explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - backupId: - description: The ID of the Barman backup - type: string - backupLabelFile: - description: Backup label file content as returned by Postgres in - case of online (hot) backups - format: byte - type: string - backupName: - description: The Name of the Barman backup - type: string - beginLSN: - description: The starting xlog - type: string - beginWal: - description: The starting WAL - type: string - commandError: - description: The backup command output in case of error - type: string - commandOutput: - description: Unused. Retained for compatibility with old versions. - type: string - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data. This may not be populated in case of errors. - type: string - encryption: - description: Encryption method required to S3 API - type: string - endLSN: - description: The ending xlog - type: string - endWal: - description: The ending WAL - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive. - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - googleCredentials: - description: The credentials to use to upload data to Google Cloud - Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud Storage JSON - file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - instanceID: - description: Information to identify the instance where the backup - has been taken from - properties: - ContainerID: - description: The container ID - type: string - podName: - description: The pod name - type: string - type: object - method: - description: The backup method being used - type: string - online: - description: Whether the backup was online/hot (`true`) or offline/cold - (`false`) - type: boolean - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without providing - explicitly the keys. - type: boolean - region: - description: The reference to the secret containing the region - name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - snapshotBackupStatus: - description: Status of the volumeSnapshot backup - properties: - elements: - description: The elements list, populated with the gathered volume - snapshots - items: - description: BackupSnapshotElementStatus is a volume snapshot - that is part of a volume snapshot method backup - properties: - name: - description: Name is the snapshot resource name - type: string - tablespaceName: - description: |- - TablespaceName is the name of the snapshotted tablespace. 
Only set - when type is PG_TABLESPACE - type: string - type: - description: Type is tho role of the snapshot in the cluster, - such as PG_DATA, PG_WAL and PG_TABLESPACE - type: string - required: - - name - - type - type: object - type: array - type: object - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - tablespaceMapFile: - description: Tablespace map file content as returned by Postgres in - case of online (hot) backups - format: byte - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: clusterimagecatalogs.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: ClusterImageCatalog - listKind: ClusterImageCatalogList - plural: clusterimagecatalogs - singular: clusterimagecatalog - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1 - schema: - openAPIV3Schema: - description: ClusterImageCatalog is the Schema for the clusterimagecatalogs - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the ClusterImageCatalog. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - images: - description: List of CatalogImages available in the catalog - items: - description: CatalogImage defines the image and major version - properties: - image: - description: The image reference - type: string - major: - description: The PostgreSQL major version of the image. Must - be unique within the catalog. 
- minimum: 10 - type: integer - required: - - image - - major - type: object - maxItems: 8 - minItems: 1 - type: array - x-kubernetes-validations: - - message: Images must have unique major versions - rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) - required: - - images - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: clusters.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the cluster. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - additionalPodAffinity: - description: AdditionalPodAffinity allows to specify pod affinity - terms to be passed to all the cluster's pods. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". 
- An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - additionalPodAntiAffinity: - description: |- - AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated - by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. 
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. 
due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. 
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - enablePodAntiAffinity: - description: |- - Activates anti-affinity for the pods. The operator will define pods - anti-affinity unless this field is explicitly set to false - type: boolean - nodeAffinity: - description: |- - NodeAffinity describes node affinity scheduling rules for the pod. 
- More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is map of key-value pairs used to define the nodes on which - the pods can run. 
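Taken together, the scheduling fields above (`enablePodAntiAffinity`, `additionalPodAntiAffinity`, `nodeAffinity`, `nodeSelector`) live under the cluster's `affinity` stanza. A hedged sketch of how they might be combined, where the node labels and values are assumptions rather than anything prescribed by this chart:

```yaml
# Illustrative affinity stanza; label keys and values are placeholders.
affinity:
  enablePodAntiAffinity: true             # operator-generated anti-affinity (default true)
  topologyKey: topology.kubernetes.io/zone
  nodeSelector:
    node-role.kubernetes.io/postgres: ""  # hypothetical node label
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/arch
              operator: In                # In, NotIn, Exists, DoesNotExist, Gt, Lt
              values: [amd64]
```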
- More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - podAntiAffinityType: - description: |- - PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be - considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or - "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are - added if all the existing nodes don't match the required pod anti-affinity rule. - More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - type: string - tolerations: - description: |- - Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run - on tainted nodes. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologyKey: - description: |- - TopologyKey to use for anti-affinity configuration. See k8s documentation - for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - azureCredentials: - description: The credentials to use to upload data to Azure - Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without - providing explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - data: - description: |- - The configuration to be used to backup the data files - When not defined, base backups files will be stored uncompressed and may - be unencrypted in the object store, according to the bucket default - policy. - properties: - additionalCommandArgs: - description: |- - AdditionalCommandArgs represents additional arguments that can be appended - to the 'barman-cloud-backup' command-line invocation. These arguments - provide flexibility to customize the backup process further according to - specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-backup' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a backup file (a tar file per tablespace) while streaming it - to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. - enum: - - gzip - - bzip2 - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - jobs: - description: |- - The number of parallel jobs to be used to upload the backup, defaults - to 2 - format: int32 - minimum: 1 - type: integer - type: object - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - minLength: 1 - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - googleCredentials: - description: The credentials to use to upload data to Google - Cloud Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud Storage - JSON file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - historyTags: - additionalProperties: - type: string - description: |- - HistoryTags is a list of key value pairs that will be passed to the - Barman --history-tags option. - type: object - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without - providing explicitly the keys. - type: boolean - region: - description: The reference to the secret containing the - region name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - tags: - additionalProperties: - type: string - description: |- - Tags is a list of key value pairs that will be passed to the - Barman --tags option. - type: object - wal: - description: |- - The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and may be - unencrypted in the object store, according to the bucket default policy. - properties: - archiveAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-archive' - command-line invocation. These arguments provide flexibility to customize - the WAL archive process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a WAL file before sending it to the object store. 
Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. - enum: - - gzip - - bzip2 - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - maxParallel: - description: |- - Number of WAL files to be either archived in parallel (when the - PostgreSQL instance is archiving to a backup object store) or - restored in parallel (when a PostgreSQL standby is fetching WAL - files from a recovery object store). If not specified, WAL files - will be processed one at a time. It accepts a positive integer as a - value - with 1 being the minimum accepted value. - minimum: 1 - type: integer - restoreAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-restore' - command-line invocation. These arguments provide flexibility to customize - the WAL restore process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - type: object - required: - - destinationPath - type: object - retentionPolicy: - description: |- - RetentionPolicy is the retention policy to be used for backups - and WALs (i.e. '60d'). The retention policy is expressed in the form - of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - - days, weeks, months. - It's currently only applicable when using the BarmanObjectStore method. - pattern: ^[1-9][0-9]*[dwm]$ - type: string - target: - default: prefer-standby - description: |- - The policy to decide which instance should perform backups. Available - options are empty string, which will default to `prefer-standby` policy, - `primary` to have backups run always on primary instances, `prefer-standby` - to have backups run preferably on the most updated standby, if available. - enum: - - primary - - prefer-standby - type: string - volumeSnapshot: - description: VolumeSnapshot provides the configuration for the - execution of volume snapshot backups. - properties: - annotations: - additionalProperties: - type: string - description: Annotations key-value pairs that will be added - to .metadata.annotations snapshot resources. - type: object - className: - description: |- - ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. - It is the default class for the other types if no specific class is present - type: string - labels: - additionalProperties: - type: string - description: Labels are key-value pairs that will be added - to .metadata.labels snapshot resources. 
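The `backup` block documented above wires the barman-cloud tool suite to an object store. A minimal sketch, assuming an S3-compatible endpoint and a pre-existing credentials Secret (bucket, endpoint and Secret names are placeholders, not values shipped with this chart):

```yaml
# Illustrative backup configuration; bucket, endpoint and Secret names are placeholders.
backup:
  retentionPolicy: "30d"                     # XXu, where u is d, w or m
  target: prefer-standby                     # or "primary"
  barmanObjectStore:
    destinationPath: s3://my-bucket/backups  # s3://bucket/path/to/folder
    endpointURL: https://s3.example.com      # optional endpoint override
    s3Credentials:
      accessKeyId:
        name: backup-creds                   # hypothetical Secret name
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: backup-creds
        key: SECRET_ACCESS_KEY
    data:
      compression: gzip                      # gzip, bzip2 or snappy
      jobs: 2                                # parallel upload jobs
    wal:
      compression: gzip
      maxParallel: 4                         # WAL files archived/restored in parallel
```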
- type: object - online: - default: true - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - type: boolean - onlineConfiguration: - default: - immediateCheckpoint: false - waitForArchive: true - description: Configuration parameters to control the online/hot - backup with volume snapshots - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - snapshotOwnerReference: - default: none - description: SnapshotOwnerReference indicates the type of - owner reference the snapshot should have - enum: - - none - - cluster - - backup - type: string - tablespaceClassName: - additionalProperties: - type: string - description: |- - TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. - defaults to the PGDATA Snapshot Class, if set - type: object - walClassName: - description: WalClassName specifies the Snapshot Class to - be used for the PG_WAL PersistentVolumeClaim. - type: string - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - dataChecksums: - description: |- - Whether the `-k` option should be passed to initdb, - enabling checksums on data pages (default: `false`) - type: boolean - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - encoding: - description: The value to be passed as option `--encoding` - for initdb (default:`UTF8`) - type: string - import: - description: |- - Bootstraps the new cluster by importing data from an existing PostgreSQL - instance using logical backup (`pg_dump` and `pg_restore`) - properties: - databases: - description: The databases to import - items: - type: string - type: array - postImportApplicationSQL: - description: |- - List of SQL queries to be executed as a superuser in the application - database right after is imported - to be used with extreme care - (by default empty). Only available in microservice type. - items: - type: string - type: array - roles: - description: The roles to import - items: - type: string - type: array - schemaOnly: - description: |- - When set to true, only the `pre-data` and `post-data` sections of - `pg_restore` are invoked, avoiding data import. Default: `false`. 
- type: boolean - source: - description: The source of the import - properties: - externalCluster: - description: The name of the externalCluster used - for import - type: string - required: - - externalCluster - type: object - type: - description: The import type. Can be `microservice` or - `monolith`. - enum: - - microservice - - monolith - type: string - required: - - databases - - source - - type - type: object - localeCType: - description: The value to be passed as option `--lc-ctype` - for initdb (default:`C`) - type: string - localeCollate: - description: The value to be passed as option `--lc-collate` - for initdb (default:`C`) - type: string - options: - description: |- - The list of options that must be passed to initdb when creating the cluster. - Deprecated: This could lead to inconsistent configurations, - please use the explicit provided parameters instead. - If defined, explicit values will be ignored. - items: - type: string - type: array - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - postInitApplicationSQL: - description: |- - List of SQL queries to be executed as a superuser in the application - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitApplicationSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the application database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. - Within each group, the processing order follows the sequence specified - in their respective arrays. - (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - postInitSQL: - description: |- - List of SQL queries to be executed as a superuser in the `postgres` - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the `postgres` database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. - Within each group, the processing order follows the sequence specified - in their respective arrays. 
- (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - postInitTemplateSQL: - description: |- - List of SQL queries to be executed as a superuser in the `template1` - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitTemplateSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the `template1` database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. - Within each group, the processing order follows the sequence specified - in their respective arrays. - (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - walSegmentSize: - description: |- - The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` - option for initdb (default: empty, resulting in PostgreSQL default: 16MB) - maximum: 1024 - minimum: 1 - type: integer - type: object - pg_basebackup: - description: |- - Bootstrap the cluster taking a physical backup of another compatible - PostgreSQL instance - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. 
- type: string - required: - - name - type: object - source: - description: The name of the server of which we need to take - a physical backup - minLength: 1 - type: string - required: - - source - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: |- - The backup object containing the physical base backup from which to - initiate the recovery procedure. - Mutually exclusive with `source` and `volumeSnapshots`. - properties: - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive. - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - name: - description: Name of the referent. - type: string - required: - - name - type: object - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - recoveryTarget: - description: |- - By default, the recovery process applies all the available - WAL files in the archive (full recovery). However, you can also - end the recovery as soon as a consistent state is reached or - recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, - as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). - More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET - properties: - backupID: - description: |- - The ID of the backup from which to start the recovery process. - If empty (default) the operator will automatically detect the backup - based on targetTime or targetLSN if specified. Otherwise use the - latest available backup in chronological order. - type: string - exclusive: - description: |- - Set the target to be exclusive. If omitted, defaults to false, so that - in Postgres, `recovery_target_inclusive` will be true - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: |- - The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest" or a positive - integer) - type: string - targetTime: - description: The target time as a timestamp in the RFC3339 - standard - type: string - targetXID: - description: The target transaction ID - type: string - type: object - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - source: - description: |- - The external cluster whose backup we will restore. This is also - used as the name of the folder under which the backup is stored, - so it must be set to the name of the source cluster - Mutually exclusive with `backup`. - type: string - volumeSnapshots: - description: |- - The static PVC data source(s) from which to initiate the - recovery procedure. 
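The `recovery` bootstrap above can start from a Backup object, from an external cluster's object store (`source`), or from volume snapshots; the three are mutually exclusive. A hedged point-in-time-recovery sketch, where the Backup name and target time are invented:

```yaml
# Illustrative PITR bootstrap; the Backup name and timestamp are invented.
bootstrap:
  recovery:
    backup:
      name: example-backup-20240101        # hypothetical Backup resource
    recoveryTarget:
      targetTime: "2024-01-01T12:00:00Z"   # RFC3339 timestamp
      exclusive: false                     # recovery_target_inclusive stays true
```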
Currently supporting `VolumeSnapshot` - and `PersistentVolumeClaim` resources that map an existing - PVC group, compatible with CloudNativePG, and taken with - a cold backup copy on a fenced Postgres instance (limitation - which will be removed in the future when online backup - will be implemented). - Mutually exclusive with `backup`. - properties: - storage: - description: Configuration of the storage of the instances - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - tablespaceStorage: - additionalProperties: - description: |- - TypedLocalObjectReference contains enough information to let you locate the - typed referenced object inside the same namespace. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - description: Configuration of the storage for PostgreSQL - tablespaces - type: object - walStorage: - description: Configuration of the storage for PostgreSQL - WAL (Write-Ahead Log) - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - required: - - storage - type: object - type: object - type: object - certificates: - description: The configuration for the CA and related certificates - properties: - clientCASecret: - description: |- - The secret containing the Client CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate all the client certificates.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the client certificates,
- used as `ssl_ca_file` of all the instances.
- - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
- this can be omitted.
- type: string - replicationTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the client certificate to authenticate as - the `streaming_replica` user. - If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be - created using the provided CA. - type: string - serverAltDNSNames: - description: The list of the server alternative DNS names to be - added to the generated server TLS certificates, when required. - items: - type: string - type: array - serverCASecret: - description: |- - The secret containing the Server CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the server certificate,
- used as `sslrootcert` in client connection strings.
- - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
- this can be omitted.
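Pulling the certificate fields above together: when TLS material is managed outside the operator, the Secrets described here are referenced from the `certificates` stanza. A sketch with invented Secret names (the Secrets themselves must already exist, `kubernetes.io/tls` for the TLS ones):

```yaml
# Illustrative certificates stanza; all Secret names are placeholders.
certificates:
  serverCASecret: server-ca                # provides ca.crt (and ca.key if needed)
  serverTLSSecret: server-tls              # ssl_cert_file / ssl_key_file
  clientCASecret: client-ca                # ssl_ca_file for client cert validation
  replicationTLSSecret: replication-tls    # streaming_replica client certificate
  serverAltDNSNames:
    - db.example.internal                  # additional SAN, hypothetical
```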
- type: string - serverTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as - `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. - If not defined, ServerCASecret must provide also `ca.key` and a new secret will be - created using the provided CA. - type: string - type: object - description: - description: Description of this PostgreSQL cluster - type: string - enablePDB: - default: true - description: |- - Manage the `PodDisruptionBudget` resources within the cluster. When - configured as `true` (default setting), the pod disruption budgets - will safeguard the primary node from being terminated. Conversely, - setting it to `false` will result in the absence of any - `PodDisruptionBudget` resource, permitting the shutdown of all nodes - hosting the PostgreSQL cluster. This latter configuration is - advisable for any PostgreSQL cluster employed for - development/staging purposes. - type: boolean - enableSuperuserAccess: - default: false - description: |- - When this option is enabled, the operator will use the `SuperuserSecret` - to update the `postgres` user password (if the secret is - not present, the operator will automatically create one). When this - option is disabled, the operator will ignore the `SuperuserSecret` content, delete - it when automatically created, and then blank the password of the `postgres` - user by setting it to `NULL`. Disabled by default. - type: boolean - env: - description: |- - Env follows the Env format to pass environment variables - to the pods created in the cluster - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". 
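Two of the toggles above are worth calling out: `enablePDB` controls whether PodDisruptionBudget resources are created, and `enableSuperuserAccess` controls whether the `postgres` password is managed through the `SuperuserSecret`. For example, values one might choose for a throwaway development cluster:

```yaml
# Illustrative toggles for a development/staging cluster.
description: "Development cluster"   # free-form description of the cluster
enablePDB: false                     # allow all nodes hosting the cluster to be drained
enableSuperuserAccess: true          # manage the postgres password via SuperuserSecret
```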
- type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - EnvFrom follows the EnvFrom format to pass environment variables - sources to the pods to be used by Env - items: - description: EnvFromSource represents the source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - ephemeralVolumeSource: - description: EphemeralVolumeSource allows the user to configure the - source of ephemeral volumes. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. 
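The `env` and `envFrom` entries above follow the usual container conventions. For instance, with placeholder ConfigMap and Secret names:

```yaml
# Illustrative env/envFrom; the referenced ConfigMap and Secret are assumed to exist.
env:
  - name: TZ
    value: "UTC"
  - name: LICENSE_KEY                  # hypothetical variable
    valueFrom:
      secretKeyRef:
        name: example-secret           # placeholder Secret
        key: license
envFrom:
  - configMapRef:
      name: example-extra-env          # placeholder ConfigMap
```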
- The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. 
For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to - consider for binding. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the - PersistentVolume backing this claim. 
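# A minimal sketch of the `ephemeralVolumeSource` described above: a stand-alone PVC
# template used to provision the ephemeral volume. Field names follow this schema;
# the label, storage class and size are illustrative assumptions.
spec:
  ephemeralVolumeSource:
    volumeClaimTemplate:
      metadata:
        labels:
          app.kubernetes.io/part-of: example-cluster   # hypothetical label copied into the PVC
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: standard                     # assumed storage class
        resources:
          requests:
            storage: 1Gi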
- type: string - type: object - required: - - spec - type: object - type: object - ephemeralVolumesSizeLimit: - description: |- - EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral - volumes - properties: - shm: - anyOf: - - type: integer - - type: string - description: Shm is the size limit of the shared memory volume - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - temporaryData: - anyOf: - - type: integer - - type: string - description: TemporaryData is the size limit of the temporary - data volume - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - externalClusters: - description: The list of external clusters which are used in the configuration - items: - description: |- - ExternalCluster represents the connection parameters to an - external cluster which is used in the other sections of the configuration - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - azureCredentials: - description: The credentials to use to upload data to Azure - Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without - providing explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - data: - description: |- - The configuration to be used to backup the data files - When not defined, base backups files will be stored uncompressed and may - be unencrypted in the object store, according to the bucket default - policy. - properties: - additionalCommandArgs: - description: |- - AdditionalCommandArgs represents additional arguments that can be appended - to the 'barman-cloud-backup' command-line invocation. These arguments - provide flexibility to customize the backup process further according to - specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-backup' command, to avoid potential errors or unintended - behavior during execution. 
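# A minimal sketch of `ephemeralVolumesSizeLimit`, bounding the shared memory and
# temporary data volumes described above. The quantities are illustrative assumptions.
spec:
  ephemeralVolumesSizeLimit:
    shm: 256Mi
    temporaryData: 1Gi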
- items: - type: string - type: array - compression: - description: |- - Compress a backup file (a tar file per tablespace) while streaming it - to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. - enum: - - gzip - - bzip2 - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - jobs: - description: |- - The number of parallel jobs to be used to upload the backup, defaults - to 2 - format: int32 - minimum: 1 - type: integer - type: object - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - minLength: 1 - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - googleCredentials: - description: The credentials to use to upload data to Google - Cloud Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud - Storage JSON file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - historyTags: - additionalProperties: - type: string - description: |- - HistoryTags is a list of key value pairs that will be passed to the - Barman --history-tags option. - type: object - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without - providing explicitly the keys. - type: boolean - region: - description: The reference to the secret containing - the region name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - tags: - additionalProperties: - type: string - description: |- - Tags is a list of key value pairs that will be passed to the - Barman --tags option. - type: object - wal: - description: |- - The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and may be - unencrypted in the object store, according to the bucket default policy. - properties: - archiveAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-archive' - command-line invocation. These arguments provide flexibility to customize - the WAL archive process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. - enum: - - gzip - - bzip2 - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - maxParallel: - description: |- - Number of WAL files to be either archived in parallel (when the - PostgreSQL instance is archiving to a backup object store) or - restored in parallel (when a PostgreSQL standby is fetching WAL - files from a recovery object store). If not specified, WAL files - will be processed one at a time. It accepts a positive integer as a - value - with 1 being the minimum accepted value. - minimum: 1 - type: integer - restoreAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-restore' - command-line invocation. These arguments provide flexibility to customize - the WAL restore process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended - behavior during execution. 
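# A minimal sketch of an external cluster backed by a Barman Cloud object store,
# combining the `s3Credentials`, `wal` and `data` options described above. The bucket
# path, endpoint, Secret names and keys are illustrative assumptions.
spec:
  externalClusters:
    - name: origin                                    # required server name
      barmanObjectStore:
        destinationPath: s3://example-bucket/backups  # required
        endpointURL: https://s3.example.com           # optional, overrides endpoint discovery
        s3Credentials:
          accessKeyId:
            name: origin-backup-creds                 # hypothetical Secret
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: origin-backup-creds
            key: SECRET_ACCESS_KEY
        wal:
          compression: gzip                           # gzip, bzip2 or snappy
          maxParallel: 4
        data:
          compression: gzip
          jobs: 2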
- items: - type: string - type: array - type: object - required: - - destinationPath - type: object - connectionParameters: - additionalProperties: - type: string - description: The list of connection parameters, such as dbname, - host, username, etc - type: object - name: - description: The server name, required - type: string - password: - description: |- - The reference to the password to be used to connect to the server. - If a password is provided, CloudNativePG creates a PostgreSQL - passfile at `/controller/external/NAME/pass` (where "NAME" is the - cluster's name). This passfile is automatically referenced in the - connection string when establishing a connection to the remote - PostgreSQL server from the current PostgreSQL `Cluster`. This ensures - secure and efficient password management for external clusters. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - sslCert: - description: |- - The reference to an SSL certificate to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - sslKey: - description: |- - The reference to an SSL private key to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - sslRootCert: - description: |- - The reference to an SSL CA public key to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - required: - - name - type: object - type: array - failoverDelay: - default: 0 - description: |- - The amount of time (in seconds) to wait before triggering a failover - after the primary PostgreSQL instance in the cluster was detected - to be unhealthy - format: int32 - type: integer - imageCatalogRef: - description: Defines the major PostgreSQL version we want to use within - an ImageCatalog - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - major: - description: The major version of PostgreSQL we want to use from - the ImageCatalog - type: integer - x-kubernetes-validations: - - message: Major is immutable - rule: self == oldSelf - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - major - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: Only image catalogs are supported - rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' - - message: Only image catalogs are supported - rule: self.apiGroup == 'postgresql.cnpg.io' - imageName: - description: |- - Name of the container image, supporting both tags (`:`) - and digests for deterministic and repeatable deployments - (`:@sha256:`) - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of `Always`, `Never` or `IfNotPresent`. - If not defined, it defaults to `IfNotPresent`. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images - items: - description: |- - LocalObjectReference contains enough information to let you locate a - local object with a known type inside the same namespace - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - type: array - inheritedMetadata: - description: Metadata that will be inherited by all objects related - to the Cluster - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - instances: - default: 1 - description: Number of instances required in the cluster - minimum: 1 - type: integer - livenessProbeTimeout: - description: |- - LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance - to successfully respond to the liveness probe (default 30). - The Liveness probe failure threshold is derived from this value using the formula: - ceiling(livenessProbe / 10). 
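# A minimal sketch of the image and sizing fields described above. The image reference
# and timeout values are illustrative assumptions; a digest-pinned image is also
# supported, as noted in the `imageName` description.
spec:
  instances: 3
  imageName: ghcr.io/example/postgres:16   # hypothetical image
  imagePullPolicy: IfNotPresent
  failoverDelay: 0
  livenessProbeTimeout: 30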
- format: int32 - type: integer - logLevel: - default: info - description: 'The instances'' log level, one of the following values: - error, warning, info (default), debug, trace' - enum: - - error - - warning - - info - - debug - - trace - type: string - managed: - description: The configuration that is used by the portions of PostgreSQL - that are managed by the instance manager - properties: - roles: - description: Database roles managed by the `Cluster` - items: - description: |- - RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role - with the additional field Ensure specifying whether to ensure the presence or - absence of the role in the database - - The defaults of the CREATE ROLE command are applied - Reference: https://www.postgresql.org/docs/current/sql-createrole.html - properties: - bypassrls: - description: |- - Whether a role bypasses every row-level security (RLS) policy. - Default is `false`. - type: boolean - comment: - description: Description of the role - type: string - connectionLimit: - default: -1 - description: |- - If the role can log in, this specifies how many concurrent - connections the role can make. `-1` (the default) means no limit. - format: int64 - type: integer - createdb: - description: |- - When set to `true`, the role being defined will be allowed to create - new databases. Specifying `false` (default) will deny a role the - ability to create databases. - type: boolean - createrole: - description: |- - Whether the role will be permitted to create, alter, drop, comment - on, change the security label for, and grant or revoke membership in - other roles. Default is `false`. - type: boolean - disablePassword: - description: DisablePassword indicates that a role's password - should be set to NULL in Postgres - type: boolean - ensure: - default: present - description: Ensure the role is `present` or `absent` - - defaults to "present" - enum: - - present - - absent - type: string - inRoles: - description: |- - List of one or more existing roles to which this role will be - immediately added as a new member. Default empty. - items: - type: string - type: array - inherit: - default: true - description: |- - Whether a role "inherits" the privileges of roles it is a member of. - Defaults is `true`. - type: boolean - login: - description: |- - Whether the role is allowed to log in. A role having the `login` - attribute can be thought of as a user. Roles without this attribute - are useful for managing database privileges, but are not users in - the usual sense of the word. Default is `false`. - type: boolean - name: - description: Name of the role - type: string - passwordSecret: - description: |- - Secret containing the password of the role (if present) - If null, the password will be ignored unless DisablePassword is set - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - replication: - description: |- - Whether a role is a replication role. A role must have this - attribute (or be a superuser) in order to be able to connect to the - server in replication mode (physical or logical replication) and in - order to be able to create or drop replication slots. A role having - the `replication` attribute is a very highly privileged role, and - should only be used on roles actually used for replication. Default - is `false`. 
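# A minimal sketch of a role declared through `managed.roles`, using the attributes
# described above. The role name, member role and Secret are illustrative assumptions.
spec:
  managed:
    roles:
      - name: example_app                 # hypothetical role
        ensure: present
        login: true
        superuser: false
        createdb: false
        inherit: true
        connectionLimit: -1               # -1 means no limit
        inRoles:
          - pg_monitor                    # assumed existing role
        passwordSecret:
          name: example-app-credentials   # hypothetical Secret holding the password
        comment: Application role managed by the operator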
- type: boolean - superuser: - description: |- - Whether the role is a `superuser` who can override all access - restrictions within the database - superuser status is dangerous and - should be used only when really needed. You must yourself be a - superuser to create a new superuser. Defaults is `false`. - type: boolean - validUntil: - description: |- - Date and time after which the role's password is no longer valid. - When omitted, the password will never expire (default). - format: date-time - type: string - required: - - name - type: object - type: array - services: - description: Services roles managed by the `Cluster` - properties: - additional: - description: Additional is a list of additional managed services - specified by the user. - items: - description: |- - ManagedService represents a specific service managed by the cluster. - It includes the type of service and its associated template specification. - properties: - selectorType: - allOf: - - enum: - - rw - - r - - ro - - enum: - - rw - - r - - ro - description: |- - SelectorType specifies the type of selectors that the service will have. - Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. - type: string - serviceTemplate: - description: ServiceTemplate is the template specification - for the service. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only - supported for certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the service. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - allocateLoadBalancerNodePorts: - description: |- - allocateLoadBalancerNodePorts defines if NodePorts will be automatically - allocated for services with type LoadBalancer. Default is "true". It - may be set to "false" if the cluster load-balancer does not rely on - NodePorts. If the caller requests specific NodePorts (by specifying a - value), those requests will be respected, regardless of this field. - This field may only be set for services with type LoadBalancer and will - be cleared if the type is changed to any other type. - type: boolean - clusterIP: - description: |- - clusterIP is the IP address of the service and is usually assigned - randomly. If an address is specified manually, is in-range (as per - system configuration), and is not in use, it will be allocated to the - service; otherwise creation of the service will fail. 
This field may not - be changed through updates unless the type field is also being changed - to ExternalName (which requires this field to be blank) or the type - field is being changed from ExternalName (in which case this field may - optionally be specified, as describe above). Valid values are "None", - empty string (""), or a valid IP address. Setting this to "None" makes a - "headless service" (no virtual IP), which is useful when direct endpoint - connections are preferred and proxying is not required. Only applies to - types ClusterIP, NodePort, and LoadBalancer. If this field is specified - when creating a Service of type ExternalName, creation will fail. This - field will be wiped when updating a Service to type ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - clusterIPs: - description: |- - ClusterIPs is a list of IP addresses assigned to this service, and are - usually assigned randomly. If an address is specified manually, is - in-range (as per system configuration), and is not in use, it will be - allocated to the service; otherwise creation of the service will fail. - This field may not be changed through updates unless the type field is - also being changed to ExternalName (which requires this field to be - empty) or the type field is being changed from ExternalName (in which - case this field may optionally be specified, as describe above). Valid - values are "None", empty string (""), or a valid IP address. Setting - this to "None" makes a "headless service" (no virtual IP), which is - useful when direct endpoint connections are preferred and proxying is - not required. Only applies to types ClusterIP, NodePort, and - LoadBalancer. If this field is specified when creating a Service of type - ExternalName, creation will fail. This field will be wiped when updating - a Service to type ExternalName. If this field is not specified, it will - be initialized from the clusterIP field. If this field is specified, - clients must ensure that clusterIPs[0] and clusterIP have the same - value. - - This field may hold a maximum of two entries (dual-stack IPs, in either order). - These IPs must correspond to the values of the ipFamilies field. Both - clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalIPs: - description: |- - externalIPs is a list of IP addresses for which nodes in the cluster - will also accept traffic for this service. These IPs are not managed by - Kubernetes. The user is responsible for ensuring that traffic arrives - at a node with this IP. A common example is external load-balancers - that are not part of the Kubernetes system. - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalName: - description: |- - externalName is the external reference that discovery mechanisms will - return as an alias for this service (e.g. a DNS CNAME record). No - proxying will be involved. Must be a lowercase RFC-1123 hostname - (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". - type: string - externalTrafficPolicy: - description: |- - externalTrafficPolicy describes how nodes distribute service traffic they - receive on one of the Service's "externally-facing" addresses (NodePorts, - ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure - the service in a way that assumes that external load balancers will take care - of balancing the service traffic between nodes, and so each node will deliver - traffic only to the node-local endpoints of the service, without masquerading - the client source IP. (Traffic mistakenly sent to a node with no endpoints will - be dropped.) The default value, "Cluster", uses the standard behavior of - routing to all endpoints evenly (possibly modified by topology and other - features). Note that traffic sent to an External IP or LoadBalancer IP from - within the cluster will always get "Cluster" semantics, but clients sending to - a NodePort from within the cluster may need to take traffic policy into account - when picking a node. - type: string - healthCheckNodePort: - description: |- - healthCheckNodePort specifies the healthcheck nodePort for the service. - This only applies when type is set to LoadBalancer and - externalTrafficPolicy is set to Local. If a value is specified, is - in-range, and is not in use, it will be used. If not specified, a value - will be automatically allocated. External systems (e.g. load-balancers) - can use this port to determine if a given node holds endpoints for this - service or not. If this field is specified when creating a Service - which does not need it, creation will fail. This field will be wiped - when updating a Service to no longer need it (e.g. changing type). - This field cannot be updated once set. - format: int32 - type: integer - internalTrafficPolicy: - description: |- - InternalTrafficPolicy describes how nodes distribute service traffic they - receive on the ClusterIP. If set to "Local", the proxy will assume that pods - only want to talk to endpoints of the service on the same node as the pod, - dropping the traffic if there are no local endpoints. The default value, - "Cluster", uses the standard behavior of routing to all endpoints evenly - (possibly modified by topology and other features). - type: string - ipFamilies: - description: |- - IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - service. This field is usually assigned automatically based on cluster - configuration and the ipFamilyPolicy field. If this field is specified - manually, the requested family is available in the cluster, - and ipFamilyPolicy allows it, it will be used; otherwise creation of - the service will fail. This field is conditionally mutable: it allows - for adding or removing a secondary IP family, but it does not allow - changing the primary IP family of the Service. Valid values are "IPv4" - and "IPv6". This field only applies to Services of types ClusterIP, - NodePort, and LoadBalancer, and does apply to "headless" services. - This field will be wiped when updating a Service to type ExternalName. - - This field may hold a maximum of two entries (dual-stack families, in - either order). These families must correspond to the values of the - clusterIPs field, if specified. Both clusterIPs and ipFamilies are - governed by the ipFamilyPolicy field. - items: - description: |- - IPFamily represents the IP Family (IPv4 or IPv6). This type is used - to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). - type: string - type: array - x-kubernetes-list-type: atomic - ipFamilyPolicy: - description: |- - IPFamilyPolicy represents the dual-stack-ness requested or required by - this Service. If there is no value provided, then this field will be set - to SingleStack. 
Services can be "SingleStack" (a single IP family), - "PreferDualStack" (two IP families on dual-stack configured clusters or - a single IP family on single-stack clusters), or "RequireDualStack" - (two IP families on dual-stack configured clusters, otherwise fail). The - ipFamilies and clusterIPs fields depend on the value of this field. This - field will be wiped when updating a service to type ExternalName. - type: string - loadBalancerClass: - description: |- - loadBalancerClass is the class of the load balancer implementation this Service belongs to. - If specified, the value of this field must be a label-style identifier, with an optional prefix, - e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. - This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load - balancer implementation is used, today this is typically done through the cloud provider integration, - but should apply for any default implementation. If set, it is assumed that a load balancer - implementation is watching for Services with a matching class. Any default load balancer - implementation (e.g. cloud providers) should ignore Services that set this field. - This field can only be set when creating or updating a Service to type 'LoadBalancer'. - Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - type: string - loadBalancerIP: - description: |- - Only applies to Service Type: LoadBalancer. - This feature depends on whether the underlying cloud-provider supports specifying - the loadBalancerIP when a load balancer is created. - This field will be ignored if the cloud-provider does not support the feature. - Deprecated: This field was under-specified and its meaning varies across implementations. - Using it is non-portable and it may not support dual-stack. - Users are encouraged to use implementation-specific annotations when available. - type: string - loadBalancerSourceRanges: - description: |- - If specified and supported by the platform, this will restrict traffic through the cloud-provider - load-balancer will be restricted to the specified client IPs. This field will be ignored if the - cloud-provider does not support the feature." - More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ - items: - type: string - type: array - x-kubernetes-list-type: atomic - ports: - description: |- - The list of ports that are exposed by this service. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - description: ServicePort contains information - on service's port. - properties: - appProtocol: - description: |- - The application protocol for this port. - This is used as a hint for implementations to offer richer behavior for protocols that they understand. - This field follows standard Kubernetes label syntax. - Valid values are either: - - * Un-prefixed protocol names - reserved for IANA standard service names (as per - RFC-6335 and https://www.iana.org/assignments/service-names). 
- - * Kubernetes-defined prefixed names: - * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- - * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 - * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - - * Other protocols should use implementation-defined prefixed names such as - mycompany.com/my-custom-protocol. - type: string - name: - description: |- - The name of this port within the service. This must be a DNS_LABEL. - All ports within a ServiceSpec must have unique names. When considering - the endpoints for a Service, this must match the 'name' field in the - EndpointPort. - Optional if only one ServicePort is defined on this service. - type: string - nodePort: - description: |- - The port on each node on which this service is exposed when type is - NodePort or LoadBalancer. Usually assigned by the system. If a value is - specified, in-range, and not in use it will be used, otherwise the - operation will fail. If not specified, a port will be allocated if this - Service requires one. If this field is specified when creating a - Service which does not need it, creation will fail. This field will be - wiped when updating a Service to no longer need it (e.g. changing type - from NodePort to ClusterIP). - More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - format: int32 - type: integer - port: - description: The port that will be exposed - by this service. - format: int32 - type: integer - protocol: - default: TCP - description: |- - The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Default is TCP. - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the pods targeted by the service. - Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - If this is a string, it will be looked up as a named port in the - target Pod's container ports. If this is not specified, the value - of the 'port' field is used (an identity map). - This field is ignored for services with clusterIP=None, and should be - omitted or set equal to the 'port' field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - x-kubernetes-int-or-string: true - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - - protocol - x-kubernetes-list-type: map - publishNotReadyAddresses: - description: |- - publishNotReadyAddresses indicates that any agent which deals with endpoints for this - Service should disregard any indications of ready/not-ready. - The primary use case for setting this field is for a StatefulSet's Headless Service to - propagate SRV DNS records for its Pods for the purpose of peer discovery. - The Kubernetes controllers that generate Endpoints and EndpointSlice resources for - Services interpret this to mean that all endpoints are considered "ready" even if the - Pods themselves are not. Agents which consume only Kubernetes generated endpoints - through the Endpoints or EndpointSlice resources can safely assume this behavior. - type: boolean - selector: - additionalProperties: - type: string - description: |- - Route service traffic to pods with label keys and values matching this - selector. 
If empty or not present, the service is assumed to have an - external process managing its endpoints, which Kubernetes will not - modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - Ignored if type is ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - type: object - x-kubernetes-map-type: atomic - sessionAffinity: - description: |- - Supports "ClientIP" and "None". Used to maintain session affinity. - Enable client IP based session affinity. - Must be ClientIP or None. - Defaults to None. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - sessionAffinityConfig: - description: sessionAffinityConfig contains - the configurations of session affinity. - properties: - clientIP: - description: clientIP contains the configurations - of Client IP based session affinity. - properties: - timeoutSeconds: - description: |- - timeoutSeconds specifies the seconds of ClientIP type session sticky time. - The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - Default value is 10800(for 3 hours). - format: int32 - type: integer - type: object - type: object - trafficDistribution: - description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. - type: string - type: - description: |- - type determines how the Service is exposed. Defaults to ClusterIP. Valid - options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - "ClusterIP" allocates a cluster-internal IP address for load-balancing - to endpoints. Endpoints are determined by the selector or if that is not - specified, by manual construction of an Endpoints object or - EndpointSlice objects. If clusterIP is "None", no virtual IP is - allocated and the endpoints are published as a set of endpoints rather - than a virtual IP. - "NodePort" builds on ClusterIP and allocates a port on every node which - routes to the same endpoints as the clusterIP. - "LoadBalancer" builds on NodePort and creates an external load-balancer - (if supported in the current cloud) which routes to the same endpoints - as the clusterIP. - "ExternalName" aliases this service to the specified externalName. - Several other fields do not apply to ExternalName services. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types - type: string - type: object - type: object - updateStrategy: - default: patch - description: UpdateStrategy describes how the service - differences should be reconciled - enum: - - patch - - replace - type: string - required: - - selectorType - - serviceTemplate - type: object - type: array - disabledDefaultServices: - description: |- - DisabledDefaultServices is a list of service types that are disabled by default. - Valid values are "r", and "ro", representing read, and read-only services. - items: - description: |- - ServiceSelectorType describes a valid value for generating the service selectors. 
- It indicates which type of service the selector applies to, such as read-write, read, or read-only - enum: - - rw - - r - - ro - type: string - type: array - type: object - type: object - maxSyncReplicas: - default: 0 - description: |- - The target value for the synchronous replication quorum, that can be - decreased if the number of ready standbys is lower than this. - Undefined or 0 disable synchronous replication. - minimum: 0 - type: integer - minSyncReplicas: - default: 0 - description: |- - Minimum number of instances required in synchronous replication with the - primary. Undefined or 0 allow writes to complete when no standby is - available. - minimum: 0 - type: integer - monitoring: - description: The configuration of the monitoring infrastructure of - this cluster - properties: - customQueriesConfigMap: - description: The list of config maps containing the custom queries - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - customQueriesSecret: - description: The list of secrets containing the custom queries - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - disableDefaultQueries: - default: false - description: |- - Whether the default queries should be injected. - Set it to `true` if you don't want to inject default queries into the cluster. - Default: false. - type: boolean - enablePodMonitor: - default: false - description: Enable or disable the `PodMonitor` - type: boolean - podMonitorMetricRelabelings: - description: The list of metric relabelings for the `PodMonitor`. - Applied to samples before ingestion. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. 
Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - podMonitorRelabelings: - description: The list of relabelings for the `PodMonitor`. Applied - to samples before scraping. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - tls: - description: |- - Configure TLS communication for the metrics endpoint. - Changing tls.enabled option will force a rollout of all instances. - properties: - enabled: - default: false - description: |- - Enable TLS for the monitoring endpoint. - Changing this option will force a rollout of all instances. - type: boolean - type: object - type: object - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - default: false - description: Is there a node maintenance activity in progress? 
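# A minimal sketch of the monitoring options described above: enabling the PodMonitor,
# adding a custom-queries ConfigMap, and dropping a metric family with a metric
# relabeling. The ConfigMap name, key and regex are illustrative assumptions.
spec:
  monitoring:
    enablePodMonitor: true
    disableDefaultQueries: false
    customQueriesConfigMap:
      - name: example-queries        # hypothetical ConfigMap with custom queries
        key: queries.yaml
    podMonitorMetricRelabelings:
      - action: drop
        sourceLabels:
          - __name__
        regex: go_.*                 # drop Go runtime metrics, as an example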
- type: boolean - reusePVC: - default: true - description: |- - Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere - when `instances` >1) - type: boolean - type: object - plugins: - description: |- - The plugins configuration, containing - any plugin to be loaded with the corresponding configuration - items: - description: |- - PluginConfiguration specifies a plugin that need to be loaded for this - cluster to be reconciled - properties: - name: - description: Name is the plugin name - type: string - parameters: - additionalProperties: - type: string - description: Parameters is the configuration of the plugin - type: object - required: - - name - type: object - type: array - postgresGID: - default: 26 - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - default: 26 - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - enableAlterSystem: - description: |- - If this parameter is true, the user will be able to invoke `ALTER SYSTEM` - on this CloudNativePG Cluster. - This should only be used for debugging and troubleshooting. - Defaults to false. - type: boolean - ldap: - description: Options to specify LDAP configuration - properties: - bindAsAuth: - description: Bind as authentication configuration - properties: - prefix: - description: Prefix for the bind authentication option - type: string - suffix: - description: Suffix for the bind authentication option - type: string - type: object - bindSearchAuth: - description: Bind+Search authentication configuration - properties: - baseDN: - description: Root DN to begin the user search - type: string - bindDN: - description: DN of the user to bind to the directory - type: string - bindPassword: - description: Secret with the password for the user to - bind to the directory - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - searchAttribute: - description: Attribute to match against the username - type: string - searchFilter: - description: Search filter to use when doing the search+bind - authentication - type: string - type: object - port: - description: LDAP server port - type: integer - scheme: - description: LDAP schema to be used, possible options are - `ldap` and `ldaps` - enum: - - ldap - - ldaps - type: string - server: - description: LDAP hostname or IP address - type: string - tls: - description: Set to 'true' to enable LDAP over TLS. 
'false' - is default - type: boolean - type: object - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: |- - PostgreSQL Host Based Authentication rules (lines to be appended - to the pg_hba.conf file) - items: - type: string - type: array - pg_ident: - description: |- - PostgreSQL User Name Maps rules (lines to be appended - to the pg_ident.conf file) - items: - type: string - type: array - promotionTimeout: - description: |- - Specifies the maximum number of seconds to wait when promoting an instance to primary. - Default value is 40000000, greater than one year in seconds, - big enough to simulate an infinite timeout - format: int32 - type: integer - shared_preload_libraries: - description: Lists of shared preload libraries to add to the default - ones - items: - type: string - type: array - syncReplicaElectionConstraint: - description: |- - Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be - set up. - properties: - enabled: - description: This flag enables the constraints for sync replicas - type: boolean - nodeLabelsAntiAffinity: - description: A list of node labels values to extract and compare - to evaluate if the pods reside in the same topology or not - items: - type: string - type: array - required: - - enabled - type: object - synchronous: - description: Configuration of the PostgreSQL synchronous replication - feature - properties: - maxStandbyNamesFromCluster: - description: |- - Specifies the maximum number of local cluster pods that can be - automatically included in the `synchronous_standby_names` option in - PostgreSQL. - type: integer - method: - description: |- - Method to select synchronous replication standbys from the listed - servers, accepting 'any' (quorum-based synchronous replication) or - 'first' (priority-based synchronous replication) as values. - enum: - - any - - first - type: string - number: - description: |- - Specifies the number of synchronous standby servers that - transactions must wait for responses from. - type: integer - x-kubernetes-validations: - - message: The number of synchronous replicas should be greater - than zero - rule: self > 0 - standbyNamesPost: - description: |- - A user-defined list of application names to be added to - `synchronous_standby_names` after local cluster pods (the order is - only useful for priority-based synchronous replication). - items: - type: string - type: array - standbyNamesPre: - description: |- - A user-defined list of application names to be added to - `synchronous_standby_names` before local cluster pods (the order is - only useful for priority-based synchronous replication). 
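As a rough illustration of the `postgresql` block above, a sketch of a Cluster spec setting plain parameters, an HBA rule, an extra preload library, and quorum-based synchronous replication (all values are placeholders, not recommendations):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  postgresql:
    parameters:                     # plain postgresql.conf settings
      max_connections: "200"
      shared_buffers: "256MB"
    pg_hba:                         # lines appended to pg_hba.conf
      - hostssl all all 10.0.0.0/8 scram-sha-256
    shared_preload_libraries:       # added on top of the default libraries
      - pg_cron
    synchronous:
      method: any                   # quorum-based synchronous replication
      number: 1                     # transactions wait for one synchronous standby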
- items: - type: string - type: array - required: - - method - - number - type: object - type: object - primaryUpdateMethod: - default: restart - description: |- - Method to follow to upgrade the primary server during a rolling - update procedure, after all replicas have been successfully updated: - it can be with a switchover (`switchover`) or in-place (`restart` - default) - enum: - - switchover - - restart - type: string - primaryUpdateStrategy: - default: unsupervised - description: |- - Deployment strategy to follow to upgrade the primary server during a rolling - update procedure, after all replicas have been successfully updated: - it can be automated (`unsupervised` - default) or manual (`supervised`) - enum: - - unsupervised - - supervised - type: string - priorityClassName: - description: |- - Name of the priority class which will be used in every generated Pod, if the PriorityClass - specified does not exist, the pod will not be able to schedule. Please refer to - https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass - for more information - type: string - projectedVolumeTemplate: - description: |- - Template to be used to define projected volumes, projected volumes will be mounted - under `/projected` base folder - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. Each entry in this list - handles one source. - items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. 
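The rolling-update knobs just described can be combined as follows; a sketch assuming a pre-existing PriorityClass named `database-critical` (hypothetical):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  primaryUpdateMethod: switchover       # switch over to an updated replica instead of restarting in place
  primaryUpdateStrategy: supervised     # the primary update waits for manual action
  priorityClassName: database-critical  # hypothetical PriorityClass; must already exist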
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume root to write - the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. - type: string - required: - - path - type: object - configMap: - description: configMap information about the configMap data - to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. 
- This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the downwardAPI - data to project - properties: - items: - description: Items is a list of DownwardAPIVolume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the - pod: only annotations, labels, name, namespace - and uid are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must not - be absolute or contain the ''..'' path. Must - be utf-8 encoded. The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the secret data to - project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. 
- type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether the Secret - or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information about the - serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. - type: string - required: - - path - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - replica: - description: Replica cluster configuration - properties: - enabled: - description: |- - If replica mode is enabled, this cluster will be a replica of an - existing cluster. Replica cluster can be created from a recovery - object store or via streaming through pg_basebackup. - Refer to the Replica clusters page of the documentation for more information. - type: boolean - minApplyDelay: - description: |- - When replica mode is enabled, this parameter allows you to replay - transactions only when the system time is at least the configured - time past the commit time. This provides an opportunity to correct - data loss errors. Note that when this parameter is set, a promotion - token cannot be used. - type: string - primary: - description: |- - Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the - topology specified in externalClusters - type: string - promotionToken: - description: |- - A demotion token generated by an external cluster used to - check if the promotion requirements are met. 
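Stepping back to the `projectedVolumeTemplate` section that closes above (its content is mounted under the `/projected` base folder), a hedged sketch with one ConfigMap and one Secret source, both hypothetical:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  projectedVolumeTemplate:
    sources:
      - configMap:
          name: app-extra-config     # hypothetical ConfigMap
          items:
            - key: app.conf
              path: app.conf         # ends up at /projected/app.conf
      - secret:
          name: app-extra-secret     # hypothetical Secret
          optional: true             # do not block startup if it is missing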
- type: string - self: - description: |- - Self defines the name of this cluster. It is used to determine if this is a primary - or a replica cluster, comparing it with `primary` - type: string - source: - description: The name of the external cluster which is the replication - origin - minLength: 1 - type: string - required: - - source - type: object - replicationSlots: - default: - highAvailability: - enabled: true - description: Replication slots management configuration - properties: - highAvailability: - default: - enabled: true - description: Replication slots for high availability configuration - properties: - enabled: - default: true - description: |- - If enabled (default), the operator will automatically manage replication slots - on the primary instance and use them in streaming replication - connections with all the standby instances that are part of the HA - cluster. If disabled, the operator will not take advantage - of replication slots in streaming connections with the replicas. - This feature also controls replication slots in replica cluster, - from the designated primary to its cascading replicas. - type: boolean - slotPrefix: - default: _cnpg_ - description: |- - Prefix for replication slots managed by the operator for HA. - It may only contain lower case letters, numbers, and the underscore character. - This can only be set at creation time. By default set to `_cnpg_`. - pattern: ^[0-9a-z_]*$ - type: string - type: object - synchronizeReplicas: - description: Configures the synchronization of the user defined - physical replication slots - properties: - enabled: - default: true - description: When set to true, every replication slot that - is on the primary is synchronized on each standby - type: boolean - excludePatterns: - description: List of regular expression patterns to match - the names of replication slots to be excluded (by default - empty) - items: - type: string - type: array - required: - - enabled - type: object - updateInterval: - default: 30 - description: |- - Standby will update the status of the local replication slots - every `updateInterval` seconds (default 30). - minimum: 1 - type: integer - type: object - resources: - description: |- - Resources requirements of every generated Pod. Please refer to - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. 
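The `replica` and `replicationSlots` sections above might be combined for a cluster replicating from an external origin; a sketch in which `origin-cluster` stands in for an entry defined under `externalClusters`:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example-replica
spec:
  instances: 2
  replica:
    enabled: true
    source: origin-cluster        # name of the external cluster acting as replication origin
    primary: origin-cluster       # which cluster is primary in the distributed topology
    self: example-replica         # this cluster's own name in that topology
  replicationSlots:
    highAvailability:
      enabled: true               # operator-managed slots for HA standbys
      slotPrefix: _cnpg_
    updateInterval: 30            # seconds between slot status updates on standbys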
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - schedulerName: - description: |- - If specified, the pod will be dispatched by specified Kubernetes - scheduler. If not specified, the pod will be dispatched by the default - scheduler. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ - type: string - seccompProfile: - description: |- - The SeccompProfile applied to every Pod and Container. - Defaults to: `RuntimeDefault` - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - serviceAccountTemplate: - description: Configure the generation of the service account - properties: - metadata: - description: |- - Metadata are the metadata to be used for the generated - service account - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - required: - - metadata - type: object - smartShutdownTimeout: - default: 180 - description: |- - The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. 
- Make sure you reserve enough time for the operator to request a fast shutdown of Postgres - (that is: `stopDelay` - `smartShutdownTimeout`). - format: int32 - type: integer - startDelay: - default: 3600 - description: |- - The time in seconds that is allowed for a PostgreSQL instance to - successfully start up (default 3600). - The startup probe failure threshold is derived from this value using the formula: - ceiling(startDelay / 10). - format: int32 - type: integer - stopDelay: - default: 1800 - description: |- - The time in seconds that is allowed for a PostgreSQL instance to - gracefully shutdown (default 1800) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - superuserSecret: - description: |- - The secret containing the superuser password. If not defined a new - secret will be created with a randomly generated password - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - switchoverDelay: - default: 3600 - description: |- - The time in seconds that is allowed for a primary PostgreSQL instance - to gracefully shutdown during a switchover. - Default value is 3600 seconds (1 hour). 
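Pulling together the resource, storage, and shutdown-timing fields covered above, a minimal sketch (sizes, the StorageClass, and the secret name are placeholders):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  resources:
    requests:
      cpu: "1"
      memory: 2Gi
    limits:
      memory: 2Gi
  storage:
    size: 50Gi                   # cannot be decreased later
    storageClass: gp3            # hypothetical StorageClass
    resizeInUseVolumes: true
  superuserSecret:
    name: example-superuser      # hypothetical pre-created secret with the superuser password
  stopDelay: 1800
  smartShutdownTimeout: 180      # keep stopDelay - smartShutdownTimeout large enough for a fast shutdown
  switchoverDelay: 3600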
- format: int32 - type: integer - tablespaces: - description: The tablespaces configuration - items: - description: |- - TablespaceConfiguration is the configuration of a tablespace, and includes - the storage specification for the tablespace - properties: - name: - description: The name of the tablespace - type: string - owner: - description: Owner is the PostgreSQL user owning the tablespace - properties: - name: - type: string - type: object - storage: - description: The storage configuration for the tablespace - properties: - pvcTemplate: - description: Template to be used to generate the Persistent - Volume Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. 
- (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes - to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to - the PersistentVolume backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - temporary: - default: false - description: |- - When set to true, the tablespace will be added as a `temp_tablespaces` - entry in PostgreSQL, and will be available to automatically house temp - database objects, or other temporary files. Please refer to PostgreSQL - documentation for more information on the `temp_tablespaces` GUC. - type: boolean - required: - - name - - storage - type: object - type: array - topologySpreadConstraints: - description: |- - TopologySpreadConstraints specifies how to spread matching pods among the given topology. - More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ - items: - description: TopologySpreadConstraint specifies how to spread matching - pods among the given topology. 
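An illustrative `tablespaces` entry matching the structure above, as a sketch (the tablespace name, owner, and sizes are placeholders):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  tablespaces:
    - name: analytics            # required
      owner:
        name: app                # PostgreSQL role owning the tablespace (placeholder)
      storage:                   # required; same shape as the main storage block
        size: 20Gi
        storageClass: gp3        # hypothetical StorageClass
      temporary: false           # set true to add it to temp_tablespaces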
- properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. 
- format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. 
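For instance, the constraint fields above could spread instances across zones; a sketch that assumes the operator-managed `cnpg.io/cluster` pod label, which is not part of this schema excerpt:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example
spec:
  instances: 3
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          cnpg.io/cluster: example   # assumed operator-set pod label
      nodeTaintsPolicy: Honor        # count only nodes whose taints the pods tolerate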
- A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - walStorage: - description: Configuration of the storage for PostgreSQL WAL (Write-Ahead - Log) - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. 
- type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - required: - - instances - type: object - x-kubernetes-validations: - - message: imageName and imageCatalogRef are mutually exclusive - rule: '!(has(self.imageCatalogRef) && has(self.imageName))' - status: - description: |- - Most recently observed status of the cluster. This data may not be up - to date. Populated by the system. Read-only. 
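Since the walStorage schema above accepts the same storage-configuration shape as the main data volume (size, storageClass, resizeInUseVolumes, optional pvcTemplate), a hedged sketch of separating WAL from data volumes could look like this; the StorageClass name is an assumption.

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 20Gi                  # data volume
    storageClass: fast-ssd      # assumed StorageClass name
    resizeInUseVolumes: true    # default; size changes are re-applied to existing PVCs
  walStorage:
    size: 5Gi                   # dedicated Write-Ahead Log volume
    storageClass: fast-ssd
```

Per the schema note above, size can only grow; requests to shrink it are rejected.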
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - availableArchitectures: - description: AvailableArchitectures reports the available architectures - of a cluster - items: - description: AvailableArchitecture represents the state of a cluster's - architecture - properties: - goArch: - description: GoArch is the name of the executable architecture - type: string - hash: - description: Hash is the hash of the executable - type: string - required: - - goArch - - hash - type: object - type: array - azurePVCUpdateEnabled: - description: AzurePVCUpdateEnabled shows if the PVC online upgrade - is enabled for this cluster - type: boolean - certificates: - description: The configuration for the CA and related certificates, - initialized with defaults. - properties: - clientCASecret: - description: |- - The secret containing the Client CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate all the client certificates.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the client certificates, - used as `ssl_ca_file` of all the instances.
- - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, - this can be omitted.
- type: string - expirations: - additionalProperties: - type: string - description: Expiration dates for all certificates. - type: object - replicationTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the client certificate to authenticate as - the `streaming_replica` user. - If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be - created using the provided CA. - type: string - serverAltDNSNames: - description: The list of the server alternative DNS names to be - added to the generated server TLS certificates, when required. - items: - type: string - type: array - serverCASecret: - description: |- - The secret containing the Server CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the server certificate, - used as `sslrootcert` in client connection strings.
- - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, - this can be omitted.
- type: string - serverTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as - `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. - If not defined, ServerCASecret must provide also `ca.key` and a new secret will be - created using the provided CA. - type: string - type: object - cloudNativePGCommitHash: - description: The commit hash number of which this operator running - type: string - cloudNativePGOperatorHash: - description: The hash of the binary of the operator - type: string - conditions: - description: Conditions for cluster object - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - configMapResourceVersion: - description: |- - The list of resource versions of the configmaps, - managed by the operator. Every change here is done in the - interest of the instance manager, which will refresh the - configmap data - properties: - metrics: - additionalProperties: - type: string - description: |- - A map with the versions of all the config maps used to pass metrics. 
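The certificates block shown here sits under status, but it reports the same secrets a user can provide through the spec-side certificates configuration (serverCASecret, serverTLSSecret, clientCASecret, replicationTLSSecret, serverAltDNSNames). A sketch of supplying pre-existing secrets, with all secret names assumed:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 10Gi
  certificates:
    serverCASecret: cluster-example-server-ca          # assumed secret; must hold ca.crt (and ca.key unless serverTLSSecret is given)
    serverTLSSecret: cluster-example-server-tls        # kubernetes.io/tls secret used as ssl_cert_file / ssl_key_file
    clientCASecret: cluster-example-client-ca
    replicationTLSSecret: cluster-example-replication-tls
    serverAltDNSNames:
      - db.example.com                                 # assumed DNS name
```

If none of these are provided, the operator generates self-signed CAs and certificates, as the descriptions above state.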
- Map keys are the config map names, map values are the versions - type: object - type: object - currentPrimary: - description: Current primary instance - type: string - currentPrimaryFailingSinceTimestamp: - description: |- - The timestamp when the primary was detected to be unhealthy - This field is reported when `.spec.failoverDelay` is populated or during online upgrades - type: string - currentPrimaryTimestamp: - description: The timestamp when the last actual promotion to primary - has occurred - type: string - danglingPVC: - description: |- - List of all the PVCs created by this cluster and still available - which are not attached to a Pod - items: - type: string - type: array - demotionToken: - description: |- - DemotionToken is a JSON token containing the information - from pg_controldata such as Database system identifier, Latest checkpoint's - TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO - WAL file, and Time of latest checkpoint - type: string - firstRecoverabilityPoint: - description: |- - The first recoverability point, stored as a date in RFC3339 format. - This field is calculated from the content of FirstRecoverabilityPointByMethod - type: string - firstRecoverabilityPointByMethod: - additionalProperties: - format: date-time - type: string - description: The first recoverability point, stored as a date in RFC3339 - format, per backup method type - type: object - healthyPVC: - description: List of all the PVCs not dangling nor initializing - items: - type: string - type: array - image: - description: Image contains the image name used by the pods - type: string - initializingPVC: - description: List of all the PVCs that are being initialized by this - cluster - items: - type: string - type: array - instanceNames: - description: List of instance names in the cluster - items: - type: string - type: array - instances: - description: The total number of PVC Groups detected in the cluster. - It may differ from the number of existing instance pods. 
- type: integer - instancesReportedState: - additionalProperties: - description: InstanceReportedState describes the last reported state - of an instance during a reconciliation loop - properties: - isPrimary: - description: indicates if an instance is the primary one - type: boolean - timeLineID: - description: indicates on which TimelineId the instance is - type: integer - required: - - isPrimary - type: object - description: The reported state of the instances during the last reconciliation - loop - type: object - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: InstancesStatus indicates in which status the instances - are - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - lastFailedBackup: - description: Stored as a date in RFC3339 format - type: string - lastPromotionToken: - description: |- - LastPromotionToken is the last verified promotion token that - was used to promote a replica cluster - type: string - lastSuccessfulBackup: - description: |- - Last successful backup, stored as a date in RFC3339 format - This field is calculated from the content of LastSuccessfulBackupByMethod - type: string - lastSuccessfulBackupByMethod: - additionalProperties: - format: date-time - type: string - description: Last successful backup, stored as a date in RFC3339 format, - per backup method type - type: object - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - type: integer - managedRolesStatus: - description: ManagedRolesStatus reports the state of the managed roles - in the cluster - properties: - byStatus: - additionalProperties: - items: - type: string - type: array - description: ByStatus gives the list of roles in each state - type: object - cannotReconcile: - additionalProperties: - items: - type: string - type: array - description: |- - CannotReconcile lists roles that cannot be reconciled in PostgreSQL, - with an explanation of the cause - type: object - passwordStatus: - additionalProperties: - description: PasswordState represents the state of the password - of a managed RoleConfiguration - properties: - resourceVersion: - description: the resource version of the password secret - type: string - transactionID: - description: the last transaction ID to affect the role - definition in PostgreSQL - format: int64 - type: integer - type: object - description: PasswordStatus gives the last transaction id and - password secret version for each managed role - type: object - type: object - onlineUpdateEnabled: - description: OnlineUpdateEnabled shows if the online upgrade is enabled - inside the cluster - type: boolean - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pluginStatus: - description: PluginStatus is the status of the loaded plugins - items: - description: PluginStatus is the status of a loaded plugin - properties: - backupCapabilities: - description: |- - BackupCapabilities are the list of capabilities of the - plugin regarding the Backup management - items: - type: string - type: array - capabilities: - description: |- - Capabilities are the list of capabilities of the - plugin - items: - type: string - type: array - name: - description: Name is the name of the plugin - type: string - operatorCapabilities: - description: |- - OperatorCapabilities are the list of capabilities of the - plugin regarding the reconciler 
- items: - type: string - type: array - status: - description: Status contain the status reported by the plugin - through the SetStatusInCluster interface - type: string - version: - description: |- - Version is the version of the plugin loaded by the - latest reconciliation loop - type: string - walCapabilities: - description: |- - WALCapabilities are the list of capabilities of the - plugin regarding the WAL management - items: - type: string - type: array - required: - - name - - version - type: object - type: array - poolerIntegrations: - description: The integration needed by poolers referencing the cluster - properties: - pgBouncerIntegration: - description: PgBouncerIntegrationStatus encapsulates the needed - integration for the pgbouncer poolers referencing the cluster - properties: - secrets: - items: - type: string - type: array - type: object - type: object - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: The total number of ready instances in the cluster. It - is equal to the number of ready instance pods. - type: integer - resizingPVC: - description: List of all the PVCs that have ResizingPVC condition. - items: - type: string - type: array - secretsResourceVersion: - description: |- - The list of resource versions of the secrets - managed by the operator. Every change here is done in the - interest of the instance manager, which will refresh the - secret data - properties: - applicationSecretVersion: - description: The resource version of the "app" user secret - type: string - barmanEndpointCA: - description: The resource version of the Barman Endpoint CA if - provided - type: string - caSecretVersion: - description: Unused. Retained for compatibility with old versions. - type: string - clientCaSecretVersion: - description: The resource version of the PostgreSQL client-side - CA secret version - type: string - externalClusterSecretVersion: - additionalProperties: - type: string - description: The resource versions of the external cluster secrets - type: object - managedRoleSecretVersion: - additionalProperties: - type: string - description: The resource versions of the managed roles secrets - type: object - metrics: - additionalProperties: - type: string - description: |- - A map with the versions of all the secrets used to pass metrics. - Map keys are the secret names, map values are the versions - type: object - replicationSecretVersion: - description: The resource version of the "streaming_replica" user - secret - type: string - serverCaSecretVersion: - description: The resource version of the PostgreSQL server-side - CA secret version - type: string - serverSecretVersion: - description: The resource version of the PostgreSQL server-side - secret version - type: string - superuserSecretVersion: - description: The resource version of the "postgres" user secret - type: string - type: object - switchReplicaClusterStatus: - description: SwitchReplicaClusterStatus is the status of the switch - to replica cluster - properties: - inProgress: - description: InProgress indicates if there is an ongoing procedure - of switching a cluster to a replica cluster. 
- type: boolean - type: object - tablespacesStatus: - description: TablespacesStatus reports the state of the declarative - tablespaces in the cluster - items: - description: TablespaceState represents the state of a tablespace - in a cluster - properties: - error: - description: Error is the reconciliation error, if any - type: string - name: - description: Name is the name of the tablespace - type: string - owner: - description: Owner is the PostgreSQL user owning the tablespace - type: string - state: - description: State is the latest reconciliation state - type: string - required: - - name - - state - type: object - type: array - targetPrimary: - description: |- - Target primary instance, this is different from the previous one - during a switchover or a failover - type: string - targetPrimaryTimestamp: - description: The timestamp when the last request for a new primary - has occurred - type: string - timelineID: - description: The timeline of the Postgres cluster - type: integer - topology: - description: Instances topology. - properties: - instances: - additionalProperties: - additionalProperties: - type: string - description: PodTopologyLabels represent the topology of a Pod. - map[labelName]labelValue - type: object - description: Instances contains the pod topology of the instances - type: object - nodesUsed: - description: |- - NodesUsed represents the count of distinct nodes accommodating the instances. - A value of '1' suggests that all instances are hosted on a single node, - implying the absence of High Availability (HA). Ideally, this value should - be the same as the number of instances in the Postgres HA cluster, implying - shared nothing architecture on the compute side. - format: int32 - type: integer - successfullyExtracted: - description: |- - SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors - in synchronous replica election in case of failures - type: boolean - type: object - unusablePVC: - description: List of all the PVCs that are unusable because another - PVC is missing - items: - type: string - type: array - writeService: - description: Current write pod - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: imagecatalogs.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: ImageCatalog - listKind: ImageCatalogList - plural: imagecatalogs - singular: imagecatalog - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1 - schema: - openAPIV3Schema: - description: ImageCatalog is the Schema for the imagecatalogs API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. 
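Everything in the status section above is read-only and reported by the operator; nothing in it is set by the chart user. Purely to illustrate the shape, a healthy three-instance cluster's status might contain entries like the following (all values hypothetical):

```yaml
status:
  phase: Cluster in healthy state
  instances: 3
  readyInstances: 3
  currentPrimary: cluster-example-1
  targetPrimary: cluster-example-1
  timelineID: 1
  instanceNames:
    - cluster-example-1
    - cluster-example-2
    - cluster-example-3
  writeService: cluster-example-rw
  readService: cluster-example-r
```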
- Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the ImageCatalog. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - images: - description: List of CatalogImages available in the catalog - items: - description: CatalogImage defines the image and major version - properties: - image: - description: The image reference - type: string - major: - description: The PostgreSQL major version of the image. Must - be unique within the catalog. - minimum: 10 - type: integer - required: - - image - - major - type: object - maxItems: 8 - minItems: 1 - type: array - x-kubernetes-validations: - - message: Images must have unique major versions - rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) - required: - - images - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: poolers.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Pooler - listKind: PoolerList - plural: poolers - singular: pooler - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.type - name: Type - type: string - name: v1 - schema: - openAPIV3Schema: - description: Pooler is the Schema for the poolers API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the Pooler. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - cluster: - description: |- - This is the cluster reference on which the Pooler will work. - Pooler name should never match with any cluster name within the same namespace. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - deploymentStrategy: - description: The deployment strategy to use for pgbouncer to replace - existing pods with new ones - properties: - rollingUpdate: - description: |- - Rolling update config params. Present only if DeploymentStrategyType = - RollingUpdate. - properties: - maxSurge: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of pods that can be scheduled above the desired number of - pods. - Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
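The ImageCatalog CRD above only requires a list of one to eight images with unique PostgreSQL major versions (each at least 10). A minimal sketch, with the image references assumed for illustration:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ImageCatalog
metadata:
  name: postgresql-catalog
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.4   # assumed tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.0   # assumed tag
```

A Cluster then selects an entry through imageCatalogRef, which the validation rule earlier in this CRD keeps mutually exclusive with imageName.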
- This can not be 0 if MaxUnavailable is 0. - Absolute number is calculated from percentage by rounding up. - Defaults to 25%. - Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - the rolling update starts, such that the total number of old and new pods do not exceed - 130% of desired pods. Once old pods have been killed, - new ReplicaSet can be scaled up further, ensuring that total number of pods running - at any time during the update is at most 130% of desired pods. - x-kubernetes-int-or-string: true - maxUnavailable: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of pods that can be unavailable during the update. - Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - Absolute number is calculated from percentage by rounding down. - This can not be 0 if MaxSurge is 0. - Defaults to 25%. - Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - that the total number of pods available at all times during the update is at - least 70% of desired pods. - x-kubernetes-int-or-string: true - type: object - type: - description: Type of deployment. Can be "Recreate" or "RollingUpdate". - Default is RollingUpdate. - type: string - type: object - instances: - default: 1 - description: 'The number of replicas we want. Default: 1.' - format: int32 - type: integer - monitoring: - description: The configuration of the monitoring infrastructure of - this pooler. - properties: - enablePodMonitor: - default: false - description: Enable or disable the `PodMonitor` - type: boolean - podMonitorMetricRelabelings: - description: The list of metric relabelings for the `PodMonitor`. - Applied to samples before ingestion. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. 
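For the Pooler's deploymentStrategy, the maxSurge/maxUnavailable semantics are the standard Deployment rolling-update ones quoted above. A sketch of a two-replica pooler that rolls out without losing capacity, with the Cluster name assumed:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-rw                  # assumed name
spec:
  cluster:
    name: cluster-example          # assumed Cluster name (must differ from the Pooler's own name)
  instances: 2
  pgbouncer:
    poolMode: session
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1                  # allow one extra pod during the rollout
      maxUnavailable: 0            # never drop below the desired replica count
```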
- items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - podMonitorRelabelings: - description: The list of relabelings for the `PodMonitor`. Applied - to samples before scraping. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - type: object - pgbouncer: - description: The PgBouncer configuration - properties: - authQuery: - description: |- - The query that will be used to download the hash of the password - of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". - In case it is specified, also an AuthQuerySecret has to be specified and - no automatic CNPG Cluster integration will be triggered. - type: string - authQuerySecret: - description: |- - The credentials of the user that need to be used for the authentication - query. In case it is specified, also an AuthQuery - (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") - has to be specified and no automatic CNPG Cluster integration will be triggered. - properties: - name: - description: Name of the referent. 
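Both relabeling lists above take the same Prometheus RelabelConfig shape: per the descriptions, podMonitorRelabelings is applied before scraping and podMonitorMetricRelabelings to samples before ingestion. A hedged sketch, with the metric name pattern assumed:

```yaml
spec:
  cluster:
    name: cluster-example
  pgbouncer:
    poolMode: session
  monitoring:
    enablePodMonitor: true
    podMonitorMetricRelabelings:
      - action: drop                      # drop noisy series before ingestion
        sourceLabels: [__name__]
        regex: pgbouncer_stats_.*         # assumed metric name pattern
    podMonitorRelabelings:
      - action: replace
        sourceLabels: [__meta_kubernetes_pod_node_name]
        targetLabel: node                 # copy the node name onto scraped samples
```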
- type: string - required: - - name - type: object - parameters: - additionalProperties: - type: string - description: |- - Additional parameters to be passed to PgBouncer - please check - the CNPG documentation for a list of options you can configure - type: object - paused: - default: false - description: |- - When set to `true`, PgBouncer will disconnect from the PostgreSQL - server, first waiting for all queries to complete, and pause all new - client connections until this value is set to `false` (default). Internally, - the operator calls PgBouncer's `PAUSE` and `RESUME` commands. - type: boolean - pg_hba: - description: |- - PostgreSQL Host Based Authentication rules (lines to be appended - to the pg_hba.conf file) - items: - type: string - type: array - poolMode: - default: session - description: 'The pool mode. Default: `session`.' - enum: - - session - - transaction - type: string - type: object - serviceTemplate: - description: Template for the Service to be created - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the service. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - allocateLoadBalancerNodePorts: - description: |- - allocateLoadBalancerNodePorts defines if NodePorts will be automatically - allocated for services with type LoadBalancer. Default is "true". It - may be set to "false" if the cluster load-balancer does not rely on - NodePorts. If the caller requests specific NodePorts (by specifying a - value), those requests will be respected, regardless of this field. - This field may only be set for services with type LoadBalancer and will - be cleared if the type is changed to any other type. - type: boolean - clusterIP: - description: |- - clusterIP is the IP address of the service and is usually assigned - randomly. If an address is specified manually, is in-range (as per - system configuration), and is not in use, it will be allocated to the - service; otherwise creation of the service will fail. This field may not - be changed through updates unless the type field is also being changed - to ExternalName (which requires this field to be blank) or the type - field is being changed from ExternalName (in which case this field may - optionally be specified, as describe above). Valid values are "None", - empty string (""), or a valid IP address. Setting this to "None" makes a - "headless service" (no virtual IP), which is useful when direct endpoint - connections are preferred and proxying is not required. 
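Putting the PgBouncer settings above together: parameters is a plain string-to-string map handed to PgBouncer, pg_hba lines are appended to pg_hba.conf, and poolMode accepts only session or transaction. A sketch with assumed values:

```yaml
spec:
  cluster:
    name: cluster-example
  instances: 1
  pgbouncer:
    poolMode: transaction
    parameters:                          # values are strings, per the schema
      max_client_conn: "500"
      default_pool_size: "20"
    pg_hba:
      - host all all 10.0.0.0/8 md5      # assumed network range
    paused: false                        # flip to true to PAUSE PgBouncer via the operator
```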
Only applies to - types ClusterIP, NodePort, and LoadBalancer. If this field is specified - when creating a Service of type ExternalName, creation will fail. This - field will be wiped when updating a Service to type ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - clusterIPs: - description: |- - ClusterIPs is a list of IP addresses assigned to this service, and are - usually assigned randomly. If an address is specified manually, is - in-range (as per system configuration), and is not in use, it will be - allocated to the service; otherwise creation of the service will fail. - This field may not be changed through updates unless the type field is - also being changed to ExternalName (which requires this field to be - empty) or the type field is being changed from ExternalName (in which - case this field may optionally be specified, as describe above). Valid - values are "None", empty string (""), or a valid IP address. Setting - this to "None" makes a "headless service" (no virtual IP), which is - useful when direct endpoint connections are preferred and proxying is - not required. Only applies to types ClusterIP, NodePort, and - LoadBalancer. If this field is specified when creating a Service of type - ExternalName, creation will fail. This field will be wiped when updating - a Service to type ExternalName. If this field is not specified, it will - be initialized from the clusterIP field. If this field is specified, - clients must ensure that clusterIPs[0] and clusterIP have the same - value. - - This field may hold a maximum of two entries (dual-stack IPs, in either order). - These IPs must correspond to the values of the ipFamilies field. Both - clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalIPs: - description: |- - externalIPs is a list of IP addresses for which nodes in the cluster - will also accept traffic for this service. These IPs are not managed by - Kubernetes. The user is responsible for ensuring that traffic arrives - at a node with this IP. A common example is external load-balancers - that are not part of the Kubernetes system. - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalName: - description: |- - externalName is the external reference that discovery mechanisms will - return as an alias for this service (e.g. a DNS CNAME record). No - proxying will be involved. Must be a lowercase RFC-1123 hostname - (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". - type: string - externalTrafficPolicy: - description: |- - externalTrafficPolicy describes how nodes distribute service traffic they - receive on one of the Service's "externally-facing" addresses (NodePorts, - ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure - the service in a way that assumes that external load balancers will take care - of balancing the service traffic between nodes, and so each node will deliver - traffic only to the node-local endpoints of the service, without masquerading - the client source IP. (Traffic mistakenly sent to a node with no endpoints will - be dropped.) 
The default value, "Cluster", uses the standard behavior of - routing to all endpoints evenly (possibly modified by topology and other - features). Note that traffic sent to an External IP or LoadBalancer IP from - within the cluster will always get "Cluster" semantics, but clients sending to - a NodePort from within the cluster may need to take traffic policy into account - when picking a node. - type: string - healthCheckNodePort: - description: |- - healthCheckNodePort specifies the healthcheck nodePort for the service. - This only applies when type is set to LoadBalancer and - externalTrafficPolicy is set to Local. If a value is specified, is - in-range, and is not in use, it will be used. If not specified, a value - will be automatically allocated. External systems (e.g. load-balancers) - can use this port to determine if a given node holds endpoints for this - service or not. If this field is specified when creating a Service - which does not need it, creation will fail. This field will be wiped - when updating a Service to no longer need it (e.g. changing type). - This field cannot be updated once set. - format: int32 - type: integer - internalTrafficPolicy: - description: |- - InternalTrafficPolicy describes how nodes distribute service traffic they - receive on the ClusterIP. If set to "Local", the proxy will assume that pods - only want to talk to endpoints of the service on the same node as the pod, - dropping the traffic if there are no local endpoints. The default value, - "Cluster", uses the standard behavior of routing to all endpoints evenly - (possibly modified by topology and other features). - type: string - ipFamilies: - description: |- - IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - service. This field is usually assigned automatically based on cluster - configuration and the ipFamilyPolicy field. If this field is specified - manually, the requested family is available in the cluster, - and ipFamilyPolicy allows it, it will be used; otherwise creation of - the service will fail. This field is conditionally mutable: it allows - for adding or removing a secondary IP family, but it does not allow - changing the primary IP family of the Service. Valid values are "IPv4" - and "IPv6". This field only applies to Services of types ClusterIP, - NodePort, and LoadBalancer, and does apply to "headless" services. - This field will be wiped when updating a Service to type ExternalName. - - This field may hold a maximum of two entries (dual-stack families, in - either order). These families must correspond to the values of the - clusterIPs field, if specified. Both clusterIPs and ipFamilies are - governed by the ipFamilyPolicy field. - items: - description: |- - IPFamily represents the IP Family (IPv4 or IPv6). This type is used - to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). - type: string - type: array - x-kubernetes-list-type: atomic - ipFamilyPolicy: - description: |- - IPFamilyPolicy represents the dual-stack-ness requested or required by - this Service. If there is no value provided, then this field will be set - to SingleStack. Services can be "SingleStack" (a single IP family), - "PreferDualStack" (two IP families on dual-stack configured clusters or - a single IP family on single-stack clusters), or "RequireDualStack" - (two IP families on dual-stack configured clusters, otherwise fail). The - ipFamilies and clusterIPs fields depend on the value of this field. 
This - field will be wiped when updating a service to type ExternalName. - type: string - loadBalancerClass: - description: |- - loadBalancerClass is the class of the load balancer implementation this Service belongs to. - If specified, the value of this field must be a label-style identifier, with an optional prefix, - e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. - This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load - balancer implementation is used, today this is typically done through the cloud provider integration, - but should apply for any default implementation. If set, it is assumed that a load balancer - implementation is watching for Services with a matching class. Any default load balancer - implementation (e.g. cloud providers) should ignore Services that set this field. - This field can only be set when creating or updating a Service to type 'LoadBalancer'. - Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - type: string - loadBalancerIP: - description: |- - Only applies to Service Type: LoadBalancer. - This feature depends on whether the underlying cloud-provider supports specifying - the loadBalancerIP when a load balancer is created. - This field will be ignored if the cloud-provider does not support the feature. - Deprecated: This field was under-specified and its meaning varies across implementations. - Using it is non-portable and it may not support dual-stack. - Users are encouraged to use implementation-specific annotations when available. - type: string - loadBalancerSourceRanges: - description: |- - If specified and supported by the platform, this will restrict traffic through the cloud-provider - load-balancer will be restricted to the specified client IPs. This field will be ignored if the - cloud-provider does not support the feature." - More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ - items: - type: string - type: array - x-kubernetes-list-type: atomic - ports: - description: |- - The list of ports that are exposed by this service. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - description: ServicePort contains information on service's - port. - properties: - appProtocol: - description: |- - The application protocol for this port. - This is used as a hint for implementations to offer richer behavior for protocols that they understand. - This field follows standard Kubernetes label syntax. - Valid values are either: - - * Un-prefixed protocol names - reserved for IANA standard service names (as per - RFC-6335 and https://www.iana.org/assignments/service-names). - - * Kubernetes-defined prefixed names: - * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- - * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 - * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - - * Other protocols should use implementation-defined prefixed names such as - mycompany.com/my-custom-protocol. - type: string - name: - description: |- - The name of this port within the service. This must be a DNS_LABEL. - All ports within a ServiceSpec must have unique names. 
When considering - the endpoints for a Service, this must match the 'name' field in the - EndpointPort. - Optional if only one ServicePort is defined on this service. - type: string - nodePort: - description: |- - The port on each node on which this service is exposed when type is - NodePort or LoadBalancer. Usually assigned by the system. If a value is - specified, in-range, and not in use it will be used, otherwise the - operation will fail. If not specified, a port will be allocated if this - Service requires one. If this field is specified when creating a - Service which does not need it, creation will fail. This field will be - wiped when updating a Service to no longer need it (e.g. changing type - from NodePort to ClusterIP). - More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - format: int32 - type: integer - port: - description: The port that will be exposed by this service. - format: int32 - type: integer - protocol: - default: TCP - description: |- - The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Default is TCP. - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the pods targeted by the service. - Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - If this is a string, it will be looked up as a named port in the - target Pod's container ports. If this is not specified, the value - of the 'port' field is used (an identity map). - This field is ignored for services with clusterIP=None, and should be - omitted or set equal to the 'port' field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - x-kubernetes-int-or-string: true - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - - protocol - x-kubernetes-list-type: map - publishNotReadyAddresses: - description: |- - publishNotReadyAddresses indicates that any agent which deals with endpoints for this - Service should disregard any indications of ready/not-ready. - The primary use case for setting this field is for a StatefulSet's Headless Service to - propagate SRV DNS records for its Pods for the purpose of peer discovery. - The Kubernetes controllers that generate Endpoints and EndpointSlice resources for - Services interpret this to mean that all endpoints are considered "ready" even if the - Pods themselves are not. Agents which consume only Kubernetes generated endpoints - through the Endpoints or EndpointSlice resources can safely assume this behavior. - type: boolean - selector: - additionalProperties: - type: string - description: |- - Route service traffic to pods with label keys and values matching this - selector. If empty or not present, the service is assumed to have an - external process managing its endpoints, which Kubernetes will not - modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - Ignored if type is ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - type: object - x-kubernetes-map-type: atomic - sessionAffinity: - description: |- - Supports "ClientIP" and "None". Used to maintain session affinity. - Enable client IP based session affinity. - Must be ClientIP or None. - Defaults to None. 
- More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - sessionAffinityConfig: - description: sessionAffinityConfig contains the configurations - of session affinity. - properties: - clientIP: - description: clientIP contains the configurations of Client - IP based session affinity. - properties: - timeoutSeconds: - description: |- - timeoutSeconds specifies the seconds of ClientIP type session sticky time. - The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - Default value is 10800(for 3 hours). - format: int32 - type: integer - type: object - type: object - trafficDistribution: - description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. - type: string - type: - description: |- - type determines how the Service is exposed. Defaults to ClusterIP. Valid - options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - "ClusterIP" allocates a cluster-internal IP address for load-balancing - to endpoints. Endpoints are determined by the selector or if that is not - specified, by manual construction of an Endpoints object or - EndpointSlice objects. If clusterIP is "None", no virtual IP is - allocated and the endpoints are published as a set of endpoints rather - than a virtual IP. - "NodePort" builds on ClusterIP and allocates a port on every node which - routes to the same endpoints as the clusterIP. - "LoadBalancer" builds on NodePort and creates an external load-balancer - (if supported in the current cloud) which routes to the same endpoints - as the clusterIP. - "ExternalName" aliases this service to the specified externalName. - Several other fields do not apply to ExternalName services. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types - type: string - type: object - type: object - template: - description: The template of the Pod to be created - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the pod. 
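The serviceTemplate spec above is essentially a standard Kubernetes ServiceSpec that the operator merges into the pooler's Service. A sketch exposing the pooler through a LoadBalancer with client-IP affinity, assuming the source range and that the environment actually provides a load-balancer implementation:

```yaml
spec:
  cluster:
    name: cluster-example
  pgbouncer:
    poolMode: session
  serviceTemplate:
    metadata:
      labels:
        app.kubernetes.io/part-of: cluster-example   # assumed label
    spec:
      type: LoadBalancer
      externalTrafficPolicy: Local        # preserve the client source IP
      loadBalancerSourceRanges:
        - 10.0.0.0/8                      # assumed allowed client range
      sessionAffinity: ClientIP
      sessionAffinityConfig:
        clientIP:
          timeoutSeconds: 10800           # 3-hour sticky window (the default)
```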
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - activeDeadlineSeconds: - description: |- - Optional duration in seconds the pod may be active on the node relative to - StartTime before the system will actively try to mark it failed and kill associated containers. - Value must be a positive integer. - format: int64 - type: integer - affinity: - description: If specified, the pod's scheduling constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling rules - for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated - with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching - the corresponding nodeSelectorTerm, in the - range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector - terms. The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. 
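A minimal sketch of the required node affinity form described above: the nodeSelectorTerms are ORed while the expressions inside each term are ANDed. The architecture values are illustrative.

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:                    # terms are ORed
        - matchExpressions:                 # expressions within a term are ANDed
            - key: kubernetes.io/arch
              operator: In
              values: ["amd64"]
        - matchExpressions:
            - key: kubernetes.io/arch
              operator: In
              values: ["arm64"]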
The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
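A sketch of the preferred (weighted) pod affinity term covered by the fields above, assuming a hypothetical app.kubernetes.io/name label; it nudges the scheduler to co-locate pods on the same node without making that a hard requirement.

affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: my-app   # hypothetical label
          topologyKey: kubernetes.io/hostname  # co-locate on the same node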
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. 
- null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, - etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
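The required form of the same term is a hard constraint; a sketch assuming a hypothetical cache workload in a "caching" namespace, keyed on the zone label so the pod only schedules into zones that already run the cache.

affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            - key: app.kubernetes.io/component
              operator: In
              values: ["cache"]                # hypothetical selector
        namespaces: ["caching"]                # hypothetical namespace
        topologyKey: topology.kubernetes.io/zone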
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. 
- null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
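Pod anti-affinity mirrors the structure above with the selector's meaning inverted; a common soft-spread sketch, again with a placeholder label:

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: my-app   # hypothetical label
          topologyKey: kubernetes.io/hostname  # prefer one replica per node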
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. 
- null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - automountServiceAccountToken: - description: AutomountServiceAccountToken indicates whether - a service account token should be automatically mounted. - type: boolean - containers: - description: |- - List of containers belonging to the pod. - Containers cannot currently be added or removed. - There must be at least one container in a Pod. - Cannot be updated. - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. 
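Putting the container-level fields just described together, a minimal container entry might look like the following sketch; the image, command, and variable names are placeholders.

automountServiceAccountToken: false        # omit the SA token unless the pod needs the API
containers:
  - name: app
    image: registry.example.com/app:1.0    # hypothetical image
    command: ["/usr/local/bin/app"]        # overrides the image ENTRYPOINT
    args: ["--listen", "$(LISTEN_ADDR)"]   # $(VAR) is expanded from the container env
    env:
      - name: LISTEN_ADDR
        value: "0.0.0.0:8080"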
- Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. 
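The valueFrom sources above are usually mixed like this; the Secret name and container name are hypothetical.

env:
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace      # downward API field
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: app-credentials              # hypothetical Secret
        key: password
  - name: MEMORY_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: app                 # hypothetical container
        resource: limits.memory
        divisor: 1Mi                       # expose the value in mebibytes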
- items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-list-type: atomic - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. 
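The envFrom and image fields described above might be combined as in this sketch (ConfigMap and Secret names hypothetical); on duplicate keys, later sources and explicit env entries take precedence.

containers:
  - name: app
    image: registry.example.com/app:1.0    # hypothetical image
    imagePullPolicy: IfNotPresent          # Always is the default only for a :latest tag
    envFrom:
      - prefix: APP_                       # keys are exposed as APP_<key>
        configMapRef:
          name: app-config                 # hypothetical ConfigMap
      - secretRef:
          name: app-secrets                # hypothetical Secret
          optional: true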
- items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. 
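A sketch of the two lifecycle hooks described above; the paths and port are placeholders. Since tcpSocket is deprecated here, exec, httpGet, or sleep are the handlers to reach for.

lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/lifecycle"]
  preStop:
    httpGet:
      path: /shutdown                      # hypothetical drain endpoint
      port: 8080
      scheme: HTTP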
- properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. 
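The liveness probe fields above compose like this; the endpoint and timings are placeholders chosen to stay within the documented minimums and defaults.

livenessProbe:
  httpGet:
    path: /healthz                         # hypothetical endpoint
    port: 8080
    httpHeaders:
      - name: X-Probe
        value: liveness
  initialDelaySeconds: 10
  periodSeconds: 10                        # default
  timeoutSeconds: 1                        # default
  failureThreshold: 3                      # default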
- Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
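Container ports declared per the schema above are informational for most traffic, but named ports let Services reference a targetPort by name; a sketch with placeholder names and numbers:

ports:
  - name: http                             # IANA_SVC_NAME, unique within the pod
    containerPort: 8080
    protocol: TCP                          # default
  - name: metrics
    containerPort: 9187                    # hypothetical metrics port
    protocol: TCP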
- type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. 
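A readiness probe using the gRPC handler described above, with a placeholder port and service name:

readinessProbe:
  grpc:
    port: 9090                             # hypothetical gRPC port
    service: readiness                     # optional name sent in the HealthCheckRequest
  periodSeconds: 5
  successThreshold: 1                      # may exceed 1 only for readiness probes
  failureThreshold: 3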
- properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. 
Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. 
- type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. - If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. 
- type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - devicePath - x-kubernetes-list-type: map - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified - (which defaults to None). - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - recursiveReadOnly: - description: |- - RecursiveReadOnly specifies whether read-only mounts should be handled - recursively. - - If ReadOnly is false, this field has no meaning and must be unspecified. - - If ReadOnly is true, and this field is set to Disabled, the mount is not made - recursively read-only. If this field is set to IfPossible, the mount is made - recursively read-only, if it is supported by the container runtime. 
If this - field is set to Enabled, the mount is made recursively read-only if it is - supported by the container runtime, otherwise the pod will not be started and - an error will be generated to indicate the reason. - - If this field is set to IfPossible or Enabled, MountPropagation must be set to - None (or be unspecified, which defaults to None). - - If this field is not specified, it is treated as an equivalent of Disabled. - type: string - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - mountPath - x-kubernetes-list-type: map - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - dnsConfig: - description: |- - Specifies the DNS parameters of a pod. - Parameters specified here will be merged to the generated DNS - configuration based on DNSPolicy. - properties: - nameservers: - description: |- - A list of DNS name server IP addresses. - This will be appended to the base nameservers generated from DNSPolicy. - Duplicated nameservers will be removed. - items: - type: string - type: array - x-kubernetes-list-type: atomic - options: - description: |- - A list of DNS resolver options. - This will be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options given in Options - will override those that appear in the base DNSPolicy. - items: - description: PodDNSConfigOption defines DNS resolver - options of a pod. - properties: - name: - description: Required. - type: string - value: - type: string - type: object - type: array - x-kubernetes-list-type: atomic - searches: - description: |- - A list of DNS search domains for host-name lookup. - This will be appended to the base search paths generated from DNSPolicy. - Duplicated search paths will be removed. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - dnsPolicy: - description: |- - Set DNS policy for the pod. - Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have to specify DNS policy - explicitly to 'ClusterFirstWithHostNet'. - type: string - enableServiceLinks: - description: |- - EnableServiceLinks indicates whether information about services should be injected into pod's - environment variables, matching the syntax of Docker links. - Optional: Defaults to true. - type: boolean - ephemeralContainers: - description: |- - List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - pod to perform user-initiated actions such as debugging. 
This list cannot be specified when - creating a pod, and it cannot be modified by updating the pod spec. In order to add an - ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - items: - description: |- - An EphemeralContainer is a temporary container that you may add to an existing Pod for - user-initiated activities such as debugging. Ephemeral containers have no resource or - scheduling guarantees, and they will not be restarted when they exit or when a Pod is - removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the - Pod to exceed its resource allocation. - - To add an ephemeral container, use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted. - properties: - args: - description: |- - Arguments to the entrypoint. - The image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - command: - description: |- - Entrypoint array. Not executed within a shell. - The image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. 
- This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-list-type: atomic - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: Lifecycle is not allowed for ephemeral - containers. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the ephemeral container specified as a DNS_LABEL. - This name must be unique among all containers, init containers and ephemeral containers. - type: string - ports: - description: Ports are not allowed for ephemeral containers. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. 
Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. 
- Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - already allocated to the pod. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for the container to manage the restart behavior of each - container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. - type: string - securityContext: - description: |- - Optional: SecurityContext defines the security options the ephemeral container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. 
- Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. 
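seccompProfile can be set at the pod level and overridden per container, as the schema above notes; a minimal sketch, assuming a Localhost profile path that is hypothetical and would have to be preinstalled on every node:

apiVersion: v1
kind: Pod
metadata:
  name: seccomp-demo              # placeholder name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault        # pod-wide default: the container runtime's profile
  containers:
    - name: app
      image: busybox:1.36
      command: ["sleep", "3600"]
      securityContext:
        seccompProfile:
          type: Localhost
          localhostProfile: profiles/audit.json   # hypothetical; relative to the kubelet's seccomp directory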
- format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
- format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - targetContainerName: - description: |- - If set, the name of the container from PodSpec that this ephemeral container targets. - The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - If not set then the ephemeral container uses the namespaces configured in the Pod spec. - - The container runtime must implement support for this feature. If the runtime does not - support namespace targeting then the result of setting this field is undefined. - type: string - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - devicePath - x-kubernetes-list-type: map - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. 
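targetContainerName and the stdin/tty flags above belong to ephemeral containers, which are normally injected at runtime (for example by kubectl debug) rather than written into a manifest; a fragment showing only the shape of those fields, with placeholder names:

# Fragment only: ephemeralContainers live under a Pod's spec and are applied via the
# ephemeralcontainers subresource (e.g. `kubectl debug`), not in the original manifest.
spec:
  ephemeralContainers:
    - name: debugger
      image: busybox:1.36
      command: ["sh"]
      stdin: true                 # keep stdin open for an interactive shell
      tty: true
      targetContainerName: app    # placeholder; share this container's namespaces (PID, etc.)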
Subpath mounts are not allowed for ephemeral containers. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified - (which defaults to None). - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - recursiveReadOnly: - description: |- - RecursiveReadOnly specifies whether read-only mounts should be handled - recursively. - - If ReadOnly is false, this field has no meaning and must be unspecified. - - If ReadOnly is true, and this field is set to Disabled, the mount is not made - recursively read-only. If this field is set to IfPossible, the mount is made - recursively read-only, if it is supported by the container runtime. If this - field is set to Enabled, the mount is made recursively read-only if it is - supported by the container runtime, otherwise the pod will not be started and - an error will be generated to indicate the reason. - - If this field is set to IfPossible or Enabled, MountPropagation must be set to - None (or be unspecified, which defaults to None). - - If this field is not specified, it is treated as an equivalent of Disabled. - type: string - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - mountPath - x-kubernetes-list-type: map - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - hostAliases: - description: |- - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - x-kubernetes-list-type: atomic - ip: - description: IP address of the host file entry. - type: string - required: - - ip - type: object - type: array - x-kubernetes-list-map-keys: - - ip - x-kubernetes-list-type: map - hostIPC: - description: |- - Use the host's ipc namespace. - Optional: Default to false. 
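volumeMounts (including subPath) and hostAliases, both described above, are plain pod-spec fields; a small runnable sketch with placeholder names and addresses:

apiVersion: v1
kind: Pod
metadata:
  name: mounts-demo               # placeholder name
spec:
  hostAliases:                    # extra entries for the pod's /etc/hosts
    - ip: 10.0.0.10               # placeholder address
      hostnames: ["backup.internal"]
  containers:
    - name: app
      image: busybox:1.36
      command: ["sleep", "3600"]
      workingDir: /var/lib/app
      volumeMounts:
        - name: data
          mountPath: /var/lib/app
        - name: data
          mountPath: /var/cache/app
          subPath: cache          # mount only the "cache" subdirectory of the same volume
  volumes:
    - name: data
      emptyDir: {}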
- type: boolean - hostNetwork: - description: |- - Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. - Default to false. - type: boolean - hostPID: - description: |- - Use the host's pid namespace. - Optional: Default to false. - type: boolean - hostUsers: - description: |- - Use the host's user namespace. - Optional: Default to true. - If set to true or not present, the pod will be run in the host user namespace, useful - for when the pod needs a feature only available to the host user namespace, such as - loading a kernel module with CAP_SYS_MODULE. - When set to false, a new userns is created for the pod. Setting false is useful for - mitigating container breakout vulnerabilities even allowing users to run their - containers as root without actually having root privileges on the host. - This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - type: boolean - hostname: - description: |- - Specifies the hostname of the Pod - If not specified, the pod's hostname will be set to a system-defined value. - type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - If specified, these secrets will be passed to individual puller implementations for them to use. - More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - initContainers: - description: |- - List of initialization containers belonging to the pod. - Init containers are executed in order prior to containers being started. If any - init container fails, the pod is considered to have failed and is handled according - to its restartPolicy. The name for an init container or normal container must be - unique among all containers. - Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken into account during scheduling - by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers - in a similar fashion. - Init containers cannot currently be added or removed. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. 
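imagePullSecrets, as described above, is simply a list of Secret names in the pod's namespace; the registry and Secret names below are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: pull-secret-demo          # placeholder name
spec:
  imagePullSecrets:
    - name: private-registry-creds   # a kubernetes.io/dockerconfigjson Secret in the same namespace
  containers:
    - name: app
      image: registry.example.com/team/app:1.0   # placeholder private image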
Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. 
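The $(VAR_NAME) expansion and valueFrom sources described above can be combined as in this sketch; the ConfigMap name is hypothetical and marked optional so the pod still starts without it:

apiVersion: v1
kind: Pod
metadata:
  name: env-demo                  # placeholder name
spec:
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "env && sleep 3600"]
      env:
        - name: PGDATA
          value: /var/lib/postgresql/data
        - name: WALDIR
          value: $(PGDATA)/pg_wal         # expanded from the PGDATA entry above
        - name: LITERAL
          value: "$$(PGDATA)"             # escaped: the literal string "$(PGDATA)"
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name    # downward API field
        - name: LOG_LEVEL
          valueFrom:
            configMapKeyRef:
              name: app-settings          # hypothetical ConfigMap
              key: log_level
              optional: true              # tolerate the ConfigMap being absent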
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
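envFrom bulk-imports keys, while a single env entry with secretKeyRef overrides duplicates, as the schema text above states; ConfigMap and Secret names here are hypothetical and marked optional:

apiVersion: v1
kind: Pod
metadata:
  name: envfrom-demo              # placeholder name
spec:
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "env && sleep 3600"]
      envFrom:
        - configMapRef:
            name: app-settings    # hypothetical; every key becomes an env var
            optional: true
        - prefix: DB_             # keys exposed as DB_<key>
          secretRef:
            name: db-credentials  # hypothetical Secret
            optional: true
      env:
        - name: DB_PASSWORD       # an explicit env entry wins over envFrom on key conflicts
          valueFrom:
            secretKeyRef:
              name: db-credentials
              key: password
              optional: true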
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-list-type: atomic - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. 
- format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. 
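postStart and preStop hooks, including the newer sleep handler mentioned above, look roughly like this; the commands are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo            # placeholder name
spec:
  terminationGracePeriodSeconds: 30
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "sleep 3600"]
      lifecycle:
        postStart:                # runs right after the container is created
          exec:
            command: ["sh", "-c", "echo post-start >> /tmp/hooks"]
        preStop:                  # runs before SIGTERM; the grace period countdown has already begun
          exec:
            command: ["sh", "-c", "echo draining >> /tmp/hooks; sleep 10"]
        # On recent clusters, `preStop: {sleep: {seconds: 10}}` gives the same delay
        # without a shell, per the sleep handler described in the schema above.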
- format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. 
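A liveness probe restarts the container when it fails; this runnable httpGet sketch uses a stock nginx image, and the grpc variant shown in the schema would simply replace the httpGet block when the server implements gRPC health checking:

apiVersion: v1
kind: Pod
metadata:
  name: liveness-demo             # placeholder name
spec:
  containers:
    - name: web
      image: nginx:1.27           # any HTTP server works for this sketch
      ports:
        - containerPort: 80
      livenessProbe:
        httpGet:
          path: /
          port: 80
          scheme: HTTP
        initialDelaySeconds: 5
        periodSeconds: 10
        failureThreshold: 3       # restart after 3 consecutive failures
        # grpc: {port: 9090, service: ""} is the equivalent for gRPC servers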
- type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. 
- format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. 
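Named container ports and a readiness probe, as described above, commonly pair like this; the password value is a placeholder you would normally keep in a Secret:

apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo            # placeholder name
spec:
  containers:
    - name: db
      image: postgres:16
      env:
        - name: POSTGRES_PASSWORD
          value: example          # placeholder; use a Secret in real deployments
      ports:
        - name: postgres          # IANA_SVC_NAME-style name, referenced below and by Services
          containerPort: 5432
          protocol: TCP
      readinessProbe:
        tcpSocket:
          port: postgres          # port numbers and port names are both accepted
        periodSeconds: 5
        timeoutSeconds: 1
        successThreshold: 1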
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. 
- type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. 
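The restartPolicy: Always behaviour described above is the native-sidecar pattern (enabled by default since Kubernetes 1.29); a minimal sketch with placeholder names:

apiVersion: v1
kind: Pod
metadata:
  name: sidecar-demo              # placeholder name
spec:
  initContainers:
    - name: log-shipper
      image: busybox:1.36
      restartPolicy: Always       # keeps running alongside the regular containers
      command: ["sh", "-c", "touch /var/log/app/app.log && tail -F /var/log/app/app.log"]
      volumeMounts:
        - name: logs
          mountPath: /var/log/app
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "while true; do date >> /var/log/app/app.log; sleep 5; done"]
      volumeMounts:
        - name: logs
          mountPath: /var/log/app
  volumes:
    - name: logs
      emptyDir: {}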
- properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. - If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
- This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
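A startup probe with a generous failureThreshold is the usual way to cover the slow-start case described above (for example a database replaying WAL); a hedged sketch using the stock postgres image, with placeholder values:

apiVersion: v1
kind: Pod
metadata:
  name: startup-demo              # placeholder name
spec:
  containers:
    - name: db
      image: postgres:16
      env:
        - name: POSTGRES_PASSWORD
          value: example          # placeholder; use a Secret in real deployments
      startupProbe:               # liveness/readiness stay disabled until this succeeds
        exec:
          command: ["pg_isready", "-U", "postgres"]
        periodSeconds: 10
        failureThreshold: 30      # allow up to ~5 minutes for startup/recovery
      livenessProbe:
        exec:
          command: ["pg_isready", "-U", "postgres"]
        periodSeconds: 10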
- format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. 
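The startupProbe schema above accepts the same handlers as the other probes (exec, grpc, httpGet, tcpSocket) plus the usual thresholds. A minimal sketch of how those fields combine in a plain Kubernetes PodSpec fragment, assuming a hypothetical container named "database" listening on port 5432, might look like:

    containers:
      - name: database            # hypothetical container name
        image: postgres:16        # placeholder image
        startupProbe:
          tcpSocket:
            port: 5432            # probe the PostgreSQL port
          periodSeconds: 10
          timeoutSeconds: 1
          failureThreshold: 30    # up to 30 * 10s = 300s allowed for startup

Until this probe succeeds no other probes run, so a generous failureThreshold is the usual way to tolerate long crash recovery or cache warm-up at startup.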
- type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - devicePath - x-kubernetes-list-type: map - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified - (which defaults to None). - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - recursiveReadOnly: - description: |- - RecursiveReadOnly specifies whether read-only mounts should be handled - recursively. - - If ReadOnly is false, this field has no meaning and must be unspecified. - - If ReadOnly is true, and this field is set to Disabled, the mount is not made - recursively read-only. If this field is set to IfPossible, the mount is made - recursively read-only, if it is supported by the container runtime. If this - field is set to Enabled, the mount is made recursively read-only if it is - supported by the container runtime, otherwise the pod will not be started and - an error will be generated to indicate the reason. - - If this field is set to IfPossible or Enabled, MountPropagation must be set to - None (or be unspecified, which defaults to None). - - If this field is not specified, it is treated as an equivalent of Disabled. - type: string - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - mountPath - x-kubernetes-list-type: map - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. 
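To illustrate the volumeMounts schema documented above, here is a hedged PodSpec fragment that mounts one volume read-only and another through subPath; the container, volume, and path names are placeholders:

    containers:
      - name: app                     # placeholder name
        image: busybox:1.36           # placeholder image
        volumeMounts:
          - name: config              # must match a volume name under .volumes
            mountPath: /etc/app
            readOnly: true
          - name: data
            mountPath: /var/lib/app
            subPath: app-data         # mount only this sub-directory of the volume
    volumes:
      - name: config
        configMap:
          name: app-config            # hypothetical ConfigMap
      - name: data
        emptyDir: {}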
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - nodeName: - description: |- - NodeName indicates in which node this pod is scheduled. - If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. - Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. - This field should not be used to express a desire for the pod to be scheduled on a specific node. - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - x-kubernetes-map-type: atomic - os: - description: |- - Specifies the OS of the containers in the pod. - Some pod and container fields are restricted if this is set. - - If the OS field is set to linux, the following fields must be unset: - -securityContext.windowsOptions - - If the OS field is set to windows, following fields must be unset: - - spec.hostPID - - spec.hostIPC - - spec.hostUsers - - spec.securityContext.appArmorProfile - - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - - spec.shareProcessNamespace - - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - - spec.securityContext.supplementalGroupsPolicy - - spec.containers[*].securityContext.appArmorProfile - - spec.containers[*].securityContext.seLinuxOptions - - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup - properties: - name: - description: |- - Name is the name of the operating system. The currently supported values are linux and windows. - Additional value may be defined in future and can be one of: - https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values and treat unrecognized values in this field as os: null - type: string - required: - - name - type: object - overhead: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by the RuntimeClass admission controller. If - the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - The RuntimeClass admission controller will reject Pod create requests which have the overhead already - set. 
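nodeSelector and os are the simplest of the scheduling fields above. A sketch using two well-known node labels (the zone value is a placeholder):

    spec:
      os:
        name: linux                                # makes the windows-only fields listed above invalid
      nodeSelector:
        kubernetes.io/arch: amd64                  # well-known node label
        topology.kubernetes.io/zone: eu-west-1a    # placeholder zone value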
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - type: object - preemptionPolicy: - description: |- - PreemptionPolicy is the Policy for preempting pods with lower priority. - One of Never, PreemptLowerPriority. - Defaults to PreemptLowerPriority if unset. - type: string - priority: - description: |- - The priority value. Various system components use this field to find the - priority of the pod. When Priority Admission Controller is enabled, it - prevents users from setting this field. The admission controller populates - this field from PriorityClassName. - The higher the value, the higher the priority. - format: int32 - type: integer - priorityClassName: - description: |- - If specified, indicates the pod's priority. "system-node-critical" and - "system-cluster-critical" are two special keywords which indicate the - highest priorities with the former being the highest priority. Any other - name must be defined by creating a PriorityClass object with that name. - If not specified, the pod priority will be default or zero if there is no - default. - type: string - readinessGates: - description: |- - If specified, all readiness gates will be evaluated for pod readiness. - A pod is ready when all its containers are ready AND - all conditions specified in the readiness gates have status equal to "True" - More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - items: - description: PodReadinessGate contains the reference to - a pod condition - properties: - conditionType: - description: ConditionType refers to a condition in - the pod's condition list with matching type. - type: string - required: - - conditionType - type: object - type: array - x-kubernetes-list-type: atomic - resourceClaims: - description: |- - ResourceClaims defines which ResourceClaims must be allocated - and reserved before the Pod is allowed to start. The resources - will be made available to those containers which consume them - by name. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. - items: - description: |- - PodResourceClaim references exactly one ResourceClaim, either directly - or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim - for the pod. - - It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. - Containers that need access to the ResourceClaim reference it with this name. - properties: - name: - description: |- - Name uniquely identifies this resource claim inside the pod. - This must be a DNS_LABEL. - type: string - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - - Exactly one of ResourceClaimName and ResourceClaimTemplateName must - be set. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. - - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. 
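The priority fields above are normally driven by a PriorityClass object rather than a raw number. A hedged sketch, assuming a hypothetical PriorityClass named "database-critical":

    apiVersion: scheduling.k8s.io/v1
    kind: PriorityClass
    metadata:
      name: database-critical          # hypothetical class name
    value: 1000000                     # higher value means higher priority
    preemptionPolicy: PreemptLowerPriority
    ---
    # then, in the PodSpec:
    priorityClassName: database-critical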
The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - - Exactly one of ResourceClaimName and ResourceClaimTemplateName must - be set. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - restartPolicy: - description: |- - Restart policy for all containers within the pod. - One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - type: string - runtimeClassName: - description: |- - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - type: string - schedulerName: - description: |- - If specified, the pod will be dispatched by specified scheduler. - If not specified, the pod will be dispatched by default scheduler. - type: string - schedulingGates: - description: |- - SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - scheduler will not attempt to schedule the pod. - - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - items: - description: PodSchedulingGate is associated to a Pod to - guard its scheduling. - properties: - name: - description: |- - Name of the scheduling gate. - Each scheduling gate must have a unique name field. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - securityContext: - description: |- - SecurityContext holds pod-level security attributes and common container settings. - Optional: Defaults to empty. See type description for default values of each field. - properties: - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - 1. The owning GID will be the FSGroup - 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. - type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. 
- type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in - addition to the container's primary GID and fsGroup (if specified). If - the SupplementalGroupsPolicy feature is enabled, the - supplementalGroupsPolicy field determines whether these are in addition - to or instead of any group memberships defined in the container image. - If unspecified, no additional groups are added, though group memberships - defined in the container image may still be used, depending on the - supplementalGroupsPolicy field. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - x-kubernetes-list-type: atomic - supplementalGroupsPolicy: - description: |- - Defines how supplemental groups of the first container processes are calculated. - Valid values are "Merge" and "Strict". If not specified, "Merge" is used. - (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled - and the container runtime must implement support for this feature. - Note that this field cannot be set when spec.os.name is windows. - type: string - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. - items: - description: Sysctl defines a kernel parameter to be - set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of - the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- type: string - type: object - type: object - serviceAccount: - description: |- - DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. - Deprecated: Use serviceAccountName instead. - type: string - serviceAccountName: - description: |- - ServiceAccountName is the name of the ServiceAccount to use to run this pod. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - type: string - setHostnameAsFQDN: - description: |- - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - If a pod does not have FQDN, this has no effect. - Default to false. - type: boolean - shareProcessNamespace: - description: |- - Share a single process namespace between all of the containers in a pod. - When this is set containers will be able to view and signal processes from other containers - in the same pod, and the first process in each container will not be assigned PID 1. - HostPID and ShareProcessNamespace cannot both be set. - Optional: Default to false. - type: boolean - subdomain: - description: |- - If specified, the fully qualified Pod hostname will be "...svc.". - If not specified, the pod will not have a domainname at all. - type: string - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - If this value is nil, the default grace period will be used instead. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - Defaults to 30 seconds. - format: int64 - type: integer - tolerations: - description: If specified, the pod's tolerations. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. 
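Pulling together the pod-level securityContext fields documented above (runAsUser/runAsGroup, runAsNonRoot, fsGroup, fsGroupChangePolicy, seccompProfile), a hedged fragment that runs everything as a fixed unprivileged identity could look like this; the numeric IDs and ServiceAccount name are placeholders:

    securityContext:
      runAsUser: 999                 # placeholder UID
      runAsGroup: 999                # placeholder GID
      runAsNonRoot: true
      fsGroup: 999                   # supported volumes are made group-owned by this GID
      fsGroupChangePolicy: OnRootMismatch
      seccompProfile:
        type: RuntimeDefault
    serviceAccountName: my-app       # hypothetical ServiceAccount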
- format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - x-kubernetes-list-type: atomic - topologySpreadConstraints: - description: |- - TopologySpreadConstraints describes how a group of pods ought to spread across topology - domains. Scheduler will schedule pods in a way which abides by the constraints. - All topologySpreadConstraints are ANDed. - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. - properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. 
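A tolerations sketch matching the schema above, assuming a hypothetical "dedicated=database:NoSchedule" taint on a node pool, plus a bounded toleration of the built-in not-ready taint:

    tolerations:
      - key: dedicated
        operator: Equal
        value: database
        effect: NoSchedule
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 300       # evict 5 minutes after the node goes NotReady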
- For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - volumes: - description: |- - List of volumes that can be mounted by containers belonging to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes - items: - description: Volume represents a named volume in a pod that - may be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - format: int32 - type: integer - readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: boolean - volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk - mount on the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: - None, Read Only, Read Write.' 
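The maxSkew / whenUnsatisfiable discussion above translates into YAML such as the following fragment, which spreads pods carrying a hypothetical "app: database" label evenly across zones and, as a softer second constraint, across hostnames:

    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule        # hard constraint
        labelSelector:
          matchLabels:
            app: database                       # placeholder label
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: ScheduleAnyway       # best effort only
        labelSelector:
          matchLabels:
            app: database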
- type: string - diskName: - description: diskName is the Name of the data disk - in the blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in - the blob storage - type: string - fsType: - default: ext4 - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single - blob disk per storage account Managed: azure - managed data disk (only in managed availability - set). defaults to shared' - type: string - readOnly: - default: false - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service - mount on the host and bind mount to the pod. - properties: - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: secretName is the name of secret that - contains Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the - host that shares a pod's lifetime - properties: - monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - items: - type: string - type: array - x-kubernetes-list-type: atomic - path: - description: 'path is Optional: Used as the mounted - root, rather than the full Ceph tree, default - is /' - type: string - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: boolean - secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - required: - - monitors - type: object - cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: boolean - secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: |- - volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should - populate this volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. 
Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents - ephemeral storage that is handled by certain external - CSI drivers (Beta feature). - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about - the pod that should populate this volume - properties: - defaultMode: - description: |- - Optional: mode bits to use on created files by default. Must be a - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: Items is a list of downward API volume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name, - namespace and uid are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. 
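As an illustration of the configMap volume schema above, the fragment below projects a single key from a hypothetical ConfigMap with restrictive file permissions:

    volumes:
      - name: app-config
        configMap:
          name: app-config          # hypothetical ConfigMap name
          defaultMode: 0440         # octal mode; decimal 0-511 is also accepted
          optional: false
          items:
            - key: settings.conf    # project only this key
              path: settings.conf   # relative path inside the mount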
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. 
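A memory-backed emptyDir is a common use of the medium and sizeLimit fields above; a sketch for a scratch tmpfs capped at 256Mi (the name and size are placeholders):

    volumes:
      - name: scratch
        emptyDir:
          medium: Memory        # tmpfs backed by node RAM, counted against memory limits
          sizeLimit: 256Mi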
- - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. 
- For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. 
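dataSource and dataSourceRef, described above, are what allow a claim to be pre-populated, for example from a VolumeSnapshot. A hedged PVC-spec sketch, assuming a CSI driver with snapshot support; the StorageClass and snapshot names are placeholders:

    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: fast-ssd             # placeholder StorageClass
      resources:
        requests:
          storage: 20Gi
      dataSourceRef:
        apiGroup: snapshot.storage.k8s.io
        kind: VolumeSnapshot
        name: pg-data-snapshot               # hypothetical snapshot name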
- More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over - volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. 
- If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource - that is attached to a kubelet's host machine and then - exposed to the pod. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target - worldwide names (WWNs)' - items: - type: string - type: array - x-kubernetes-list-type: atomic - wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to - use for this volume. - type: string - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds - extra command options if any.' - type: object - readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached - to a kubelet's host machine. 
This depends on the Flocker - control service being running - properties: - datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. - This is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - properties: - fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - format: int32 - type: integer - pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: boolean - required: - - pdName - type: object - gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. - properties: - directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the - specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md - properties: - endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - path: - description: |- - path is the Glusterfs volume path. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - image: - description: |- - image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. - The volume is resolved at pod startup depending on which PullPolicy value is provided: - - - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. - - The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. - A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. - The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. - The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. - The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). - The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. - properties: - pullPolicy: - description: |- - Policy for pulling OCI objects. Possible values are: - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - type: string - reference: - description: |- - Required: Image or artifact reference to be used. - Behaves in the same way as pod.spec.containers[*].image. - Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. 
- More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - type: object - iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support - iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support - iSCSI Session CHAP authentication - type: boolean - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - type: string - initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - default: default - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - x-kubernetes-list-type: atomic - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI - target and initiator authentication - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: |- - targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - properties: - path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: boolean - server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - properties: - claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - type: string - readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host - machine - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: pdID is the ID that identifies Photon - Controller persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume - attached and mounted on kubelets host machine - properties: - fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx - volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources - secrets, configmaps, and downward API - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. Each entry in this list - handles one source. - items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. 
- - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a - list of label selector requirements. - The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume - root to write the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. - type: string - required: - - path - type: object - configMap: - description: configMap information about the - configMap data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. 
- items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether - the ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about - the downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects - a field of the pod: only annotations, - labels, name, namespace and uid - are supported.' - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file - to be created. Must not be absolute - or contain the ''..'' path. Must - be utf-8 encoded. The first item - of the relative path must not - start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the - secret data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. 
The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. - type: string - required: - - path - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - quobyte: - description: quobyte represents a Quobyte mount on the - host that shares a pod's lifetime - properties: - group: - description: |- - group to map volume access to - Default is no group - type: string - readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. - type: boolean - registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes - type: string - tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: |- - user to map volume access to - Defaults to serivceaccount user - type: string - volume: - description: volume is a string that references - an already created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - type: string - image: - description: |- - image is the rados image name. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - keyring: - default: /etc/ceph/keyring - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - items: - type: string - type: array - x-kubernetes-list-type: atomic - pool: - default: rbd - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: boolean - secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - user: - default: admin - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent - volume attached and mounted on Kubernetes nodes. - properties: - fsType: - default: xfs - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". - type: string - gateway: - description: gateway is the host address of the - ScaleIO API Gateway. - type: string - protectionDomain: - description: protectionDomain is the name of the - ScaleIO Protection Domain for the configured storage. - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL - communication with Gateway, default false - type: boolean - storageMode: - default: ThinProvisioned - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage - Pool associated with the protection domain. - type: string - system: - description: system is the name of the storage system - as configured in ScaleIO. - type: string - volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. 
If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - optional: - description: optional field specify whether the - Secret or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume - attached and mounted on Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. - type: string - volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume - attached and mounted on kubelets host machine - properties: - fsType: - description: |- - fsType is filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy - Based Management (SPBM) profile ID associated - with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the storage Policy - Based Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies - vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - containers - type: object - type: object - type: - default: rw - description: 'Type of service to forward traffic to. Default: `rw`.' - enum: - - rw - - ro - type: string - required: - - cluster - - pgbouncer - type: object - status: - description: |- - Most recently observed status of the Pooler. This data may not be up to - date. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - instances: - description: The number of pods trying to be scheduled - format: int32 - type: integer - secrets: - description: The resource version of the config object - properties: - clientCA: - description: The client CA secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - pgBouncerSecrets: - description: The version of the secrets used by PgBouncer - properties: - authQuery: - description: The auth query secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - type: object - serverCA: - description: The server CA secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - serverTLS: - description: The server TLS secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - type: object - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 - helm.sh/resource-policy: keep - name: scheduledbackups.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - backupOwnerReference: - default: none - description: |- - Indicates which ownerReference should be put inside the created backup resources.
- - none: no owner reference for created backup objects (same behavior as before the field was introduced)
- - self: sets the Scheduled backup object as owner of the backup
- - cluster: set the cluster as owner of the backup
- enum: - - none - - self - - cluster - type: string - cluster: - description: The cluster to backup - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - immediate: - description: If the first backup has to be immediately start after - creation or not - type: boolean - method: - default: barmanObjectStore - description: |- - The backup method to be used, possible options are `barmanObjectStore`, - `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. - enum: - - barmanObjectStore - - volumeSnapshot - - plugin - type: string - online: - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' - type: boolean - onlineConfiguration: - description: |- - Configuration parameters to control the online/hot backup with volume snapshots - Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - pluginConfiguration: - description: Configuration parameters passed to the plugin managing - this backup - properties: - name: - description: Name is the name of the plugin managing this backup - type: string - parameters: - additionalProperties: - type: string - description: |- - Parameters are the configuration parameters passed to the backup - plugin for this backup - type: object - required: - - name - type: object - schedule: - description: |- - The schedule does not follow the same format used in Kubernetes CronJobs - as it includes an additional seconds specifier, - see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format - type: string - suspend: - description: If this backup is suspended or not - type: boolean - target: - description: |- - The policy to decide which instance should perform this backup. If empty, - it defaults to `cluster.spec.backup.target`. - Available options are empty string, `primary` and `prefer-standby`. - `primary` to have backups run always on primary instances, - `prefer-standby` to have backups run preferably on the most updated - standby, if available. - enum: - - primary - - prefer-standby - type: string - required: - - cluster - - schedule - type: object - status: - description: |- - Most recently observed status of the ScheduledBackup. 
This data may not be up - to date. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} -{{- end }} diff --git a/charts/cloudnative-pg/templates/rbac.yaml b/charts/cloudnative-pg/templates/rbac.yaml deleted file mode 100644 index 4452005f1..000000000 --- a/charts/cloudnative-pg/templates/rbac.yaml +++ /dev/null @@ -1,314 +0,0 @@ -# -# Copyright The CloudNativePG Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -{{- if .Values.serviceAccount.create }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "cloudnative-pg.serviceAccountName" . }} - labels: - {{- include "cloudnative-pg.labels" . | nindent 4 }} - {{- with .Values.commonAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} - -{{- if .Values.rbac.create }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "cloudnative-pg.fullname" . }} - labels: - {{- include "cloudnative-pg.labels" . | nindent 4 }} - {{- with .Values.commonAnnotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - - secrets/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - - pods - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - patch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -- apiGroups: - - monitoring.coreos.com - resources: - - podmonitors - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - backups - - clusters - - poolers - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - backups/status - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.cnpg.io - resources: - - clusterimagecatalogs - - imagecatalogs - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/finalizers - - poolers/finalizers - verbs: - - update -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/status - - poolers/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - - roles - verbs: - - create - - get - - list - - patch - - update - - watch -- apiGroups: - - snapshot.storage.k8s.io - resources: - - volumesnapshots - verbs: - - create - - get - - list - - patch - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "cloudnative-pg.fullname" . }} - labels: - {{- include "cloudnative-pg.labels" . | nindent 4 }} - {{- with .Values.commonAnnotations.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "cloudnative-pg.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ include "cloudnative-pg.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "cloudnative-pg.fullname" . }}-view - labels: - {{- include "cloudnative-pg.labels" . 
| nindent 4 }} - {{- if .Values.rbac.aggregateClusterRoles }} - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" - {{- end }} -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - backups - - clusters - - poolers - - scheduledbackups - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "cloudnative-pg.fullname" . }}-edit - labels: - {{- include "cloudnative-pg.labels" . | nindent 4 }} - {{- if .Values.rbac.aggregateClusterRoles }} - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" - {{- end }} -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - backups - - clusters - - poolers - - scheduledbackups - verbs: - - create - - delete - - deletecollection - - patch - - update ---- -{{- end }} diff --git a/charts/cluster/templates/image-catalog-timescaledb-ha.yaml b/charts/cluster/templates/image-catalog-timescaledb-ha.yaml deleted file mode 100644 index 9728f5673..000000000 --- a/charts/cluster/templates/image-catalog-timescaledb-ha.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if eq (include "cluster.useTimescaleDBDefaults" .) "true" -}} -apiVersion: postgresql.cnpg.io/v1 -kind: ImageCatalog -metadata: - name: {{ include "cluster.fullname" . }}-timescaledb-ha -spec: - images: - - major: 12 - image: timescale/timescaledb-ha:pg12-ts{{ .Values.version.timescaledb }} - - major: 13 - image: timescale/timescaledb-ha:pg13-ts{{ .Values.version.timescaledb }} - - major: 14 - image: timescale/timescaledb-ha:pg14-ts{{ .Values.version.timescaledb }} - - major: 15 - image: timescale/timescaledb-ha:pg15-ts{{ .Values.version.timescaledb }} - - major: 16 - image: timescale/timescaledb-ha:pg16-ts{{ .Values.version.timescaledb }} -{{ end }} diff --git a/charts/paradedb/README.md b/charts/paradedb/README.md index fabac0b69..75bff666e 100644 --- a/charts/paradedb/README.md +++ b/charts/paradedb/README.md @@ -4,21 +4,36 @@ The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. -The chart is also available on [ArtifactHub](https://artifacthub.io/packages/helm/paradedb/paradedb). +The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb). ## Getting Started First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). +### Installing the Prometheus Stack + +The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. 
If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install it with: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm upgrade --atomic --install prometheus-community \ +--create-namespace \ +--namespace prometheus-community \ +--values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \ +prometheus-community/kube-prometheus-stack +``` + ### Installing the CloudNativePG Operator -Skip this step if the CNPG operator is already installed in your cluster. +Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. -```console +```bash helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install cnpg \ ---namespace cnpg-system \ +helm upgrade --atomic --install cnpg \ --create-namespace \ +--namespace cnpg-system \ +--set monitoring.podMonitorEnabled=true \ +--set monitoring.grafanaDashboard.create=true \ cnpg/cloudnative-pg ``` @@ -31,50 +46,59 @@ type: paradedb mode: standalone cluster: - instances: 2 + instances: 3 storage: size: 256Mi ``` -Then, launch the ParadeDB cluster. +Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command. ```bash helm repo add paradedb https://paradedb.github.io/charts -helm upgrade --install paradedb \ ---namespace paradedb-database \ +helm upgrade --atomic --install paradedb \ +--namespace paradedb \ --create-namespace \ --values values.yaml \ +--set cluster.monitoring.enabled=true \ paradedb/paradedb ``` -If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). - -A more detailed guide on launching the cluster can be found in the [Getting Started docs](<./docs/Getting Started.md>). To get started with ParadeDB, we suggest you follow the [quickstart guide](/documentation/getting-started/quickstart). +If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). ### Connecting to a ParadeDB CNPG Cluster The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be: ```bash -kubectl --namespace paradedb-database exec --stdin --tty services/paradedb-rw -- bash +kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash ``` -This will launch a shell inside the instance. You can connect via `psql` with: +This will launch a Bash shell inside the instance. 
You can connect to the ParadeDB database via `psql` with: + +```bash +psql -d paradedb +``` + +### Connecting to the Grafana Dashboard + +To connect to the Grafana dashboard for your cluster, we suggest port forwarding the Kubernetes service running Grafana to localhost: + +```bash +kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80 +``` + +You can then access the Grafana dashboard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are +defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus CRDs](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file. + ## Development To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. ```bash -helm upgrade --install paradedb --namespace paradedb-database --create-namespace ./charts/paradedb +helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb ``` -Cluster Configuration ---------------------- +## Cluster Configuration ### Database types @@ -83,6 +107,7 @@ To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter. ### Modes of operation The chart has three modes of operation. These are configured via the `mode` parameter: + * `standalone` - Creates new or updates an existing CNPG cluster. This is the default mode. * `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** * `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. @@ -99,8 +124,10 @@ providers are supported: * Google Cloud Storage Additionally you can specify the following parameters: + * `backups.retentionPolicy` - The retention policy for backups. Defaults to `30d`. * `backups.scheduledBackups` - An array of scheduled backups containing a name and a crontab schedule. Example: + ```yaml backups: scheduledBackups: @@ -113,13 +140,11 @@ Each backup adapter takes it's own set of parameters, listed in the [Configurati below. Refer to the table for the full list of parameters and place the configuration under the appropriate key: `backup.s3`, `backup.azure`, or `backup.google`. -Recovery --------- +## Recovery There is a separate document outlining the recovery procedure here: **[Recovery](docs/recovery.md)** -Examples --------- +## Examples There are several configuration examples in the [examples](examples) directory. Refer to them for a basic setup and refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. @@ -179,7 +204,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.monitoring.customQueries | list | `[]` | Custom Prometheus metrics Will be stored in the ConfigMap | | cluster.monitoring.customQueriesSecret | list | `[]` | The list of secrets containing the custom queries | | cluster.monitoring.disableDefaultQueries | bool | `false` | Whether the default queries should be injected. Set it to true if you don't want to inject default queries into the cluster. 
| -| cluster.monitoring.enabled | bool | `false` | Whether to enable monitoring | +| cluster.monitoring.enabled | bool | `true` | Whether to enable monitoring | | cluster.monitoring.podMonitor.enabled | bool | `true` | Whether to enable the PodMonitor | | cluster.monitoring.podMonitor.metricRelabelings | list | `[]` | The list of metric relabelings for the PodMonitor. Applied to samples before ingestion. | | cluster.monitoring.podMonitor.relabelings | list | `[]` | The list of relabelings for the PodMonitor. Applied to samples before scraping. | @@ -187,7 +212,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.monitoring.prometheusRule.excludeRules | list | `[]` | Exclude specified rules | | cluster.postgresGID | int | `-1` | The GID of the postgres user inside the image, defaults to 26 | | cluster.postgresUID | int | `-1` | The UID of the postgres user inside the image, defaults to 26 | -| cluster.postgresql.parameters | object | `{}` | PostgreSQL configuration options (postgresql.conf) | +| cluster.postgresql.parameters | object | `{"cron.database_name":"postgres"}` | PostgreSQL configuration options (postgresql.conf) | | cluster.postgresql.pg_hba | list | `[]` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | | cluster.postgresql.pg_ident | list | `[]` | PostgreSQL User Name Maps rules (lines to be appended to the pg_ident.conf file) | | cluster.postgresql.shared_preload_libraries | list | `[]` | Lists of shared preload libraries to add to the default ones | diff --git a/charts/paradedb/README.md.gotmpl b/charts/paradedb/README.md.gotmpl index 879b04577..b55bced91 100644 --- a/charts/paradedb/README.md.gotmpl +++ b/charts/paradedb/README.md.gotmpl @@ -4,21 +4,54 @@ {{ template "chart.deprecationWarning" . }} -This README documents the Helm chart for deploying and managing [ParadeDB](https://github.com/paradedb/paradedb) on Kubernetes via [CloudNativePG](https://cloudnative-pg.io/), including advanced settings. +The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication. -Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. +Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. + +The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb). ## Getting Started -### Installing the Operator +First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). 
+ +### Installing the Prometheus Stack + +The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install it with: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm upgrade --atomic --install prometheus-community \ +--create-namespace \ +--namespace prometheus-community \ +--values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \ +prometheus-community/kube-prometheus-stack +``` + +### Installing the CloudNativePG Operator -Skip this step if the CNPG operator is already installed in your cluster. +Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. -```console +```bash helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --install cnpg \ +helm upgrade --atomic --install cnpg \ +--create-namespace \ --namespace cnpg-system \ +--set monitoring.podMonitorEnabled=true \ +--set monitoring.grafanaDashboard.create=true \ +cnpg/cloudnative-pg +``` + +### Installing the CloudNativePG Operator + +Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. + +```bash +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm upgrade --atomic --install cnpg \ --create-namespace \ +--namespace cnpg-system \ +--set monitoring.podMonitorEnabled=true \ +--set monitoring.grafanaDashboard.create=true \ cnpg/cloudnative-pg ``` @@ -31,26 +64,59 @@ type: paradedb mode: standalone cluster: - instances: 2 + instances: 3 storage: size: 256Mi ``` -You can refer to the other examples in the [`charts/paradedb/examples`](https://github.com/paradedb/charts/tree/main/charts/paradedb/examples) directory. +Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command. -```console +```bash helm repo add paradedb https://paradedb.github.io/charts -helm upgrade --install paradedb \ ---namespace paradedb-database \ +helm upgrade --atomic --install paradedb \ +--namespace paradedb \ --create-namespace \ --values values.yaml \ +--set cluster.monitoring.enabled=true \ paradedb/paradedb ``` -A more detailed guide can be found in the [Getting Started docs](<./docs/Getting Started.md>). +If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md). + +### Connecting to a ParadeDB CNPG Cluster + +The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be: + +```bash +kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash +``` + +This will launch a Bash shell inside the instance. 
You can connect to the ParadeDB database via `psql` with: + +```bash +psql -d paradedb +``` + +### Connecting to the Grafana Dashboard + +To connect to the Grafana dashboard for your cluster, we suggest port forwarding the Kubernetes service running Grafana to localhost: + +```bash +kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80 +``` + +You can then access the Grafana dashboard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are +defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus CRDs](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file. -Cluster Configuration ---------------------- +## Development + +To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. + +```bash +helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb +``` + +## Cluster Configuration ### Database types @@ -59,6 +125,7 @@ To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter. ### Modes of operation The chart has three modes of operation. These are configured via the `mode` parameter: + * `standalone` - Creates new or updates an existing CNPG cluster. This is the default mode. * `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** * `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. @@ -75,8 +142,10 @@ providers are supported: * Google Cloud Storage Additionally you can specify the following parameters: + * `backups.retentionPolicy` - The retention policy for backups. Defaults to `30d`. * `backups.scheduledBackups` - An array of scheduled backups containing a name and a crontab schedule. Example: + ```yaml backups: scheduledBackups: @@ -89,15 +158,11 @@ Each backup adapter takes it's own set of parameters, listed in the [Configurati below. Refer to the table for the full list of parameters and place the configuration under the appropriate key: `backup.s3`, `backup.azure`, or `backup.google`. - -Recovery --------- +## Recovery There is a separate document outlining the recovery procedure here: **[Recovery](docs/recovery.md)** - -Examples --------- +## Examples There are several configuration examples in the [examples](examples) directory. Refer to them for a basic setup and refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. @@ -124,3 +189,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat {{ template "helm-docs.versionFooter" . }} + +## License + +ParadeDB is licensed under the [GNU Affero General Public License v3.0](LICENSE) and as commercial software. For commercial licensing, please contact us at [sales@paradedb.com](mailto:sales@paradedb.com). 
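Editor's note: as a quick post-install sanity check to go with the connection steps documented in the README above, the following sketch opens a shell on the primary through the read-write service and lists the extensions installed in the default database. It assumes the default release name and `paradedb` namespace used in this chart's examples; the `pg_search` extension mentioned in the comments is an assumption and the exact extension set may vary by ParadeDB version.

```bash
# Minimal post-install sanity check (assumes the default "paradedb" release
# name and namespace from the README commands above).

# Open a shell on the primary instance via the read-write service:
kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash

# Inside the pod, list the extensions installed in the default database.
# The ParadeDB extensions (e.g. pg_search) are expected to appear here,
# though the exact set depends on the ParadeDB version in use:
psql -d paradedb -c "SELECT extname, extversion FROM pg_extension ORDER BY extname;"
```

If the expected extensions are listed, the cluster bootstrapped correctly and is ready for the configuration options described in the values tables.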
diff --git a/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml b/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml index 575e02082..f14ee753c 100644 --- a/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml +++ b/charts/paradedb/test/monitoring/01-monitoring_cluster-assert.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: monitoring-cluster + name: monitoring-paradedb labels: foo: bar annotations: @@ -14,7 +14,7 @@ spec: monitoring: disableDefaultQueries: true customQueriesConfigMap: - - name: monitoring-cluster-monitoring + - name: monitoring-paradedb-monitoring key: custom-queries enablePodMonitor: true podMonitorRelabelings: diff --git a/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml b/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml index ee35bd9ba..4c3e08125 100644 --- a/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml +++ b/charts/paradedb/test/pooler/01-pooler_cluster-assert.yaml @@ -8,7 +8,7 @@ status: apiVersion: apps/v1 kind: Deployment metadata: - name: pooler-cluster-pooler-ro + name: pooler-paradedb-pooler-ro status: readyReplicas: 2 --- @@ -27,10 +27,10 @@ spec: apiVersion: postgresql.cnpg.io/v1 kind: Pooler metadata: - name: pooler-cluster-pooler-ro + name: pooler-paradedb-pooler-ro spec: cluster: - name: pooler-cluster + name: pooler-paradedb instances: 2 pgbouncer: poolMode: session From 42d77f438d349cc95b1da05b82ed353cdfb27088 Mon Sep 17 00:00:00 2001 From: Itay Grudev Date: Thu, 7 Nov 2024 01:42:18 +0200 Subject: [PATCH 4/8] Exposed serviceAccountTemplate for IRSA support (#54) --- charts/paradedb/templates/cluster.yaml | 3 +++ charts/paradedb/values.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/charts/paradedb/templates/cluster.yaml b/charts/paradedb/templates/cluster.yaml index c682546c5..c6d13b930 100644 --- a/charts/paradedb/templates/cluster.yaml +++ b/charts/paradedb/templates/cluster.yaml @@ -76,6 +76,9 @@ spec: {{- toYaml . 
| nindent 6 }} {{ end }} + # -- Configure the generation of the service account + serviceAccountTemplate: {} + monitoring: enablePodMonitor: {{ and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.podMonitor.enabled }} disableDefaultQueries: {{ .Values.cluster.monitoring.disableDefaultQueries }} diff --git a/charts/paradedb/values.yaml b/charts/paradedb/values.yaml index 7a93ff602..515db9627 100644 --- a/charts/paradedb/values.yaml +++ b/charts/paradedb/values.yaml @@ -279,6 +279,9 @@ cluster: # postInitApplicationSQL: [] # postInitTemplateSQL: [] + # -- Configure the generation of the service account + serviceAccountTemplate: {} + additionalLabels: {} annotations: {} From c9793cca21734816312566ca7041339cf53b5917 Mon Sep 17 00:00:00 2001 From: Itay Grudev Date: Thu, 7 Nov 2024 03:31:21 +0200 Subject: [PATCH 5/8] Feat recovery.mode=import (#53) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com> Co-authored-by: Philippe Noël <21990816+philippemnoel@users.noreply.github.com> --- charts/paradedb/README.md | 50 +++++++--- charts/paradedb/README.md.gotmpl | 14 --- charts/paradedb/templates/_bootstrap.tpl | 57 +++++------ .../templates/_external_source_cluster.tpl | 33 +++++++ .../00-source-cluster-assert.yaml | 6 ++ .../postgresql-import/00-source-cluster.yaml | 9 ++ .../00-source-superuser-password.yaml | 8 ++ .../01-data_write-assert.yaml | 6 ++ .../test/postgresql-import/01-data_write.yaml | 31 ++++++ .../02-import-cluster-assert.yaml | 6 ++ .../postgresql-import/02-import-cluster.yaml | 30 ++++++ .../03-data_test-assert.yaml | 6 ++ .../test/postgresql-import/03-data_test.yaml | 24 +++++ .../04-import-cluster-schema_only-assert.yaml | 6 ++ .../04-import-cluster-schema_only.yaml | 31 ++++++ .../05-data_test-assert.yaml | 6 ++ .../test/postgresql-import/05-data_test.yaml | 24 +++++ .../test/postgresql-import/chainsaw-test.yaml | 97 +++++++++++++++++++ .../00-source-cluster.yaml | 4 +- .../02-pg_basebackup-cluster.yaml | 4 +- charts/paradedb/values.schema.json | 90 +++++++++++++++++ charts/paradedb/values.yaml | 41 +++++++- 22 files changed, 523 insertions(+), 60 deletions(-) create mode 100644 charts/paradedb/templates/_external_source_cluster.tpl create mode 100644 charts/paradedb/test/postgresql-import/00-source-cluster-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/00-source-cluster.yaml create mode 100644 charts/paradedb/test/postgresql-import/00-source-superuser-password.yaml create mode 100644 charts/paradedb/test/postgresql-import/01-data_write-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/01-data_write.yaml create mode 100644 charts/paradedb/test/postgresql-import/02-import-cluster-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/02-import-cluster.yaml create mode 100644 charts/paradedb/test/postgresql-import/03-data_test-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/03-data_test.yaml create mode 100644 charts/paradedb/test/postgresql-import/04-import-cluster-schema_only-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/04-import-cluster-schema_only.yaml create mode 100644 charts/paradedb/test/postgresql-import/05-data_test-assert.yaml create mode 100644 charts/paradedb/test/postgresql-import/05-data_test.yaml create mode 100644 charts/paradedb/test/postgresql-import/chainsaw-test.yaml diff --git a/charts/paradedb/README.md 
b/charts/paradedb/README.md index 75bff666e..67ada2d92 100644 --- a/charts/paradedb/README.md +++ b/charts/paradedb/README.md @@ -204,7 +204,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.monitoring.customQueries | list | `[]` | Custom Prometheus metrics Will be stored in the ConfigMap | | cluster.monitoring.customQueriesSecret | list | `[]` | The list of secrets containing the custom queries | | cluster.monitoring.disableDefaultQueries | bool | `false` | Whether the default queries should be injected. Set it to true if you don't want to inject default queries into the cluster. | -| cluster.monitoring.enabled | bool | `true` | Whether to enable monitoring | +| cluster.monitoring.enabled | bool | `false` | Whether to enable monitoring | | cluster.monitoring.podMonitor.enabled | bool | `true` | Whether to enable the PodMonitor | | cluster.monitoring.podMonitor.metricRelabelings | list | `[]` | The list of metric relabelings for the PodMonitor. Applied to samples before ingestion. | | cluster.monitoring.podMonitor.relabelings | list | `[]` | The list of relabelings for the PodMonitor. Applied to samples before scraping. | @@ -212,7 +212,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.monitoring.prometheusRule.excludeRules | list | `[]` | Exclude specified rules | | cluster.postgresGID | int | `-1` | The GID of the postgres user inside the image, defaults to 26 | | cluster.postgresUID | int | `-1` | The UID of the postgres user inside the image, defaults to 26 | -| cluster.postgresql.parameters | object | `{"cron.database_name":"postgres"}` | PostgreSQL configuration options (postgresql.conf) | +| cluster.postgresql.parameters | object | `{}` | PostgreSQL configuration options (postgresql.conf) | | cluster.postgresql.pg_hba | list | `[]` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | | cluster.postgresql.pg_ident | list | `[]` | PostgreSQL User Name Maps rules (lines to be appended to the pg_ident.conf file) | | cluster.postgresql.shared_preload_libraries | list | `[]` | Lists of shared preload libraries to add to the default ones | @@ -251,7 +251,27 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.google.bucket | string | `""` | | | recovery.google.gkeEnvironment | bool | `false` | | | recovery.google.path | string | `"/"` | | -| recovery.method | string | `"backup"` | Available recovery methods: * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). * `pg_basebackup` - Recovers a CNPG cluster viaa streaming replication protocol. Useful if you want to migrate databases to CloudNativePG, even from outside Kubernetes. # TODO | +| recovery.import.databases | list | `[]` | Databases to import | +| recovery.import.postImportApplicationSQL | list | `[]` | List of SQL queries to be executed as a superuser in the application database right after it is imported. To be used with extreme care. Only available in microservice type. | +| recovery.import.roles | list | `[]` | Roles to import | +| recovery.import.schemaOnly | bool | `false` | When set to true, only the pre-data and post-data sections of pg_restore are invoked, avoiding data import. 
| +| recovery.import.source.database | string | `"paradedb"` | | +| recovery.import.source.host | string | `""` | | +| recovery.import.source.passwordSecret.create | bool | `false` | Whether to create a secret for the password | +| recovery.import.source.passwordSecret.key | string | `"password"` | The key in the secret containing the password | +| recovery.import.source.passwordSecret.name | string | `""` | Name of the secret containing the password | +| recovery.import.source.passwordSecret.value | string | `""` | The password value to use when creating the secret | +| recovery.import.source.port | int | `5432` | | +| recovery.import.source.sslCertSecret.key | string | `""` | | +| recovery.import.source.sslCertSecret.name | string | `""` | | +| recovery.import.source.sslKeySecret.key | string | `""` | | +| recovery.import.source.sslKeySecret.name | string | `""` | | +| recovery.import.source.sslMode | string | `"verify-full"` | | +| recovery.import.source.sslRootCertSecret.key | string | `""` | | +| recovery.import.source.sslRootCertSecret.name | string | `""` | | +| recovery.import.source.username | string | `""` | | +| recovery.import.type | string | `"microservice"` | One of `microservice` or `monolith`. See: https://cloudnative-pg.io/documentation/1.24/database_import/#how-it-works | +| recovery.method | string | `"backup"` | Available recovery methods: * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). * `pg_basebackup` - Recovers a CNPG cluster via a streaming replication protocol. Useful if you want to migrate databases to CloudNativePG, even from outside Kubernetes. * `import` - Import one or more databases from an existing Postgres cluster. | | recovery.pgBaseBackup.database | string | `"paradedb"` | Name of the database used by the application. Default: `paradedb`. | | recovery.pgBaseBackup.owner | string | `""` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | | recovery.pgBaseBackup.secret | string | `""` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. | @@ -282,18 +302,18 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | type | string | `"paradedb"` | Type of the CNPG database. Available types: * `paradedb` | | version.paradedb | string | `"0.11.0"` | We default to v0.11.0 for testing and local development | | version.postgresql | string | `"16"` | PostgreSQL major version to use | -| poolers[].name | string | `` | Name of the pooler resource | -| poolers[].instances | number | `1` | The number of replicas we want | -| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | -| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | -| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. 
| -| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | -| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | -| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | -| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | -| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | -| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | +| poolers[].name | string | `` | Name of the pooler resource | +| poolers[].instances | number | `1` | The number of replicas we want | +| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | +| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | +| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | +| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | +| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | +| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | +| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | ## Maintainers diff --git a/charts/paradedb/README.md.gotmpl b/charts/paradedb/README.md.gotmpl index b55bced91..c40de8dc9 100644 --- a/charts/paradedb/README.md.gotmpl +++ b/charts/paradedb/README.md.gotmpl @@ -41,20 +41,6 @@ helm upgrade --atomic --install cnpg \ cnpg/cloudnative-pg ``` -### Installing the CloudNativePG Operator - -Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. - -```bash -helm repo add cnpg https://cloudnative-pg.github.io/charts -helm upgrade --atomic --install cnpg \ ---create-namespace \ ---namespace cnpg-system \ ---set monitoring.podMonitorEnabled=true \ ---set monitoring.grafanaDashboard.create=true \ -cnpg/cloudnative-pg -``` - ### Setting up a ParadeDB CNPG Cluster Create a `values.yaml` and configure it to your requirements. Here is a basic example: diff --git a/charts/paradedb/templates/_bootstrap.tpl b/charts/paradedb/templates/_bootstrap.tpl index 8aa1fa290..f40643c66 100644 --- a/charts/paradedb/templates/_bootstrap.tpl +++ b/charts/paradedb/templates/_bootstrap.tpl @@ -3,7 +3,7 @@ bootstrap: initdb: {{- with .Values.cluster.initdb }} - {{- with (omit . "postInitSQL" "postInitApplicationSQL" "postInitTemplateSQL" "owner") }} + {{- with (omit . "postInitSQL" "postInitApplicationSQL" "postInitTemplateSQL" "owner" "import") }} {{- . 
| toYaml | nindent 4 }} {{- end }} {{- end }} @@ -70,33 +70,34 @@ bootstrap: {{- end }} externalClusters: -- name: pgBaseBackupSource - connectionParameters: - host: {{ .Values.recovery.pgBaseBackup.source.host | quote }} - port: {{ .Values.recovery.pgBaseBackup.source.port | quote }} - user: {{ .Values.recovery.pgBaseBackup.source.username | quote }} - dbname: {{ .Values.recovery.pgBaseBackup.source.database | quote }} - sslmode: {{ .Values.recovery.pgBaseBackup.source.sslMode | quote }} - {{- if .Values.recovery.pgBaseBackup.source.passwordSecret.name }} - password: - name: {{ default (printf "%s-pg-basebackup-password" (include "cluster.fullname" .)) .Values.recovery.pgBaseBackup.source.passwordSecret.name }} - key: {{ .Values.recovery.pgBaseBackup.source.passwordSecret.key }} - {{- end }} - {{- if .Values.recovery.pgBaseBackup.source.sslKeySecret.name }} - sslKey: - name: {{ .Values.recovery.pgBaseBackup.source.sslKeySecret.name }} - key: {{ .Values.recovery.pgBaseBackup.source.sslKeySecret.key }} - {{- end }} - {{- if .Values.recovery.pgBaseBackup.source.sslCertSecret.name }} - sslCert: - name: {{ .Values.recovery.pgBaseBackup.source.sslCertSecret.name }} - key: {{ .Values.recovery.pgBaseBackup.source.sslCertSecret.key }} - {{- end }} - {{- if .Values.recovery.pgBaseBackup.source.sslRootCertSecret.name }} - sslRootCert: - name: {{ .Values.recovery.pgBaseBackup.source.sslRootCertSecret.name }} - key: {{ .Values.recovery.pgBaseBackup.source.sslRootCertSecret.key }} - {{- end }} + {{- include "cluster.externalSourceCluster" (list "pgBaseBackupSource" .Values.recovery.pgBaseBackup.source) | nindent 2 }} + +{{- else if eq .Values.recovery.method "import" }} + initdb: + {{- with .Values.cluster.initdb }} + {{- with (omit . "owner" "import") }} + {{- . | toYaml | nindent 4 }} + {{- end }} + {{- end }} + {{- if .Values.cluster.initdb.owner }} + owner: {{ tpl .Values.cluster.initdb.owner . }} + {{- end }} + import: + source: + externalCluster: importSource + type: {{ .Values.recovery.import.type }} + databases: {{ .Values.recovery.import.databases | toJson }} + {{ with .Values.recovery.import.roles }} + roles: {{ . | toJson }} + {{- end }} + {{ with .Values.recovery.import.postImportApplicationSQL }} + postImportApplicationSQL: + {{- . | toYaml | nindent 6 }} + {{- end }} + schemaOnly: {{ .Values.recovery.import.schemaOnly }} + +externalClusters: + {{- include "cluster.externalSourceCluster" (list "importSource" .Values.recovery.import.source) | nindent 2 }} {{- else }} recovery: diff --git a/charts/paradedb/templates/_external_source_cluster.tpl b/charts/paradedb/templates/_external_source_cluster.tpl new file mode 100644 index 000000000..6d21e205e --- /dev/null +++ b/charts/paradedb/templates/_external_source_cluster.tpl @@ -0,0 +1,33 @@ +{{- define "cluster.externalSourceCluster" -}} +{{- $name := first . -}} +{{- $config := last . -}} +- name: {{ first . }} + connectionParameters: + host: {{ $config.host | quote }} + port: {{ $config.port | quote }} + user: {{ $config.username | quote }} + {{- with $config.database }} + dbname: {{ . 
| quote }} + {{- end }} + sslmode: {{ $config.sslMode | quote }} + {{- if $config.passwordSecret.name }} + password: + name: {{ $config.passwordSecret.name }} + key: {{ $config.passwordSecret.key }} + {{- end }} + {{- if $config.sslKeySecret.name }} + sslKey: + name: {{ $config.sslKeySecret.name }} + key: {{ $config.sslKeySecret.key }} + {{- end }} + {{- if $config.sslCertSecret.name }} + sslCert: + name: {{ $config.sslCertSecret.name }} + key: {{ $config.sslCertSecret.key }} + {{- end }} + {{- if $config.sslRootCertSecret.name }} + sslRootCert: + name: {{ $config.sslRootCertSecret.name }} + key: {{ $config.sslRootCertSecret.key }} + {{- end }} +{{- end }} diff --git a/charts/paradedb/test/postgresql-import/00-source-cluster-assert.yaml b/charts/paradedb/test/postgresql-import/00-source-cluster-assert.yaml new file mode 100644 index 000000000..f68d5419a --- /dev/null +++ b/charts/paradedb/test/postgresql-import/00-source-cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: source-paradedb +status: + readyInstances: 1 diff --git a/charts/paradedb/test/postgresql-import/00-source-cluster.yaml b/charts/paradedb/test/postgresql-import/00-source-cluster.yaml new file mode 100644 index 000000000..05c95bb54 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/00-source-cluster.yaml @@ -0,0 +1,9 @@ +type: postgresql +mode: "standalone" +cluster: + instances: 1 + superuserSecret: source-paradedb-superuser + storage: + size: 256Mi +backups: + enabled: false diff --git a/charts/paradedb/test/postgresql-import/00-source-superuser-password.yaml b/charts/paradedb/test/postgresql-import/00-source-superuser-password.yaml new file mode 100644 index 000000000..d08b66f87 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/00-source-superuser-password.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: source-paradedb-superuser +type: Opaque +data: + username: "cG9zdGdyZXM=" + password: "cG9zdGdyZXM=" diff --git a/charts/paradedb/test/postgresql-import/01-data_write-assert.yaml b/charts/paradedb/test/postgresql-import/01-data_write-assert.yaml new file mode 100644 index 000000000..831f963d9 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/01-data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +status: + succeeded: 1 diff --git a/charts/paradedb/test/postgresql-import/01-data_write.yaml b/charts/paradedb/test/postgresql-import/01-data_write.yaml new file mode 100644 index 000000000..9d538f6cd --- /dev/null +++ b/charts/paradedb/test/postgresql-import/01-data_write.yaml @@ -0,0 +1,31 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_USER + valueFrom: + secretKeyRef: + name: source-paradedb-superuser + key: username + - name: DB_PASS + valueFrom: + secretKeyRef: + name: source-paradedb-superuser + key: password + - name: DB_URI + value: postgres://$(DB_USER):$(DB_PASS)@source-paradedb-rw:5432 + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + psql "$DB_URI" -c "CREATE DATABASE mygooddb;" + psql "$DB_URI/mygooddb" -c "CREATE TABLE mygoodtable (id serial PRIMARY KEY);" + psql "$DB_URI/mygooddb" -c "INSERT INTO mygoodtable VALUES (314159265);" diff --git a/charts/paradedb/test/postgresql-import/02-import-cluster-assert.yaml b/charts/paradedb/test/postgresql-import/02-import-cluster-assert.yaml new file 
mode 100644 index 000000000..86d37e690 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/02-import-cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: import-paradedb +status: + readyInstances: 2 diff --git a/charts/paradedb/test/postgresql-import/02-import-cluster.yaml b/charts/paradedb/test/postgresql-import/02-import-cluster.yaml new file mode 100644 index 000000000..18c88387d --- /dev/null +++ b/charts/paradedb/test/postgresql-import/02-import-cluster.yaml @@ -0,0 +1,30 @@ +type: postgresql +mode: "recovery" +recovery: + method: "import" + import: + type: "microservice" + databases: [ "mygooddb" ] + source: + host: "source-paradedb-rw" + username: "postgres" + passwordSecret: + name: source-paradedb-superuser + key: password + sslMode: "require" + sslKeySecret: + name: source-paradedb-replication + key: tls.key + sslCertSecret: + name: source-paradedb-replication + key: tls.crt + +cluster: + instances: 2 + storage: + size: 256Mi + initdb: + database: mygooddb + +backups: + enabled: false diff --git a/charts/paradedb/test/postgresql-import/03-data_test-assert.yaml b/charts/paradedb/test/postgresql-import/03-data_test-assert.yaml new file mode 100644 index 000000000..04df941e4 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/03-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test +status: + succeeded: 1 diff --git a/charts/paradedb/test/postgresql-import/03-data_test.yaml b/charts/paradedb/test/postgresql-import/03-data_test.yaml new file mode 100644 index 000000000..716f6e4b8 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/03-data_test.yaml @@ -0,0 +1,24 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: import-paradedb-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + test "$(psql "${DB_URI}mygooddb" -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" + test "$(psql "${DB_URI}mygooddb" -t -c 'SELECT EXISTS (SELECT FROM mygoodtable WHERE id = 314159265)' --csv -q 2>/dev/null)" = "t" diff --git a/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only-assert.yaml b/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only-assert.yaml new file mode 100644 index 000000000..9dd8bd128 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: import-schemaonly-paradedb +status: + readyInstances: 2 diff --git a/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only.yaml b/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only.yaml new file mode 100644 index 000000000..38f87cea3 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/04-import-cluster-schema_only.yaml @@ -0,0 +1,31 @@ +type: postgresql +mode: "recovery" +recovery: + method: "import" + import: + type: "microservice" + databases: [ "mygooddb" ] + schemaOnly: true + source: + host: "source-paradedb-rw" + username: "postgres" + passwordSecret: + name: source-paradedb-superuser + key: password + sslMode: "require" + sslKeySecret: + name: source-paradedb-replication + key: tls.key + 
sslCertSecret: + name: source-paradedb-replication + key: tls.crt + +cluster: + instances: 2 + storage: + size: 256Mi + initdb: + database: mygooddb + +backups: + enabled: false diff --git a/charts/paradedb/test/postgresql-import/05-data_test-assert.yaml b/charts/paradedb/test/postgresql-import/05-data_test-assert.yaml new file mode 100644 index 000000000..b29534fcc --- /dev/null +++ b/charts/paradedb/test/postgresql-import/05-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-schemaonly +status: + succeeded: 1 diff --git a/charts/paradedb/test/postgresql-import/05-data_test.yaml b/charts/paradedb/test/postgresql-import/05-data_test.yaml new file mode 100644 index 000000000..5e1f05f6e --- /dev/null +++ b/charts/paradedb/test/postgresql-import/05-data_test.yaml @@ -0,0 +1,24 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-schemaonly +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: import-schemaonly-paradedb-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + test "$(psql "${DB_URI}mygooddb" -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" + test "$(psql "${DB_URI}mygooddb" -t -c 'SELECT EXISTS (SELECT FROM mygoodtable WHERE id = 314159265)' --csv -q 2>/dev/null)" = "f" diff --git a/charts/paradedb/test/postgresql-import/chainsaw-test.yaml b/charts/paradedb/test/postgresql-import/chainsaw-test.yaml new file mode 100644 index 000000000..7ecf350d8 --- /dev/null +++ b/charts/paradedb/test/postgresql-import/chainsaw-test.yaml @@ -0,0 +1,97 @@ +## +# This is a test that provisions a regular (non CNPG) PostgreSQL cluster and attempts to perform a database import. 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: postgresql-import +spec: + timeouts: + apply: 1s + assert: 2m + cleanup: 1m + steps: + - name: Install the external PostgreSQL cluster + try: + - apply: + file: ./00-source-superuser-password.yaml + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./00-source-cluster.yaml \ + --wait \ + source ../../ + - assert: + file: ./00-source-cluster-assert.yaml + - apply: + file: ./01-data_write.yaml + - assert: + file: ./01-data_write-assert.yaml + - name: Install the import cluster + timeouts: + assert: 5m + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./02-import-cluster.yaml \ + --wait \ + import ../../ + - assert: + file: ./02-import-cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - name: Verify the data exists + try: + - apply: + file: ./03-data_test.yaml + - assert: + file: ./03-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Install the schema-only import cluster + timeouts: + assert: 5m + try: + - script: + content: | + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./04-import-cluster-schema_only.yaml \ + --wait \ + import-schemaonly ../../ + - assert: + file: ./04-import-cluster-schema_only-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - name: Verify only the schema exists + try: + - apply: + file: ./05-data_test.yaml + - assert: + file: ./05-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test-schemaonly + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE source + helm uninstall --namespace $NAMESPACE import + helm uninstall --namespace $NAMESPACE import-schemaonly diff --git a/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml b/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml index c11fed595..5d0baa1c0 100644 --- a/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/00-source-cluster.yaml @@ -2,5 +2,7 @@ type: postgresql mode: "standalone" cluster: instances: 1 + storage: + size: 256Mi backups: - enabled: false \ No newline at end of file + enabled: false diff --git a/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml index 0042bd629..ea7083dbb 100644 --- a/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml +++ b/charts/paradedb/test/postgresql-pg_basebackup/02-pg_basebackup-cluster.yaml @@ -17,6 +17,8 @@ recovery: cluster: instances: 2 + storage: + size: 256Mi backups: - enabled: false \ No newline at end of file + enabled: false diff --git a/charts/paradedb/values.schema.json b/charts/paradedb/values.schema.json index c9130b534..b486722e6 100644 --- a/charts/paradedb/values.schema.json +++ b/charts/paradedb/values.schema.json @@ -424,6 +424,96 @@ } } }, + "import": { + "type": "object", + "properties": { + "databases": { + "type": "array" + }, + "postImportApplicationSQL": { + "type": "array" + }, + "roles": { + "type": "array" + }, + "schemaOnly": { + "type": "boolean" + }, + "source": { + "type": "object", + "properties": { + "database": { + 
"type": "string" + }, + "host": { + "type": "string" + }, + "passwordSecret": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "port": { + "type": "integer" + }, + "sslCertSecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "sslKeySecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "sslMode": { + "type": "string" + }, + "sslRootCertSecret": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "username": { + "type": "string" + } + } + }, + "type": { + "type": "string" + } + } + }, "method": { "type": "string" }, diff --git a/charts/paradedb/values.yaml b/charts/paradedb/values.yaml index 515db9627..6756e6ef8 100644 --- a/charts/paradedb/values.yaml +++ b/charts/paradedb/values.yaml @@ -28,7 +28,8 @@ recovery: # * `backup` - Recovers a CNPG cluster from a CNPG backup (PITR supported) Needs to be on the same cluster in the same namespace. # * `object_store` - Recovers a CNPG cluster from a barman object store (PITR supported). # * `pg_basebackup` - Recovers a CNPG cluster viaa streaming replication protocol. Useful if you want to - # migrate databases to CloudNativePG, even from outside Kubernetes. # TODO + # migrate databases to CloudNativePG, even from outside Kubernetes. + # * `import` - Import one or more databases from an existing Postgres cluster. method: backup ## -- Point in time recovery target. Specify one of the following: @@ -120,6 +121,44 @@ recovery: name: "" key: "" + # See: https://cloudnative-pg.io/documentation/1.24/cloudnative-pg.v1/#postgresql-cnpg-io-v1-Import + import: + # -- One of `microservice` or `monolith.` + # See: https://cloudnative-pg.io/documentation/1.24/database_import/#how-it-works + type: "microservice" + # -- Databases to import + databases: [] + # -- Roles to import + roles: [] + # -- List of SQL queries to be executed as a superuser in the application database right after is imported. + # To be used with extreme care. Only available in microservice type. + postImportApplicationSQL: [] + # -- When set to true, only the pre-data and post-data sections of pg_restore are invoked, avoiding data import. 
+ schemaOnly: false + source: + host: "" + port: 5432 + username: "" + database: "paradedb" + sslMode: "verify-full" + passwordSecret: + # -- Whether to create a secret for the password + create: false + # -- Name of the secret containing the password + name: "" + # -- The key in the secret containing the password + key: "password" + # -- The password value to use when creating the secret + value: "" + sslKeySecret: + name: "" + key: "" + sslCertSecret: + name: "" + key: "" + sslRootCertSecret: + name: "" + key: "" cluster: # -- Number of instances From a5509af1192945472810cb5698dad9b69c2bba56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20No=C3=ABl?= <21990816+philippemnoel@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:12:12 -0500 Subject: [PATCH 6/8] feat: Default to 0.12.0 (#56) --- charts/paradedb/Chart.yaml | 4 ++-- charts/paradedb/README.md | 2 +- charts/paradedb/examples/image-catalog-ref.yaml | 2 +- charts/paradedb/examples/image-catalog.yaml | 2 +- charts/paradedb/examples/paradedb.yaml | 2 +- charts/paradedb/values.yaml | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/charts/paradedb/Chart.yaml b/charts/paradedb/Chart.yaml index f8458a5e4..b55e6e166 100644 --- a/charts/paradedb/Chart.yaml +++ b/charts/paradedb/Chart.yaml @@ -20,8 +20,8 @@ icon: https://raw.githubusercontent.com/paradedb/paradedb/main/docs/logo/light.s type: application # The Chart version, set in the publish CI workflow from GitHub Actions Variables -# We default to v0.11.0 for testing and local development -version: 0.11.0 +# We default to v0.12.0 for testing and local development +version: 0.12.0 sources: - https://github.com/paradedb/charts diff --git a/charts/paradedb/README.md b/charts/paradedb/README.md index 67ada2d92..b6bca59c9 100644 --- a/charts/paradedb/README.md +++ b/charts/paradedb/README.md @@ -300,7 +300,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.secret.create | bool | `true` | Whether to create a secret for the backup credentials | | recovery.secret.name | string | `""` | Name of the backup credentials secret | | type | string | `"paradedb"` | Type of the CNPG database. 
Available types: * `paradedb` | -| version.paradedb | string | `"0.11.0"` | We default to v0.11.0 for testing and local development | +| version.paradedb | string | `"0.12.0"` | We default to v0.12.0 for testing and local development | | version.postgresql | string | `"16"` | PostgreSQL major version to use | | poolers[].name | string | `` | Name of the pooler resource | | poolers[].instances | number | `1` | The number of replicas we want | diff --git a/charts/paradedb/examples/image-catalog-ref.yaml b/charts/paradedb/examples/image-catalog-ref.yaml index f3f6bd8f8..04c3eeade 100644 --- a/charts/paradedb/examples/image-catalog-ref.yaml +++ b/charts/paradedb/examples/image-catalog-ref.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - paradedb: "0.11.0" + paradedb: "0.12.0" cluster: instances: 1 imageCatalogRef: diff --git a/charts/paradedb/examples/image-catalog.yaml b/charts/paradedb/examples/image-catalog.yaml index c4bbc2114..d01e04225 100644 --- a/charts/paradedb/examples/image-catalog.yaml +++ b/charts/paradedb/examples/image-catalog.yaml @@ -2,7 +2,7 @@ type: postgresql mode: standalone version: major: "16" - paradedb: "0.11.0" + paradedb: "0.12.0" cluster: instances: 1 backups: diff --git a/charts/paradedb/examples/paradedb.yaml b/charts/paradedb/examples/paradedb.yaml index 4241445da..73dca88cc 100644 --- a/charts/paradedb/examples/paradedb.yaml +++ b/charts/paradedb/examples/paradedb.yaml @@ -2,7 +2,7 @@ type: paradedb mode: standalone version: postgresql: "16.3" - paradedb: "0.11.0" + paradedb: "0.12.0" cluster: instances: 1 backups: diff --git a/charts/paradedb/values.yaml b/charts/paradedb/values.yaml index 6756e6ef8..7f545db8f 100644 --- a/charts/paradedb/values.yaml +++ b/charts/paradedb/values.yaml @@ -12,8 +12,8 @@ version: # -- PostgreSQL major version to use postgresql: "16" # -- The ParadeDB version, set in the publish CI workflow from the latest paradedb/paradedb GitHub tag - # -- We default to v0.11.0 for testing and local development - paradedb: "0.11.0" + # -- We default to v0.12.0 for testing and local development + paradedb: "0.12.0" ### # -- Cluster mode of operation. Available modes: From c264c8c63e88b5c9a9c2e4a71f1254a2a9e3c6cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20No=C3=ABl?= <21990816+philippemnoel@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:06:05 -0500 Subject: [PATCH 7/8] docs: Update docs following BYOC (#52) --- .prettierignore | 2 +- README.md | 63 +++++++++------------ charts/paradedb/README.md | 97 ++++++++++++++------------------ charts/paradedb/README.md.gotmpl | 95 +++++++++++-------------------- 4 files changed, 102 insertions(+), 155 deletions(-) diff --git a/.prettierignore b/.prettierignore index 070ddcc82..23f1068ec 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,5 +1,5 @@ /.github/actions/ /.github/workflows/lint.yml /.github/workflows/tests-*.yml -.github/workflows/tests-*.yaml +/.github/workflows/tests-*.yaml /charts/ diff --git a/README.md b/README.md index ea6587b07..a85e919cb 100644 --- a/README.md +++ b/README.md @@ -26,44 +26,45 @@ # ParadeDB Helm Chart -The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication. 
+The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming (physical) replication. Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. +The ParadeDB Helm Chart supports Postgres 13+ and ships with Postgres 16 by default. + The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb). -## Getting Started +## Usage -First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). +### ParadeDB Bring-Your-Own-Cloud (BYOC) -### Installing the Prometheus Stack +The most reliable way to run ParadeDB in production is with ParadeDB BYOC, an end-to-end managed solution that runs in the customer’s cloud account. It deploys on managed Kubernetes services and uses the ParadeDB Helm Chart. -The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install it with: +ParadeDB BYOC includes built-in integration with managed PostgreSQL services, such as AWS RDS, via logical replication. It also provides monitoring, logging and alerting through Prometheus and Grafana. The ParadeDB team manages the underlying infrastructure and lifecycle of the cluster. + +You can read more about the optimal architecture for running ParadeDB in production [here](https://docs.paradedb.com/deploy/overview) and you can contact sales [here](mailto:sales@paradedb.com). + +### Self-Hosted + +First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). -```bash -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm upgrade --atomic --install prometheus-community \ ---create-namespace \ ---namespace prometheus-community \ ---values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \ -prometheus-community/kube-prometheus-stack -``` +#### Monitoring -### Installing the CloudNativePG Operator +The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable monitoring, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. The Prometheus CRDs can be found [here](https://prometheus-community.github.io/helm-charts). + +#### Installing the CloudNativePG Operator -Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. +Skip this step if the CloudNativePG operator is already installed in your cluster. 
For advanced CloudNativePG configuration and monitoring, please refer to the [CloudNativePG Cluster Chart documentation](https://github.com/cloudnative-pg/charts/blob/main/charts/cloudnative-pg/README.md#values).
 
 ```bash
 helm repo add cnpg https://cloudnative-pg.github.io/charts
 helm upgrade --atomic --install cnpg \
 --create-namespace \
 --namespace cnpg-system \
---set monitoring.podMonitorEnabled=true \
---set monitoring.grafanaDashboard.create=true \
 cnpg/cloudnative-pg
 ```
 
-### Setting up a ParadeDB CNPG Cluster
+#### Setting up a ParadeDB CNPG Cluster
 
 Create a `values.yaml` and configure it to your requirements. Here is a basic example:
 
@@ -77,7 +78,7 @@ cluster:
     size: 256Mi
 ```
 
-Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command.
+Then, launch the ParadeDB cluster.
 
 ```bash
 helm repo add paradedb https://paradedb.github.io/charts
@@ -85,40 +86,28 @@ helm upgrade --atomic --install paradedb \
 --namespace paradedb \
 --create-namespace \
 --values values.yaml \
---set cluster.monitoring.enabled=true \
 paradedb/paradedb
 ```
 
-If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md).
+If `--values values.yaml` is omitted, the default values will be used. For advanced ParadeDB configuration and monitoring, please refer to the [ParadeDB Chart documentation](https://github.com/paradedb/charts/tree/dev/charts/paradedb#values).
 
-### Connecting to a ParadeDB CNPG Cluster
+#### Connecting to a ParadeDB CNPG Cluster
 
-The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be:
+You can launch a Bash shell inside a specific pod via:
 
 ```bash
-kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash
+kubectl exec --stdin --tty -n paradedb paradedb-1 -- bash
 ```
 
-This will launch a Bash shell inside the instance. You can connect to the ParadeDB database via `psql` with:
+The primary is called `paradedb-1`. The replicas are called `paradedb-2` onwards depending on the number of replicas you configured. You can connect to the ParadeDB database with `psql` via:
 
 ```bash
 psql -d paradedb
 ```
 
-### Connecting to the Grafana Dashboard
-
-To connect to the Grafana dashboard for your cluster, we suggested port forwarding the Kubernetes service running Grafana to localhost:
-
-```bash
-kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80
-```
-
-You can then access the Grafana dasbhoard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are
-defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus CRDs](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file.
- ## Development -To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. +To test changes to the Chart on a local Minikube cluster, follow the instructions from [Self Hosted](#self-hosted) replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. ```bash helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb diff --git a/charts/paradedb/README.md b/charts/paradedb/README.md index b6bca59c9..01a71a23c 100644 --- a/charts/paradedb/README.md +++ b/charts/paradedb/README.md @@ -1,43 +1,44 @@ -# ParadeDB CloudNativePG Cluster +# ParadeDB Helm Chart -The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication. +The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming (physical) replication. Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. +The ParadeDB Helm Chart supports Postgres 13+ and ships with Postgres 16 by default. + The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb). -## Getting Started +## Usage -First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/). +### ParadeDB Bring-Your-Own-Cloud (BYOC) -### Installing the Prometheus Stack +The most reliable way to run ParadeDB in production is with ParadeDB BYOC, an end-to-end managed solution that runs in the customer’s cloud account. It deploys on managed Kubernetes services and uses the ParadeDB Helm Chart. -The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install it with: +ParadeDB BYOC includes built-in integration with managed PostgreSQL services, such as AWS RDS, via logical replication. It also provides monitoring, logging and alerting through Prometheus and Grafana. The ParadeDB team manages the underlying infrastructure and lifecycle of the cluster. 
-
-```bash
-helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
-helm upgrade --atomic --install prometheus-community \
---create-namespace \
---namespace prometheus-community \
---values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \
-prometheus-community/kube-prometheus-stack
-```
+You can read more about the optimal architecture for running ParadeDB in production [here](https://docs.paradedb.com/deploy/overview) and you can contact sales [here](mailto:sales@paradedb.com).
+
+### Self-Hosted
+
+First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/).
 
-### Installing the CloudNativePG Operator
+#### Monitoring
 
-Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands.
+The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable monitoring, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. The Prometheus CRDs can be found [here](https://prometheus-community.github.io/helm-charts).
+
+#### Installing the CloudNativePG Operator
+
+Skip this step if the CloudNativePG operator is already installed in your cluster. For advanced CloudNativePG configuration and monitoring, please refer to the [CloudNativePG Cluster Chart documentation](https://github.com/cloudnative-pg/charts/blob/main/charts/cloudnative-pg/README.md#values).
 
 ```bash
 helm repo add cnpg https://cloudnative-pg.github.io/charts
 helm upgrade --atomic --install cnpg \
 --create-namespace \
 --namespace cnpg-system \
---set monitoring.podMonitorEnabled=true \
---set monitoring.grafanaDashboard.create=true \
 cnpg/cloudnative-pg
 ```
 
-### Setting up a ParadeDB CNPG Cluster
+#### Setting up a ParadeDB CNPG Cluster
 
 Create a `values.yaml` and configure it to your requirements. Here is a basic example:
 
@@ -51,7 +52,7 @@ cluster:
     size: 256Mi
 ```
 
-Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command.
+Then, launch the ParadeDB cluster.
 
 ```bash
 helm repo add paradedb https://paradedb.github.io/charts
@@ -59,52 +60,40 @@ helm upgrade --atomic --install paradedb \
 --namespace paradedb \
 --create-namespace \
 --values values.yaml \
---set cluster.monitoring.enabled=true \
 paradedb/paradedb
 ```
 
-If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md).
+If `--values values.yaml` is omitted, the default values will be used. For advanced ParadeDB configuration and monitoring, please refer to the [ParadeDB Chart documentation](https://github.com/paradedb/charts/tree/dev/charts/paradedb#values).
 
-### Connecting to a ParadeDB CNPG Cluster
+#### Connecting to a ParadeDB CNPG Cluster
 
-The command to connect to the primary instance of the cluster will be printed in your terminal. 
If you do not modify any settings, it will be:
+You can launch a Bash shell inside a specific pod via:
 
 ```bash
-kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash
+kubectl exec --stdin --tty -n paradedb paradedb-1 -- bash
 ```
 
-This will launch a Bash shell inside the instance. You can connect to the ParadeDB database via `psql` with:
+The primary is called `paradedb-1`. The replicas are called `paradedb-2` onwards depending on the number of replicas you configured. You can connect to the ParadeDB database with `psql` via:
 
 ```bash
 psql -d paradedb
 ```
 
-### Connecting to the Grafana Dashboard
-
-To connect to the Grafana dashboard for your cluster, we suggested port forwarding the Kubernetes service running Grafana to localhost:
-
-```bash
-kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80
-```
-
-You can then access the Grafana dasbhoard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are
-defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus CRDs](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file.
-
 ## Development
 
-To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`.
+To test changes to the Chart on a local Minikube cluster, follow the instructions from [Self Hosted](#self-hosted) replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`.
 
 ```bash
 helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb
 ```
 
-## Cluster Configuration
+## Advanced Cluster Configuration
 
-### Database types
+### Database Types
 
 To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter.
 
-### Modes of operation
+### Modes of Operation
 
 The chart has three modes of operation. These are configured via the `mode` parameter:
 
@@ -112,7 +101,7 @@ The chart has three modes of operation. These are configured via the `mode` para
 * `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.**
 * `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup.
 
-### Backup configuration
+### Backup Configuration
 
 CNPG implements disaster recovery via [Barman](https://pgbarman.org/). The following section configures the barman object store where backups will be stored. Barman performs backups of the cluster filesystem base backup and WALs. Both are
@@ -302,18 +291,18 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat
 | type | string | `"paradedb"` | Type of the CNPG database. Available types: * `paradedb` |
 | version.paradedb | string | `"0.12.0"` | We default to v0.12.0 for testing and local development |
 | version.postgresql | string | `"16"` | PostgreSQL major version to use |
-| poolers[].name | string | `` | Name of the pooler resource |
-| poolers[].instances | number | `1` | The number of replicas we want |
-| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. 
| -| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | -| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | -| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | -| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | -| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | -| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | -| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | +| poolers[].name | string | `` | Name of the pooler resource | +| poolers[].instances | number | `1` | The number of replicas we want | +| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | +| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | +| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | +| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | +| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | +| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | +| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | +| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | +| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | ## Maintainers diff --git a/charts/paradedb/README.md.gotmpl b/charts/paradedb/README.md.gotmpl index c40de8dc9..5b2b50ef7 100644 --- a/charts/paradedb/README.md.gotmpl +++ b/charts/paradedb/README.md.gotmpl @@ -1,47 +1,44 @@ -# ParadeDB CloudNativePG Cluster +# ParadeDB Helm Chart +The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming (physical) replication. -{{ template "chart.deprecationWarning" . }} +Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux. 
+The ParadeDB Helm Chart supports Postgres 13+ and ships with Postgres 16 by default.
 
-The [ParadeDB](https://github.com/paradedb/paradedb) Helm Chart is based on the official [CloudNativePG Helm Chart](https://cloudnative-pg.io/). CloudNativePG is a Kubernetes operator that manages the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture using Postgres streaming replication.
+The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb).
 
-Kubernetes, and specifically the CloudNativePG operator, is the recommended approach for deploying ParadeDB in production, with high availability. ParadeDB also provides a [Docker image](https://hub.docker.com/r/paradedb/paradedb) and [prebuilt binaries](https://github.com/paradedb/paradedb/releases) for Debian, Ubuntu and Red Hat Enterprise Linux.
+## Usage
 
-The chart is also available on [Artifact Hub](https://artifacthub.io/packages/helm/paradedb/paradedb).
+### ParadeDB Bring-Your-Own-Cloud (BYOC)
 
-## Getting Started
+The most reliable way to run ParadeDB in production is with ParadeDB BYOC, an end-to-end managed solution that runs in the customer’s cloud account. It deploys on managed Kubernetes services and uses the ParadeDB Helm Chart.
 
-First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/).
+ParadeDB BYOC includes built-in integration with managed PostgreSQL services, such as AWS RDS, via logical replication. It also provides monitoring, logging and alerting through Prometheus and Grafana. The ParadeDB team manages the underlying infrastructure and lifecycle of the cluster.
 
-### Installing the Prometheus Stack
+You can read more about the optimal architecture for running ParadeDB in production [here](https://docs.paradedb.com/deploy/overview) and you can contact sales [here](mailto:sales@paradedb.com).
 
-The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable this, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. If you do not yet have the Prometheus CRDs installed on your Kubernetes cluster, you can install it with:
+### Self-Hosted
 
-```bash
-helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
-helm upgrade --atomic --install prometheus-community \
---create-namespace \
---namespace prometheus-community \
---values https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \
-prometheus-community/kube-prometheus-stack
-```
+First, install [Helm](https://helm.sh/docs/intro/install/). The following steps assume you have a Kubernetes cluster running v1.25+. If you are testing locally, we recommend using [Minikube](https://minikube.sigs.k8s.io/docs/start/).
+
+#### Monitoring
+
+The ParadeDB Helm chart supports monitoring via Prometheus and Grafana. To enable monitoring, you need to have the Prometheus CRDs installed before installing the CloudNativePG operator. The Prometheus CRDs can be found [here](https://prometheus-community.github.io/helm-charts).
 
-### Installing the CloudNativePG Operator
+#### Installing the CloudNativePG Operator
 
-Skip this step if the CloudNativePG operator is already installed in your cluster. If you do not wish to monitor your cluster, omit the `--set` commands. 
+Skip this step if the CloudNativePG operator is already installed in your cluster. For advanced CloudNativePG configuration and monitoring, please refer to the [CloudNativePG Cluster Chart documentation](https://github.com/cloudnative-pg/charts/blob/main/charts/cloudnative-pg/README.md#values).
 
 ```bash
 helm repo add cnpg https://cloudnative-pg.github.io/charts
 helm upgrade --atomic --install cnpg \
 --create-namespace \
 --namespace cnpg-system \
---set monitoring.podMonitorEnabled=true \
---set monitoring.grafanaDashboard.create=true \
 cnpg/cloudnative-pg
 ```
 
-### Setting up a ParadeDB CNPG Cluster
+#### Setting up a ParadeDB CNPG Cluster
 
 Create a `values.yaml` and configure it to your requirements. Here is a basic example:
 
@@ -55,7 +52,7 @@ cluster:
     size: 256Mi
 ```
 
-Then, launch the ParadeDB cluster. If you do not wish to monitor your cluster, omit the `--set` command.
+Then, launch the ParadeDB cluster.
 
 ```bash
 helm repo add paradedb https://paradedb.github.io/charts
@@ -63,52 +60,40 @@ helm upgrade --atomic --install paradedb \
 --namespace paradedb \
 --create-namespace \
 --values values.yaml \
---set cluster.monitoring.enabled=true \
 paradedb/paradedb
 ```
 
-If `--values values.yaml` is omitted, the default values will be used. For additional configuration options for the `values.yaml` file, including configuring backups and PgBouncer, please refer to the [ParadeDB Helm Chart documentation](https://artifacthub.io/packages/helm/paradedb/paradedb#values). For advanced cluster configuration options, please refer to the [CloudNativePG Cluster Chart documentation](charts/paradedb/README.md).
+If `--values values.yaml` is omitted, the default values will be used. For advanced ParadeDB configuration and monitoring, please refer to the [ParadeDB Chart documentation](https://github.com/paradedb/charts/tree/dev/charts/paradedb#values).
 
-### Connecting to a ParadeDB CNPG Cluster
+#### Connecting to a ParadeDB CNPG Cluster
 
-The command to connect to the primary instance of the cluster will be printed in your terminal. If you do not modify any settings, it will be:
+You can launch a Bash shell inside a specific pod via:
 
 ```bash
-kubectl --namespace paradedb exec --stdin --tty services/paradedb-rw -- bash
+kubectl exec --stdin --tty -n paradedb paradedb-1 -- bash
 ```
 
-This will launch a Bash shell inside the instance. You can connect to the ParadeDB database via `psql` with:
+The primary is called `paradedb-1`. The replicas are called `paradedb-2` onwards depending on the number of replicas you configured. You can connect to the ParadeDB database with `psql` via:
 
 ```bash
 psql -d paradedb
 ```
 
-### Connecting to the Grafana Dashboard
-
-To connect to the Grafana dashboard for your cluster, we suggested port forwarding the Kubernetes service running Grafana to localhost:
-
-```bash
-kubectl --namespace prometheus-community port-forward svc/prometheus-community-grafana 3000:80
-```
-
-You can then access the Grafana dasbhoard at [http://localhost:3000/](http://localhost:3000/) using the credentials `admin` as username and `prom-operator` as password. These default credentials are
-defined in the [`kube-stack-config.yaml`](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml) file used as the `values.yaml` file in [Installing the Prometheus CRDs](#installing-the-prometheus-stack) and can be modified by providing your own `values.yaml` file.
- ## Development -To test changes to the Chart on a local Minikube cluster, follow the instructions from [Getting Started](#getting-started), replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. +To test changes to the Chart on a local Minikube cluster, follow the instructions from [Self Hosted](#self-hosted) replacing the `helm upgrade` step by the path to the directory of the modified `Chart.yaml`. ```bash helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace ./charts/paradedb ``` -## Cluster Configuration +## Advanced Cluster Configuration -### Database types +### Database Types To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter. -### Modes of operation +### Modes of Operation The chart has three modes of operation. These are configured via the `mode` parameter: @@ -116,7 +101,7 @@ The chart has three modes of operation. These are configured via the `mode` para * `replica` - Creates a replica cluster from an existing CNPG cluster. **_Note_ that this mode is not yet supported.** * `recovery` - Recovers a CNPG cluster from a backup, object store or via pg_basebackup. -### Backup configuration +### Backup Configuration CNPG implements disaster recovery via [Barman](https://pgbarman.org/). The following section configures the barman object store where backups will be stored. Barman performs backups of the cluster filesystem base backup and WALs. Both are @@ -153,28 +138,12 @@ There is a separate document outlining the recovery procedure here: **[Recovery] There are several configuration examples in the [examples](examples) directory. Refer to them for a basic setup and refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentation/current/) for more advanced configurations. - -{{ template "chart.requirementsSection" . }} - +## Values {{ template "chart.valuesSection" . }} -| poolers[].name | string | `` | Name of the pooler resource | -| poolers[].instances | number | `1` | The number of replicas we want | -| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. | -| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. | -| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. | -| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure | -| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created | -| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created | -| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | -| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. | -| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. | -{{ template "chart.maintainersSection" . }} - -{{ template "helm-docs.versionFooter" . }} +{{ template "chart.maintainersSection" . 
}}

## License

From 542548b9cdc54250827965e840e9cb5b62fc0afe Mon Sep 17 00:00:00 2001
From: Itay Grudev
Date: Tue, 12 Nov 2024 17:21:23 +0200
Subject: [PATCH 8/8] ParadeDB Enterprise

---
 charts/paradedb/README.md                     | 33 +++++++------
 charts/paradedb/README.md.gotmpl              | 14 +++++-
 charts/paradedb/templates/_bootstrap.tpl      |  6 +--
 charts/paradedb/templates/_helpers.tpl        |  6 +++
 charts/paradedb/templates/cluster.yaml        |  2 +-
 ...01-paradedb-NCC-1701-D_cluster-assert.yaml |  6 +++
 .../01-paradedb-NCC-1701-D_cluster.yaml       | 13 ++++++
 .../02-paradedb_test-assert.yaml              |  6 +++
 .../paradedb-enterprise/02-paradedb_test.yaml | 46 +++++++++++++++++++
 .../paradedb-enterprise/chainsaw-test.yaml    | 45 ++++++++++++++++++
 charts/paradedb/values.yaml                   |  1 +
 11 files changed, 158 insertions(+), 20 deletions(-)
 create mode 100644 charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster-assert.yaml
 create mode 100644 charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster.yaml
 create mode 100644 charts/paradedb/test/paradedb-enterprise/02-paradedb_test-assert.yaml
 create mode 100644 charts/paradedb/test/paradedb-enterprise/02-paradedb_test.yaml
 create mode 100644 charts/paradedb/test/paradedb-enterprise/chainsaw-test.yaml

diff --git a/charts/paradedb/README.md b/charts/paradedb/README.md
index 01a71a23c..191493281 100644
--- a/charts/paradedb/README.md
+++ b/charts/paradedb/README.md
@@ -91,7 +91,19 @@ helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace
 
 ### Database Types
 
-To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter.
+To create a ParadeDB cluster, you must specify either `paradedb` or `paradedb-enterprise` via the `type` parameter.
+
+> [!IMPORTANT]
+> When using `paradedb-enterprise`, you must also specify `cluster.imagePullSecrets` containing the Docker registry credentials. 
+> You can create one with: +> kubectl -n NAMESPACE create secret docker-registry paradedb-enterprise-registry-cred --docker-server="https://index.docker.io/v1/" --docker-username="USERNAME" --docker-password="ACCESS_TOKEN" +> Then you need to set: +> ```yaml +> type: paradedb-enterprise +> cluster: +> imagePullSecrets: +> - name: paradedb-enterprise-registry-cred +> ``` ### Modes of Operation @@ -140,6 +152,8 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat ## Values +## Values + | Key | Type | Default | Description | |-----|------|---------|-------------| | backups.azure.connectionString | string | `""` | | @@ -201,7 +215,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.monitoring.prometheusRule.excludeRules | list | `[]` | Exclude specified rules | | cluster.postgresGID | int | `-1` | The GID of the postgres user inside the image, defaults to 26 | | cluster.postgresUID | int | `-1` | The UID of the postgres user inside the image, defaults to 26 | -| cluster.postgresql.parameters | object | `{}` | PostgreSQL configuration options (postgresql.conf) | +| cluster.postgresql.parameters | object | `{"cron.database_name":"postgres"}` | PostgreSQL configuration options (postgresql.conf) | | cluster.postgresql.pg_hba | list | `[]` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | | cluster.postgresql.pg_ident | list | `[]` | PostgreSQL User Name Maps rules (lines to be appended to the pg_ident.conf file) | | cluster.postgresql.shared_preload_libraries | list | `[]` | Lists of shared preload libraries to add to the default ones | @@ -210,6 +224,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | cluster.priorityClassName | string | `""` | | | cluster.resources | object | `{}` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. We strongly advise you use the same setting for limits and requests so that your cluster pods are given a Guaranteed QoS. See: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/ | | cluster.roles | list | `[]` | This feature enables declarative management of existing roles, as well as the creation of new roles if they are not already present in the database. See: https://cloudnative-pg.io/documentation/current/declarative_role_management/ | +| cluster.serviceAccountTemplate | object | `{}` | Configure the generation of the service account | | cluster.storage.size | string | `"8Gi"` | | | cluster.storage.storageClass | string | `""` | | | cluster.superuserSecret | string | `""` | | @@ -288,21 +303,9 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat | recovery.s3.secretKey | string | `""` | | | recovery.secret.create | bool | `true` | Whether to create a secret for the backup credentials | | recovery.secret.name | string | `""` | Name of the backup credentials secret | -| type | string | `"paradedb"` | Type of the CNPG database. Available types: * `paradedb` | +| type | string | `"paradedb"` | Type of the CNPG database. 
Available types: * `paradedb` |
+| type | string | `"paradedb"` | Type of the CNPG database. Available types: * `paradedb` * `paradedb-enterprise` |
 | version.paradedb | string | `"0.12.0"` | We default to v0.12.0 for testing and local development |
 | version.postgresql | string | `"16"` | PostgreSQL major version to use |
-| poolers[].name | string | `` | Name of the pooler resource |
-| poolers[].instances | number | `1` | The number of replicas we want |
-| poolers[].type | [PoolerType][PoolerType] | `rw` | Type of service to forward traffic to. Default: `rw`. |
-| poolers[].poolMode | [PgBouncerPoolMode][PgBouncerPoolMode] | `session` | The pool mode. Default: `session`. |
-| poolers[].authQuerySecret | [LocalObjectReference][LocalObjectReference] | `{}` | The credentials of the user that need to be used for the authentication query. |
-| poolers[].authQuery | string | `{}` | The credentials of the user that need to be used for the authentication query. |
-| poolers[].parameters | map[string]string | `{}` | Additional parameters to be passed to PgBouncer - please check the CNPG documentation for a list of options you can configure |
-| poolers[].template | [PodTemplateSpec][PodTemplateSpec] | `{}` | The template of the Pod to be created |
-| poolers[].template | [ServiceTemplateSpec][ServiceTemplateSpec] | `{}` | Template for the Service to be created |
-| poolers[].pg_hba | []string | `{}` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) |
-| poolers[].monitoring.enabled | bool | `false` | Whether to enable monitoring for the Pooler. |
-| poolers[].monitoring.podMonitor.enabled | bool | `true` | Create a podMonitor for the Pooler. |
 
 ## Maintainers
 
diff --git a/charts/paradedb/README.md.gotmpl b/charts/paradedb/README.md.gotmpl
index 5b2b50ef7..97ae7dc40 100644
--- a/charts/paradedb/README.md.gotmpl
+++ b/charts/paradedb/README.md.gotmpl
@@ -91,7 +91,19 @@ helm upgrade --atomic --install paradedb --namespace paradedb --create-namespace
 
 ### Database Types
 
-To use the ParadeDB Helm Chart, specify `paradedb` via the `type` parameter.
+To create a ParadeDB cluster, you must specify either `paradedb` or `paradedb-enterprise` via the `type` parameter.
+
+> [!IMPORTANT]
+> When using `paradedb-enterprise`, you must also specify `cluster.imagePullSecrets` containing the Docker registry credentials.
+> You can create one with:
+> kubectl -n NAMESPACE create secret docker-registry paradedb-enterprise-registry-cred --docker-server="https://index.docker.io/v1/" --docker-username="USERNAME" --docker-password="ACCESS_TOKEN"
+> Then you need to set:
+> ```yaml
+> type: paradedb-enterprise
+> cluster:
+>   imagePullSecrets:
+>     - name: paradedb-enterprise-registry-cred
+> ```
 
 ### Modes of Operation
 
diff --git a/charts/paradedb/templates/_bootstrap.tpl b/charts/paradedb/templates/_bootstrap.tpl
index f40643c66..bb3886a3c 100644
--- a/charts/paradedb/templates/_bootstrap.tpl
+++ b/charts/paradedb/templates/_bootstrap.tpl
@@ -11,7 +11,7 @@ bootstrap:
     owner: {{ tpl .Values.cluster.initdb.owner . 
}} {{- end }} postInitSQL: - {{- if eq .Values.type "paradedb" }} + {{- if or (eq .Values.type "paradedb") (eq .Values.type "paradedb-enterprise") }} - CREATE EXTENSION IF NOT EXISTS pg_cron; {{- end }} {{- with .Values.cluster.initdb }} @@ -20,7 +20,7 @@ bootstrap: {{- end -}} {{- end }} postInitApplicationSQL: - {{- if eq .Values.type "paradedb" }} + {{- if or (eq .Values.type "paradedb") (eq .Values.type "paradedb-enterprise") }} - CREATE EXTENSION IF NOT EXISTS pg_search; - CREATE EXTENSION IF NOT EXISTS pg_analytics; - CREATE EXTENSION IF NOT EXISTS pg_ivm; @@ -37,7 +37,7 @@ bootstrap: {{- end -}} {{- end }} postInitTemplateSQL: - {{- if eq .Values.type "paradedb" }} + {{- if or (eq .Values.type "paradedb") (eq .Values.type "paradedb-enterprise") }} - CREATE EXTENSION IF NOT EXISTS pg_search; - CREATE EXTENSION IF NOT EXISTS pg_analytics; - CREATE EXTENSION IF NOT EXISTS pg_ivm; diff --git a/charts/paradedb/templates/_helpers.tpl b/charts/paradedb/templates/_helpers.tpl index 041e90353..3a225f8d5 100644 --- a/charts/paradedb/templates/_helpers.tpl +++ b/charts/paradedb/templates/_helpers.tpl @@ -69,6 +69,8 @@ If a custom imageName is available, use it, otherwise use the defaults based on {{- printf "ghcr.io/cloudnative-pg/postgresql:%s" .Values.version.postgresql -}} {{- else if eq .Values.type "paradedb" -}} {{- printf "paradedb/paradedb:%s-v%s" .Values.version.postgresql .Values.version.paradedb -}} + {{- else if eq .Values.type "paradedb-enterprise" -}} + {{- printf "paradedb/paradedb-enterprise:%s-v%s" .Values.version.postgresql .Values.version.paradedb -}} {{- else -}} {{ fail "Invalid cluster type!" }} {{- end }} @@ -103,6 +105,8 @@ Postgres UID {{- .Values.cluster.postgresUID }} {{- else if eq .Values.type "paradedb" -}} {{- 999 -}} + {{- else if eq .Values.type "paradedb-enterprise" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} @@ -116,6 +120,8 @@ Postgres GID {{- .Values.cluster.postgresGID }} {{- else if eq .Values.type "paradedb" -}} {{- 999 -}} + {{- else if eq .Values.type "paradedb-enterprise" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} diff --git a/charts/paradedb/templates/cluster.yaml b/charts/paradedb/templates/cluster.yaml index c6d13b930..8359c134c 100644 --- a/charts/paradedb/templates/cluster.yaml +++ b/charts/paradedb/templates/cluster.yaml @@ -53,7 +53,7 @@ spec: {{ end }} postgresql: shared_preload_libraries: - {{- if eq .Values.type "paradedb" }} + {{- if or (eq .Values.type "paradedb") (eq .Values.type "paradedb-enterprise") }} - pg_search - pg_analytics - pg_cron diff --git a/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster-assert.yaml b/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster-assert.yaml new file mode 100644 index 000000000..75c7d8771 --- /dev/null +++ b/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: paradedb-ncc-1701-d +status: + readyInstances: 1 diff --git a/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster.yaml b/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster.yaml new file mode 100644 index 000000000..cc0d6fdd1 --- /dev/null +++ b/charts/paradedb/test/paradedb-enterprise/01-paradedb-NCC-1701-D_cluster.yaml @@ -0,0 +1,13 @@ +type: paradedb-enterprise +mode: standalone + +cluster: + instances: 1 + storage: + size: 256Mi + imagePullSecrets: + - name: paradedb-enterprise-registry-cred + + +backups: + enabled: false 
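As a quick local sanity check of the template changes above (not part of the patch; it assumes a checkout of the chart at `./charts/paradedb` and the default versions from `values.yaml`), rendering the chart with the enterprise type should surface the image name derived in `_helpers.tpl` and the libraries preloaded in `cluster.yaml`:

```bash
# Sketch only: render the chart locally with the enterprise type and grep for
# the derived image name and the preloaded ParadeDB libraries.
helm template paradedb ./charts/paradedb \
  --set type=paradedb-enterprise \
  | grep -E 'paradedb/paradedb-enterprise|pg_search|pg_analytics|pg_cron'
```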
diff --git a/charts/paradedb/test/paradedb-enterprise/02-paradedb_test-assert.yaml b/charts/paradedb/test/paradedb-enterprise/02-paradedb_test-assert.yaml new file mode 100644 index 000000000..34cf73f0c --- /dev/null +++ b/charts/paradedb/test/paradedb-enterprise/02-paradedb_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: paradedb-enterprise-test +status: + succeeded: 1 diff --git a/charts/paradedb/test/paradedb-enterprise/02-paradedb_test.yaml b/charts/paradedb/test/paradedb-enterprise/02-paradedb_test.yaml new file mode 100644 index 000000000..601918280 --- /dev/null +++ b/charts/paradedb/test/paradedb-enterprise/02-paradedb_test.yaml @@ -0,0 +1,46 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: paradedb-enterprise-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: paradedb-ncc-1701-d-app + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + psql "$DB_URI" <<-EOSQL + CALL paradedb.create_bm25_test_table( + schema_name => 'public', + table_name => 'mock_items' + ); + CALL paradedb.create_bm25( + index_name => 'search_idx', + table_name => 'mock_items', + key_field => 'id', + text_fields => paradedb.field('description') || paradedb.field('category'), + numeric_fields => paradedb.field('rating'), + boolean_fields => paradedb.field('in_stock'), + datetime_fields => paradedb.field('created_at'), + json_fields => paradedb.field('metadata'), + range_fields => paradedb.field('weight_range') + ); + EOSQL + RESULT=$(psql "$DB_URI" -t) <<-EOSQL + SELECT description + FROM mock_items + WHERE description @@@ '"bluetooth speaker"~1' + LIMIT 1; + EOSQL + echo -$RESULT- + test "$RESULT" = " Bluetooth-enabled speaker" diff --git a/charts/paradedb/test/paradedb-enterprise/chainsaw-test.yaml b/charts/paradedb/test/paradedb-enterprise/chainsaw-test.yaml new file mode 100644 index 000000000..2aa74fe67 --- /dev/null +++ b/charts/paradedb/test/paradedb-enterprise/chainsaw-test.yaml @@ -0,0 +1,45 @@ +## +# This test sets up a ParadeDB Enterprise Cluster and ensures that ParadeDB extensions are available +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: paradedb +spec: + timeouts: + apply: 1s + assert: 7m + cleanup: 1m + steps: + - name: Install a standalone ParadeDB CNPG Cluster + try: + - script: + content: | + kubectl -n $NAMESPACE create secret docker-registry paradedb-enterprise-registry-cred --docker-server="https://index.docker.io/v1/" --docker-username="paradedb" --docker-password="$PARADEDB_ENTERPRISE_DOCKER_PAT" + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-paradedb-NCC-1701-D_cluster.yaml \ + --wait \ + paradedb-ncc-1701-d ../../ + - assert: + file: ./01-paradedb-NCC-1701-D_cluster-assert.yaml + - name: Verify ParadeDB extensions are installed + timeouts: + apply: 1s + assert: 30s + try: + - apply: + file: 02-paradedb_test.yaml + - assert: + file: 02-paradedb_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE paradedb-ncc-1701-d diff --git a/charts/paradedb/values.yaml b/charts/paradedb/values.yaml index 7f545db8f..57fddf2d6 100644 --- a/charts/paradedb/values.yaml +++ b/charts/paradedb/values.yaml @@ -6,6 +6,7 @@ fullnameOverride: "" ### # -- Type of the CNPG database. 
Available types: # * `paradedb` +# * `paradedb-enterprise` type: paradedb version:
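For reference, a minimal end-to-end sketch of installing the enterprise type introduced in this patch, assembled from the registry-secret command and Helm flow documented above (the namespace, `USERNAME` and `ACCESS_TOKEN` are placeholders):

```bash
# Sketch only: create the image pull secret from the README note, then install
# the published chart with the enterprise type and matching imagePullSecrets.
kubectl create namespace paradedb
kubectl -n paradedb create secret docker-registry paradedb-enterprise-registry-cred \
  --docker-server="https://index.docker.io/v1/" \
  --docker-username="USERNAME" \
  --docker-password="ACCESS_TOKEN"

helm repo add paradedb https://paradedb.github.io/charts
helm upgrade --atomic --install paradedb \
  --namespace paradedb \
  --set type=paradedb-enterprise \
  --set "cluster.imagePullSecrets[0].name=paradedb-enterprise-registry-cred" \
  paradedb/paradedb
```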