diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index b24f7f16db..4c0e929786 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -48,12 +48,11 @@ body:
label: Version
description: What is the version of CloudNativePG you are running?
options:
- - 1.23.1
- - 1.22.3
- - 1.21.5
+ - 1.23.2
+ - 1.22.4
- trunk (main)
+ - older in 1.23.x
- older in 1.22.x
- - older in 1.21.x
- older minor (unsupported)
validations:
required: true
diff --git a/.github/e2e_test_timeout.json b/.github/e2e_test_timeout.json
index ea3c073ef8..5d46c47ef8 100644
--- a/.github/e2e_test_timeout.json
+++ b/.github/e2e_test_timeout.json
@@ -1,7 +1,7 @@
{
"local": {
- "failover": 240,
- "namespaceCreation": 30,
+ "failover": 240,
+ "namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
@@ -13,11 +13,12 @@
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
- "drainNode": 900
+ "drainNode": 900,
+ "short": 5
},
"aks": {
- "failover": 240,
- "namespaceCreation": 30,
+ "failover": 240,
+ "namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
@@ -29,11 +30,12 @@
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
- "drainNode": 900
+ "drainNode": 900,
+ "short": 10
},
"eks": {
- "failover": 240,
- "namespaceCreation": 30,
+ "failover": 240,
+ "namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
@@ -45,11 +47,12 @@
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
- "drainNode": 900
+ "drainNode": 900,
+ "short": 10
},
"gke": {
- "failover": 240,
- "namespaceCreation": 30,
+ "failover": 240,
+ "namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
@@ -61,11 +64,12 @@
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
- "drainNode": 900
+ "drainNode": 900,
+ "short": 10
},
"openshift": {
- "failover": 240,
- "namespaceCreation": 30,
+ "failover": 240,
+ "namespaceCreation": 30,
"clusterIsReady": 600,
"clusterIsReadyQuick": 300,
"clusterIsReadySlow": 800,
@@ -77,6 +81,7 @@
"walsInMinio": 60,
"minioInstallation": 300,
"backupIsReady": 180,
- "drainNode": 900
+ "drainNode": 900,
+ "short": 10
}
-}
\ No newline at end of file
+}
diff --git a/.github/eks_versions.json b/.github/eks_versions.json
index 0f74f8c9b5..9fbe6428a3 100644
--- a/.github/eks_versions.json
+++ b/.github/eks_versions.json
@@ -1,4 +1,5 @@
[
+ "1.30",
"1.29",
"1.28",
"1.27"
diff --git a/.github/pg_versions.json b/.github/pg_versions.json
index 1b9294dc4e..7fd3ddb4a4 100644
--- a/.github/pg_versions.json
+++ b/.github/pg_versions.json
@@ -1,4 +1,8 @@
{
+ "17": [
+ "17beta1",
+ "17beta1-1"
+ ],
"16": [
"16.3",
"16.2"
diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 6c8b1ae5be..964ebc860c 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -375,6 +375,16 @@
],
"separateMajorMinor": "false",
"pinDigests": false
+ },
+ {
+// PR group for spellcheck
+ "groupName": "spellcheck",
+ "matchPackagePrefixes": [
+ "jonasbn/github-action-spellcheck",
+ "rojopolis/spellcheck-github-actions",
+ ],
+ "separateMajorMinor": "false",
+ "pinDigests": false,
}
]
}
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 49302a33eb..ac27b8dfb7 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -36,8 +36,8 @@ env:
GOLANG_VERSION: "1.22.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.23.0"
- ROOK_VERSION: "v1.14.4"
- EXTERNAL_SNAPSHOTTER_VERSION: "v8.0.0"
+ ROOK_VERSION: "v1.14.5"
+ EXTERNAL_SNAPSHOTTER_VERSION: "v8.0.1"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
BUILD_PUSH_PROVENANCE: ""
BUILD_PUSH_CACHE_FROM: ""
@@ -47,7 +47,7 @@ env:
REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
REPOSITORY_OWNER: "cloudnative-pg"
SLACK_USERNAME: "cnpg-bot"
- BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --rm-dist --id manager"
+ BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager"
# Keep in mind that adding more platforms (architectures) will increase the building
# time even if we use the ghcache for the building process.
PLATFORMS: "linux/amd64,linux/arm64"
@@ -338,7 +338,7 @@ jobs:
echo PWD=$(pwd) >> $GITHUB_ENV
-
name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: latest
@@ -374,7 +374,7 @@ jobs:
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
- name: Login to ghcr.io
+ name: Login to container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
@@ -456,7 +456,7 @@ jobs:
# NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the
# upgrade test
name: Build binary for upgrade test
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
if: |
always() && !cancelled() &&
needs.evaluate_options.outputs.test_level == '4'
@@ -928,7 +928,7 @@ jobs:
creds: ${{ secrets.AZURE_CREDENTIALS }}
-
name: Install kubectl
- uses: azure/setup-kubectl@v3.2
+ uses: azure/setup-kubectl@v4
with:
version: v${{ env.K8S_VERSION }}
-
@@ -957,6 +957,7 @@ jobs:
# create and login to the AKS cluster
az aks create --resource-group ${{ secrets.AZURE_RESOURCEGROUP }} \
--name ${AZURE_AKS} \
+ --tier standard \
--node-count 3 -k v${K8S_VERSION} --generate-ssh-keys --enable-addons monitoring \
--workspace-resource-id ${{ secrets.AZURE_WORKSPACE_RESOURCE_ID }} \
--aks-custom-headers EnableAzureDiskFileCSIDriver=true
@@ -1958,6 +1959,72 @@ jobs:
find -type f -name "cloudnative-pg-catalog.yaml"
cat cloudnative-pg-catalog.yaml
KUBECONFIG=$(pwd)/hack/auth/kubeconfig bash -x hack/e2e/run-e2e-ocp.sh
+
+ -
+ # Summarize the failed E2E tests cases if there are any
+ name: Report failed E2E tests
+ if: failure()
+ run: |
+ set +x
+ chmod +x .github/report-failed-test.sh
+ ./.github/report-failed-test.sh
+ -
+ # Create an individual artifact for each E2E test, which will be used to
+ # generate the E2E test summary in the follow-up job 'summarize-e2e-tests'
+ name: Create individual artifact for each E2E test
+ if: (always() && !cancelled())
+ env:
+ RUNNER: "openshift"
+ RUN_ID: ${{ github.run_id }}
+ REPOSITORY: ${{ github.repository }}
+ GIT_REF: ${{ needs.evaluate_options.outputs.git_ref }}
+ run: |
+ set +x
+ python .github/generate-test-artifacts.py \
+ -o testartifacts-${{ env.MATRIX }} \
+ -f tests/e2e/out/report.json \
+ --environment=true
+ if [ -f tests/e2e/out/upgrade_report.json ]; then
+ python .github/generate-test-artifacts.py \
+ -o testartifacts-${{ env.MATRIX }} \
+ -f tests/e2e/out/upgrade_report.json \
+ --environment=true
+ fi
+ -
+ name: Archive test artifacts
+ if: (always() && !cancelled())
+ uses: actions/upload-artifact@v4
+ with:
+ name: testartifacts-${{ env.MATRIX }}
+ path: testartifacts-${{ env.MATRIX }}/
+ retention-days: 7
+ -
+ name: Cleanup test artifacts
+ if: always()
+ run:
+ rm -rf testartifacts-${{ env.MATRIX }}/
+ -
+ name: Cleanup ginkgo JSON report
+ # Delete report.json after the analysis. File should always exist.
+ # Delete upgrade_report.json. It may not exist depending on test level.
+ if: always()
+ run: |
+ if [ -f tests/e2e/out/upgrade_report.json ]; then
+ rm tests/e2e/out/upgrade_report.json
+ fi
+ if [ -f tests/e2e/out/report.json ]; then
+ rm tests/e2e/out/report.json
+ fi
+ -
+ name: Archive e2e failure contexts
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-failure-contexts-${{ matrix.id }}
+ path: |
+ tests/*/out/
+ retention-days: 7
+ if-no-files-found: ignore
-
name: Destroy OpenShift Cluster ${{ matrix.k8s_version }}
if: always()
@@ -1973,6 +2040,7 @@ jobs:
- e2e-eks
- e2e-aks
- e2e-gke
+ - e2e-openshift
if: |
(always() && !cancelled()) &&
((
@@ -1990,6 +2058,10 @@ jobs:
(
needs.e2e-gke.result == 'success' ||
needs.e2e-gke.result == 'failure'
+ ) ||
+ (
+ needs.e2e-openshift.result == 'success' ||
+ needs.e2e-openshift.result == 'failure'
))
runs-on: ubuntu-22.04
steps:
@@ -2014,7 +2086,7 @@ jobs:
- name: Compute the E2E test summary
id: generate-summary
- uses: cloudnative-pg/ciclops@v1.2.1
+ uses: cloudnative-pg/ciclops@v1.3.0
with:
artifact_directory: test-artifacts/data
@@ -2026,24 +2098,35 @@ jobs:
path: ${{ steps.generate-summary.outputs.Overflow }}
retention-days: 7
- - name: If there are alerts, send them over Slack
+ - name: Send the Ciclops view over Slack
+ # Send the Ciclops thermometer on every scheduled run on `main`,
+ # or when there are systematic failures in release branches
uses: rtCamp/action-slack-notify@v2
if: |
- steps.generate-summary.outputs.alerts &&
github.repository_owner == env.REPOSITORY_OWNER &&
(
github.event_name == 'schedule' ||
- startsWith(needs.evaluate_options.outputs.git_ref, 'refs/heads/release-')
+ (
+ steps.generate-summary.outputs.alerts &&
+ startsWith(needs.evaluate_options.outputs.git_ref, 'refs/heads/release-')
+ )
)
env:
- SLACK_COLOR: "danger"
+ # SLACK_COLOR is where we distinguish a run with/without alerts. It's where the
+ # action has hooks for conditionality in the message body (yeah, weird)
+ SLACK_COLOR: ${{ steps.generate-summary.outputs.alerts && 'failure' || 'success' }}
SLACK_ICON: https://avatars.githubusercontent.com/u/85171364?size=48
SLACK_USERNAME: ${{ env.SLACK_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
- SLACK_TITLE: CIclops found systematic failures in the E2E tests in ${{github.repository}} repository
- SLACK_MESSAGE: |
- :warning: ${{steps.generate-summary.outputs.alerts}}
- <${{ github.server_url }}/${{github.repository}}/actions/runs/${{ github.run_id }}|See full CI run>
+ SLACK_TITLE: CICLOPS view for ${{ github.repository }}
+ SLACK_MESSAGE_ON_SUCCESS: |
+ ${{ steps.generate-summary.outputs.thermometer }}
+ SLACK_MESSAGE_ON_FAILURE: |
+ ${{ steps.generate-summary.outputs.thermometer }}
+ :warning: *Systematic failures!*
+ ${{ steps.generate-summary.outputs.alerts }}
+ SLACK_FOOTER: |
+ <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|*See full CI run*>
- name: Delete the downloaded files
run: rm -rf test-artifacts
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 0f96e8790b..e0fa8f8b64 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -17,7 +17,7 @@ on:
# set up environment variables to be used across all the jobs
env:
GOLANG_VERSION: "1.22.x"
- GOLANGCI_LINT_VERSION: "v1.58"
+ GOLANGCI_LINT_VERSION: "v1.59"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.23.0"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
@@ -29,8 +29,8 @@ env:
BUILD_PUSH_PROVENANCE: ""
BUILD_PUSH_CACHE_FROM: ""
BUILD_PUSH_CACHE_TO: ""
- BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --rm-dist --id kubectl-cnpg --timeout 60m"
- BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --rm-dist --id manager"
+ BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --clean --id kubectl-cnpg --timeout 60m"
+ BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager"
REPOSITORY_OWNER: "cloudnative-pg"
REGISTRY: "ghcr.io"
REGISTRY_USER: ${{ github.actor }}
@@ -470,7 +470,7 @@ jobs:
echo PWD=$(pwd) >> $GITHUB_ENV
- name: Run GoReleaser to build kubectl plugin
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
if: |
github.event_name == 'schedule' ||
(
@@ -510,7 +510,7 @@ jobs:
SLACK_MESSAGE: Building kubernetes plugin failed!
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: latest
@@ -564,7 +564,7 @@ jobs:
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.REGISTRY_USER }}
- password: ${{ env.REGISTRY_PASSWORD }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Build for scan distroless image
uses: docker/build-push-action@v5
@@ -733,12 +733,12 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- - name: Login to ghcr.io
+ - name: Login to container registry
uses: docker/login-action@v3
with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
+ registry: ${{ env.REGISTRY }}
+ username: ${{ env.REGISTRY_USER }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Create bundle
env:
@@ -842,12 +842,12 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- - name: Login to ghcr.io
+ - name: Login to container registry
uses: docker/login-action@v3
with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
+ registry: ${{ env.REGISTRY }}
+ username: ${{ env.REGISTRY_USER }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Install Go
uses: actions/setup-go@v5
@@ -888,12 +888,12 @@ jobs:
repository: k8s-operatorhub/community-operators
persist-credentials: false
- - name: Login to ghcr.io
+ - name: Login to container registry
uses: redhat-actions/podman-login@v1
with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
+ registry: ${{ env.REGISTRY }}
+ username: ${{ env.REGISTRY_USER }}
+ password: ${{ env.REGISTRY_PASSWORD }}
- name: Download the bundle
uses: actions/download-artifact@v4
diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml
index 3e2092e5a6..34428a4851 100644
--- a/.github/workflows/release-publish.yml
+++ b/.github/workflows/release-publish.yml
@@ -138,11 +138,11 @@ jobs:
echo PWD=$(pwd) >> $GITHUB_ENV
-
name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: latest
- args: release --rm-dist --timeout 60m
+ args: release --clean --timeout 60m
env:
DATE: ${{ env.DATE }}
COMMIT: ${{ env.COMMIT }}
diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml
index aac3be00c7..1c06595fa2 100644
--- a/.github/workflows/require-labels.yml
+++ b/.github/workflows/require-labels.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Require labels
- uses: docker://agilepathway/pull-request-label-checker:v1.6.32
+ uses: docker://agilepathway/pull-request-label-checker:v1.6.38
with:
any_of: "ok to merge :ok_hand:"
none_of: "do not merge"
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index 070289e221..1bcedf7c0b 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
uses: actions/checkout@v4
- name: Spellcheck
- uses: rojopolis/spellcheck-github-actions@0.36.0
+ uses: rojopolis/spellcheck-github-actions@0.37.0
diff --git a/.goreleaser.yml b/.goreleaser.yml
index ca00fa753a..0ae062c0ea 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,3 +1,4 @@
+version: 2
project_name: cnpg
release:
@@ -100,7 +101,7 @@ snapshot:
name_template: "{{ .Tag }}-next"
changelog:
- skip: true
+ disable: true
signs:
- artifacts: checksum
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 2d7dc9e240..f44fb1683b 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -74,6 +74,7 @@ ClientCertsCASecret
ClientReplicationSecret
CloudNativePG
CloudNativePG's
+ClusterBinding
ClusterCondition
ClusterConditionType
ClusterIP
@@ -85,6 +86,7 @@ ClusterRole's
ClusterServiceVersion
ClusterSpec
ClusterStatus
+CodeQL
CodeReady
ColumnName
CompressionType
@@ -208,6 +210,7 @@ Linkerd
Linode
ListMeta
Liveness
+LivenessProbeTimeout
LoadBalancer
LocalObjectReference
MAPPEDMETRIC
@@ -733,6 +736,7 @@ goodwithtech
googleCredentials
goroutines
gosec
+govulncheck
grafana
gzip
hashicorp
@@ -831,6 +835,7 @@ linux
listmeta
liveness
livenessProbe
+livenessProbeTimeout
lm
localeCType
localeCollate
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 78bd30e53d..f777552517 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -40,3 +40,4 @@ This list is sorted in chronological order, based on the submission date.
| [Ænix](https://aenix.io) | @kvaps | 2024-02-11 | Ænix provides consulting services for cloud providers and uses CloudNativePG in free PaaS platform [Cozystack](https://cozystack.io) for running PostgreSQL-as-a-Service. |
| [IBM](https://www.ibm.com) | @pgodowski | 2024-02-20 | IBM uses CloudNativePG as the embedded SQL database within the family of [IBM Cloud Pak](https://www.ibm.com/cloud-paks) products, running as customer-managed software on top of [OpenShift Container Platform](https://www.redhat.com/en/technologies/cloud-computing/openshift/container-platform). |
| [Google Cloud](https://cloud.google.com/) | @mastersingh24 | 2024-03-12 | Leverage the full potential of cutting-edge PostgreSQL and CloudNativePG on [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) with EDB Community 360 PostgreSQL available in the [Google Cloud Marketplace](https://console.cloud.google.com/marketplace/product/public-edb-ppas/edb-postgresql). |
+| [Syself](https://syself.com) | @batistein | 2024-05-06 | Syself offers a simplified, multi-cloud Managed Kubernetes platform based on Cluster API and uses CloudNativePG for managing Postgres clusters in our internal infrastructure. |
diff --git a/Makefile b/Makefile
index a69b4758d4..a08904b49e 100644
--- a/Makefile
+++ b/Makefile
@@ -43,12 +43,12 @@ BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.4.2
CONTROLLER_TOOLS_VERSION ?= v0.15.0
-GORELEASER_VERSION ?= v1.26.2
-SPELLCHECK_VERSION ?= 0.36.0
+GORELEASER_VERSION ?= v2.0.0
+SPELLCHECK_VERSION ?= 0.37.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.34.2
-OPM_VERSION ?= v1.43.0
-PREFLIGHT_VERSION ?= 1.9.6
+OPM_VERSION ?= v1.43.1
+PREFLIGHT_VERSION ?= 1.9.7
OPENSHIFT_VERSIONS ?= v4.11-v4.15
ARCH ?= amd64
@@ -311,7 +311,7 @@ go-licenses: ## Download go-licenses locally if necessary.
GO_RELEASER = $(LOCALBIN)/goreleaser
go-releaser: ## Download go-releaser locally if necessary.
- $(call go-install-tool,$(GO_RELEASER),github.com/goreleaser/goreleaser@$(GORELEASER_VERSION))
+ $(call go-install-tool,$(GO_RELEASER),github.com/goreleaser/goreleaser/v2@$(GORELEASER_VERSION))
.PHONY: govulncheck
GOVULNCHECK = $(LOCALBIN)/govulncheck
diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go
index 68c0b460de..acce1ae8eb 100644
--- a/api/v1/backup_types.go
+++ b/api/v1/backup_types.go
@@ -21,7 +21,7 @@ import (
"sort"
"strings"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -326,11 +326,11 @@ func (backupStatus *BackupStatus) SetAsCompleted() {
}
// SetAsStarted marks a certain backup as started
-func (backupStatus *BackupStatus) SetAsStarted(targetPod *corev1.Pod, method BackupMethod) {
+func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) {
backupStatus.Phase = BackupPhaseStarted
backupStatus.InstanceID = &InstanceID{
- PodName: targetPod.Name,
- ContainerID: targetPod.Status.ContainerStatuses[0].ContainerID,
+ PodName: podName,
+ ContainerID: containerID,
}
backupStatus.Method = method
}
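
The `SetAsStarted` change above narrows the function's input from a whole `corev1.Pod` to just the pod name and container ID. Below is a minimal caller-side sketch, with a hypothetical helper name, showing how those two arguments can be derived while guarding against pods whose container statuses have not been reported yet (the situation behind the backup-controller panic fixed in this release):

```go
// Hypothetical caller-side sketch for the new SetAsStarted(podName, containerID, method)
// signature; not the operator's actual code.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// startedArgsFromPod derives the pod name and the ID of its first container,
// returning an empty container ID when no status has been reported yet.
func startedArgsFromPod(pod *corev1.Pod) (podName, containerID string) {
	podName = pod.Name
	if len(pod.Status.ContainerStatuses) > 0 {
		containerID = pod.Status.ContainerStatuses[0].ContainerID
	}
	return podName, containerID
}

func main() {
	pod := &corev1.Pod{}
	pod.Name = "cluster-example-1"
	pod.Status.ContainerStatuses = []corev1.ContainerStatus{
		{ContainerID: "containerd://abc123"},
	}

	podName, containerID := startedArgsFromPod(pod)
	fmt.Println(podName, containerID) // cluster-example-1 containerd://abc123
}
```
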
diff --git a/api/v1/backup_types_test.go b/api/v1/backup_types_test.go
index d20a9d6096..3828a1eac7 100644
--- a/api/v1/backup_types_test.go
+++ b/api/v1/backup_types_test.go
@@ -19,7 +19,7 @@ package v1
import (
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -46,7 +46,7 @@ var _ = Describe("BackupStatus structure", func() {
},
}
- status.SetAsStarted(&pod, BackupMethodBarmanObjectStore)
+ status.SetAsStarted(pod.Name, pod.Status.ContainerStatuses[0].ContainerID, BackupMethodBarmanObjectStore)
Expect(status.Phase).To(BeEquivalentTo(BackupPhaseStarted))
Expect(status.InstanceID).ToNot(BeNil())
Expect(status.InstanceID.PodName).To(Equal("cluster-example-1"))
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 9764550b0c..d4ab1a4eb4 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -29,6 +29,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
@@ -121,6 +122,10 @@ const (
// PGBouncerPoolerUserName is the name of the role to be used for
PGBouncerPoolerUserName = "cnpg_pooler_pgbouncer"
+
+ // MissingWALDiskSpaceExitCode is the exit code the instance manager
+ // will use to signal that there's no more WAL disk space
+ MissingWALDiskSpaceExitCode = 4
)
// SnapshotOwnerReference defines the reference type for the owner of the snapshot.
@@ -395,6 +400,13 @@ type ClusterSpec struct {
// +optional
FailoverDelay int32 `json:"failoverDelay,omitempty"`
+ // LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ // to successfully respond to the liveness probe (default 30).
+ // The Liveness probe failure threshold is derived from this value using the formula:
+ // ceiling(livenessProbeTimeout / 10).
+ // +optional
+ LivenessProbeTimeout *int32 `json:"livenessProbeTimeout,omitempty"`
+
// Affinity/Anti-affinity rules for Pods
// +optional
Affinity AffinityConfiguration `json:"affinity,omitempty"`
@@ -1319,7 +1331,6 @@ const (
// DefaultMaxSwitchoverDelay is the default for the pg_ctl timeout in seconds when a primary PostgreSQL instance
// is gracefully shutdown during a switchover.
- // It is greater than one year in seconds, big enough to simulate an infinite timeout
DefaultMaxSwitchoverDelay = 3600
// DefaultStartupDelay is the default value for startupDelay, startupDelay will be used to calculate the
@@ -3438,6 +3449,11 @@ func (cluster *Cluster) GetTablespaceConfiguration(name string) *TablespaceConfi
return nil
}
+// GetServerCASecretObjectKey returns a types.NamespacedName pointing to the secret
+func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName {
+ return types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.GetServerCASecretName()}
+}
+
// IsBarmanBackupConfigured returns true if one of the possible backup destination
// is configured, false otherwise
func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool {
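
The new `LivenessProbeTimeout` field documents its failure threshold as `ceiling(livenessProbeTimeout / 10)`. The following is a small sketch of that arithmetic, assuming a fixed 10-second probe period and an illustrative function name (not the operator's internal API):

```go
// Illustrative sketch of the documented relationship between
// livenessProbeTimeout and the liveness probe failure threshold,
// assuming a fixed 10-second probe period.
package main

import "fmt"

const livenessProbePeriodSeconds int32 = 10

// failureThresholdFor returns ceiling(timeoutSeconds / 10), never below 1.
func failureThresholdFor(timeoutSeconds int32) int32 {
	threshold := (timeoutSeconds + livenessProbePeriodSeconds - 1) / livenessProbePeriodSeconds
	if threshold < 1 {
		threshold = 1
	}
	return threshold
}

func main() {
	for _, timeout := range []int32{30, 45, 5} {
		fmt.Printf("livenessProbeTimeout=%ds -> failureThreshold=%d\n",
			timeout, failureThresholdFor(timeout))
	}
	// livenessProbeTimeout=30s -> failureThreshold=3
	// livenessProbeTimeout=45s -> failureThreshold=5
	// livenessProbeTimeout=5s -> failureThreshold=1
}
```

With the default of 30 seconds this yields a failure threshold of 3 probe periods.
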
diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go
index c78f384f66..9a0b723843 100644
--- a/api/v1/cluster_webhook.go
+++ b/api/v1/cluster_webhook.go
@@ -23,7 +23,7 @@ import (
"strconv"
"strings"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -1195,13 +1195,25 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
}
}
- if r.Spec.Instances > 1 && r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints] == "off" {
- result = append(
- result,
- field.Invalid(
- field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints),
- r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints],
- "`wal_log_hints` must be set to `on` when `instances` > 1"))
+ walLogHintsValue, walLogHintsSet := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints]
+ if walLogHintsSet {
+ walLogHintsActivated, err := postgres.ParsePostgresConfigBoolean(walLogHintsValue)
+ if err != nil {
+ result = append(
+ result,
+ field.Invalid(
+ field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints),
+ walLogHintsValue,
+ "invalid `wal_log_hints`. Must be a postgres boolean"))
+ }
+ if r.Spec.Instances > 1 && !walLogHintsActivated {
+ result = append(
+ result,
+ field.Invalid(
+ field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints),
+ r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints],
+ "`wal_log_hints` must be set to `on` when `instances` > 1"))
+ }
}
// verify the postgres setting min_wal_size < max_wal_size < volume size
@@ -2397,15 +2409,28 @@ func (r *Cluster) validatePgFailoverSlots() field.ErrorList {
}
const hotStandbyFeedbackKey = "hot_standby_feedback"
+ hotStandbyFeedbackActivated := false
hotStandbyFeedback, hasHotStandbyFeedback := r.Spec.PostgresConfiguration.Parameters[hotStandbyFeedbackKey]
+ if hasHotStandbyFeedback {
+ var err error
+ hotStandbyFeedbackActivated, err = postgres.ParsePostgresConfigBoolean(hotStandbyFeedback)
+ if err != nil {
+ result = append(
+ result,
+ field.Invalid(
+ field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey),
+ hotStandbyFeedback,
+ fmt.Sprintf("invalid `%s` value. Must be a postgres boolean", hotStandbyFeedbackKey)))
+ }
+ }
- if !hasHotStandbyFeedback || hotStandbyFeedback != "on" {
+ if !hotStandbyFeedbackActivated {
result = append(
result,
field.Invalid(
field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey),
hotStandbyFeedback,
- fmt.Sprintf("%s must be 'on' to use %s", hotStandbyFeedbackKey, pgFailoverSlots.Name)))
+ fmt.Sprintf("`%s` must be enabled to use %s extension", hotStandbyFeedbackKey, pgFailoverSlots.Name)))
}
if r.Spec.ReplicationSlots == nil {
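
The webhook changes above replace literal comparisons against `on` with `postgres.ParsePostgresConfigBoolean`, so values such as `true`, `yes`, or `1` are now accepted for `wal_log_hints` and `hot_standby_feedback`. Below is a hedged approximation of such a parser, assuming it mirrors PostgreSQL's boolean semantics; it is not the repository's actual implementation (PostgreSQL itself also accepts unambiguous prefixes):

```go
// Approximation of PostgreSQL-style boolean parsing used by the validation
// above; a sketch only, not postgres.ParsePostgresConfigBoolean.
package main

import (
	"fmt"
	"strings"
)

// parsePostgresBoolean accepts on/off, true/false, yes/no, and 1/0,
// case-insensitively, and rejects anything else.
func parsePostgresBoolean(value string) (bool, error) {
	switch strings.ToLower(strings.TrimSpace(value)) {
	case "on", "true", "yes", "1":
		return true, nil
	case "off", "false", "no", "0":
		return false, nil
	default:
		return false, fmt.Errorf("invalid PostgreSQL boolean: %q", value)
	}
}

func main() {
	for _, v := range []string{"on", "TRUE", "yes", "off", "foo"} {
		enabled, err := parsePostgresBoolean(v)
		fmt.Printf("%-5s -> enabled=%-5v err=%v\n", v, enabled, err)
	}
}
```

This matches the new test cases in the diff, where `wal_log_hints: "true"` and `hot_standby_feedback: "yes"` pass validation while `"foo"` is rejected.
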
diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go
index a8e452b936..29b1e80800 100644
--- a/api/v1/cluster_webhook_test.go
+++ b/api/v1/cluster_webhook_test.go
@@ -19,7 +19,7 @@ package v1
import (
"strings"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1328,6 +1328,20 @@ var _ = Describe("configuration change validation", func() {
})
Describe("wal_log_hints", func() {
+ It("should reject wal_log_hints set to an invalid value", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_log_hints": "foo",
+ },
+ },
+ },
+ }
+ Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ })
+
It("should allow wal_log_hints set to off for clusters having just one instance", func() {
cluster := Cluster{
ObjectMeta: metav1.ObjectMeta{
@@ -1396,7 +1410,7 @@ var _ = Describe("configuration change validation", func() {
Instances: 3,
PostgresConfiguration: PostgresConfiguration{
Parameters: map[string]string{
- "wal_log_hints": "on",
+ "wal_log_hints": "true",
},
},
},
@@ -3592,6 +3606,25 @@ var _ = Describe("Managed Extensions validation", func() {
Expect(cluster.validateManagedExtensions()).To(BeEmpty())
})
+ It("should fail if hot_standby_feedback is set to an invalid value", func() {
+ cluster := &Cluster{
+ Spec: ClusterSpec{
+ ReplicationSlots: &ReplicationSlotsConfiguration{
+ HighAvailability: &ReplicationSlotsHAConfiguration{
+ Enabled: ptr.To(true),
+ },
+ },
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "hot_standby_feedback": "foo",
+ "pg_failover_slots.synchronize_slot_names": "my_slot",
+ },
+ },
+ },
+ }
+ Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2))
+ })
+
It("should succeed if pg_failover_slots and its prerequisites are enabled", func() {
cluster := &Cluster{
Spec: ClusterSpec{
@@ -3629,7 +3662,7 @@ var _ = Describe("Managed Extensions validation", func() {
Spec: ClusterSpec{
PostgresConfiguration: PostgresConfiguration{
Parameters: map[string]string{
- "hot_standby_feedback": "on",
+ "hot_standby_feedback": "yes",
"pg_failover_slots.synchronize_slot_names": "my_slot",
},
},
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index ac994824e5..5b905e3434 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -826,6 +826,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(corev1.EphemeralVolumeSource)
(*in).DeepCopyInto(*out)
}
+ if in.LivenessProbeTimeout != nil {
+ in, out := &in.LivenessProbeTimeout, &out.LivenessProbeTimeout
+ *out = new(int32)
+ **out = **in
+ }
in.Affinity.DeepCopyInto(&out.Affinity)
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go
index fffb8f7f89..f54ccefe39 100644
--- a/cmd/kubectl-cnpg/main.go
+++ b/cmd/kubectl-cnpg/main.go
@@ -66,6 +66,8 @@ func main() {
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
+ plugin.ConfigureColor(cmd)
+
// If we're invoking the completion command we shouldn't try to create
// a Kubernetes client and we just let the Cobra flow to continue
if cmd.Name() == "completion" || cmd.Name() == "version" ||
@@ -80,27 +82,34 @@ func main() {
logFlags.AddFlags(rootCmd.PersistentFlags())
configFlags.AddFlags(rootCmd.PersistentFlags())
- rootCmd.AddCommand(certificate.NewCmd())
- rootCmd.AddCommand(destroy.NewCmd())
- rootCmd.AddCommand(fence.NewCmd())
- rootCmd.AddCommand(fio.NewCmd())
- rootCmd.AddCommand(hibernate.NewCmd())
- rootCmd.AddCommand(install.NewCmd())
- rootCmd.AddCommand(maintenance.NewCmd())
- rootCmd.AddCommand(pgbench.NewCmd())
- rootCmd.AddCommand(promote.NewCmd())
- rootCmd.AddCommand(reload.NewCmd())
- rootCmd.AddCommand(report.NewCmd())
- rootCmd.AddCommand(restart.NewCmd())
- rootCmd.AddCommand(status.NewCmd())
- rootCmd.AddCommand(versions.NewCmd())
- rootCmd.AddCommand(backup.NewCmd())
- rootCmd.AddCommand(psql.NewCmd())
- rootCmd.AddCommand(snapshot.NewCmd())
- rootCmd.AddCommand(logs.NewCmd())
- rootCmd.AddCommand(pgadmin.NewCmd())
- rootCmd.AddCommand(publication.NewCmd())
- rootCmd.AddCommand(subscription.NewCmd())
+ subcommands := []*cobra.Command{
+ backup.NewCmd(),
+ certificate.NewCmd(),
+ destroy.NewCmd(),
+ fence.NewCmd(),
+ fio.NewCmd(),
+ hibernate.NewCmd(),
+ install.NewCmd(),
+ logs.NewCmd(),
+ maintenance.NewCmd(),
+ pgadmin.NewCmd(),
+ pgbench.NewCmd(),
+ promote.NewCmd(),
+ psql.NewCmd(),
+ publication.NewCmd(),
+ reload.NewCmd(),
+ report.NewCmd(),
+ restart.NewCmd(),
+ snapshot.NewCmd(),
+ status.NewCmd(),
+ subscription.NewCmd(),
+ versions.NewCmd(),
+ }
+
+ for _, cmd := range subcommands {
+ plugin.AddColorControlFlag(cmd)
+ rootCmd.AddCommand(cmd)
+ }
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
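
The plugin now wires `plugin.ConfigureColor` and a color control flag into every subcommand. Here is a rough sketch of the kind of decision logic involved, assuming an `always`/`never`/`auto` flag and terminal detection on standard output; the names are illustrative, not the plugin's internal API:

```go
// Illustrative sketch of --color=always|never|auto handling: colors are
// forced on or off when requested, and "auto" falls back to checking
// whether stdout is a terminal.
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

// shouldColorize decides whether ANSI colors should be emitted.
func shouldColorize(colorFlag string) bool {
	switch colorFlag {
	case "always":
		return true
	case "never":
		return false
	default: // "auto"
		return term.IsTerminal(int(os.Stdout.Fd()))
	}
}

func main() {
	for _, mode := range []string{"always", "never", "auto"} {
		fmt.Printf("--color=%s -> colorize=%v\n", mode, shouldColorize(mode))
	}
}
```
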
diff --git a/cmd/main.go b/cmd/main.go
deleted file mode 120000
index 9dc22243d8..0000000000
--- a/cmd/main.go
+++ /dev/null
@@ -1 +0,0 @@
-../internal/cmd/manager/controller/controller.go
\ No newline at end of file
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 33a44276dd..b381d2091c 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -2899,6 +2899,14 @@ spec:
description: Number of instances required in the cluster
minimum: 1
type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbeTimeout / 10).
+ format: int32
+ type: integer
logLevel:
default: info
description: 'The instances'' log level, one of the following values:
diff --git a/config/olm-rbac/role_global.yaml b/config/olm-rbac/role_global.yaml
index ca6d6488be..42d88c4447 100644
--- a/config/olm-rbac/role_global.yaml
+++ b/config/olm-rbac/role_global.yaml
@@ -4,14 +4,6 @@ kind: ClusterRole
metadata:
name: manager
rules:
-- apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - list
- - watch
- apiGroups:
- ""
resources:
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 9b5f4888fd..51a2dac502 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -31,14 +31,6 @@ rules:
verbs:
- create
- patch
-- apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - get
- - list
- - watch
- apiGroups:
- ""
resources:
diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md
index ca51c75ea3..29fc1b5a69 100644
--- a/contribute/e2e_testing_environment/README.md
+++ b/contribute/e2e_testing_environment/README.md
@@ -203,7 +203,7 @@ exported, it will select all medium test cases from the feature type provided.
| `storage` |
| `security` |
| `maintenance` |
-| `prometheus` |
+| `tablespaces` |
ex:
```shell
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 819a949509..6f6511b68d 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -1746,6 +1746,16 @@ after the primary PostgreSQL instance in the cluster was detected
to be unhealthy
+livenessProbeTimeout
+int32
+ |
+
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+to successfully respond to the liveness probe (default 30).
+The Liveness probe failure threshold is derived from this value using the formula:
+ceiling(livenessProbeTimeout / 10).
+ |
+
affinity
AffinityConfiguration
|
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index 6c3ca0f014..8fa864fddd 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -7,12 +7,12 @@
The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.1.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.2.yaml)
for this minor release as follows:
```sh
kubectl apply --server-side -f \
- https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.1.yaml
+ https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.23/releases/cnpg-1.23.2.yaml
```
You can verify that with:
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index df99e0cc29..d285838ce7 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -34,6 +34,10 @@ broken state and needs to be restarted. The value in `startDelay` is used
to delay the probe's execution, preventing an
instance with a long startup time from being restarted.
+The amount of time needed for a Pod to be classified as not alive is
+configurable in the `.spec.livenessProbeTimeout` parameter, which
+defaults to 30 seconds.
+
The interval (in seconds) after the Pod has started before the liveness
probe starts working is expressed in the `.spec.startDelay` parameter,
which defaults to 3600 seconds. The correct value for your cluster is
@@ -94,3 +98,43 @@ the WAL files. By default it is set to `3600` (1 hour).
In case of primary pod failure, the cluster will go into failover mode.
Please refer to the ["Failover" section](failover.md) for details.
+
+## Disk Full Failure
+
+Storage exhaustion is a well-known issue for PostgreSQL clusters.
+The [PostgreSQL documentation](https://www.postgresql.org/docs/current/disk-full.html)
+highlights the possible failure scenarios and the importance of monitoring disk
+usage to prevent it from becoming full.
+
+The same applies to CloudNativePG and Kubernetes: the
+["Monitoring" section](monitoring.md#predefined-set-of-metrics)
+provides details on checking the disk space used by WAL segments and standard
+metrics on disk usage exported to Prometheus.
+
+!!! Important
+ In a production system, it is critical to monitor the database
+ continuously. Exhausted disk storage can lead to a database server shutdown.
+
+!!! Note
+ The detection of exhausted storage relies on a storage class that
+ accurately reports disk size and usage. This may not be the case in simulated
+ Kubernetes environments like Kind or with test storage class implementations
+ such as `csi-driver-host-path`.
+
+If the disk containing the WALs becomes full and no more WAL segments can be
+stored, PostgreSQL will stop working. CloudNativePG correctly detects this issue
+by verifying that there is enough space to store the next WAL segment,
+and avoids triggering a failover, which could complicate recovery.
+
+This allows a human administrator to address the root cause.
+
+In such a case, if supported by the storage class, the quickest course of action
+is currently to:
+1. Expand the storage size of the full PVC
+2. Increase the size in the `Cluster` resource to the same value
+
+Once the issue is resolved and there is sufficient free space for WAL segments,
+the Pod will restart and the cluster will become healthy.
+
+See also the ["Volume expansion" section](storage.md#volume-expansion) of the
+documentation.
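
The "Disk Full Failure" section above describes detection as checking whether there is room for the next WAL segment, and the API changes introduce `MissingWALDiskSpaceExitCode = 4`. Below is a Linux-only illustrative sketch of that idea, assuming the default 16 MiB WAL segment size and an example `pg_wal` path; it is not the operator's actual implementation:

```go
// Sketch: before failing hard, verify that the volume hosting pg_wal still
// has room for one more WAL segment, and exit with the dedicated exit code
// when it does not. Illustration of the concept only.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

const (
	walSegmentSizeBytes         = 16 * 1024 * 1024 // default PostgreSQL wal_segment_size
	missingWALDiskSpaceExitCode = 4
)

// hasRoomForNextWALSegment reports whether the filesystem hosting walDir
// still has at least one WAL segment worth of free space.
func hasRoomForNextWALSegment(walDir string) (bool, error) {
	var stat unix.Statfs_t
	if err := unix.Statfs(walDir, &stat); err != nil {
		return false, err
	}
	freeBytes := stat.Bavail * uint64(stat.Bsize)
	return freeBytes >= walSegmentSizeBytes, nil
}

func main() {
	// Example path; the actual pg_wal location depends on the instance layout.
	ok, err := hasRoomForNextWALSegment("/var/lib/postgresql/data/pgdata/pg_wal")
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot check WAL disk space:", err)
		os.Exit(1)
	}
	if !ok {
		fmt.Fprintln(os.Stderr, "no space left for the next WAL segment")
		os.Exit(missingWALDiskSpaceExitCode)
	}
	fmt.Println("enough space for the next WAL segment")
}
```
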
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index fcf457fdc7..6f759bbc31 100755
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -171,6 +171,10 @@ Once the plugin was installed and deployed, you can start using it like this:
kubectl cnpg
```
+!!! Note
+ The plugin automatically detects whether the standard output channel is connected to a terminal.
+ When it is, the plugin may add ANSI colors to the command output. To disable colors, use the
+ `--color=never` option with the command.
### Generation of installation manifests
The `cnpg` plugin can be used to generate the YAML manifest for the
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md
index 5442c6b495..b3a647595e 100644
--- a/docs/src/monitoring.md
+++ b/docs/src/monitoring.md
@@ -441,6 +441,29 @@ A list of basic monitoring queries can be found in the
[`default-monitoring.yaml` file](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/config/manager/default-monitoring.yaml)
that is already installed in your CloudNativePG deployment (see ["Default set of metrics"](#default-set-of-metrics)).
+#### Example of a user defined metric with predicate query
+
+The `predicate_query` option allows the user to execute the `query` to collect the metrics only under the specified conditions.
+To do so, the user needs to provide a predicate query that returns at most one row with a single `boolean` column.
+
+The predicate query is executed in the same transaction as the main query and against the same databases.
+
+```yaml
+some_query: |
+ predicate_query: |
+ SELECT
+ some_bool as predicate
+ FROM some_table
+ query: |
+ SELECT
+ count(*) as rows
+ FROM some_table
+ metrics:
+ - rows:
+ usage: "GAUGE"
+ description: "number of rows"
+```
+
#### Example of a user defined metric running on multiple databases
If the `target_databases` option lists more than one database
@@ -546,6 +569,8 @@ Here is a short description of all the available fields:
- `target_databases`: a list of databases to run the `query` against,
or a [shell-like pattern](#example-of-a-user-defined-metric-running-on-multiple-databases)
to enable auto discovery. Overwrites the default database if provided.
+ - `predicate_query`: a SQL query, run on the target database, that returns at most one row with a single `boolean` column.
+ The system evaluates the predicate and, if `true`, executes the `query`.
- `metrics`: section containing a list of all exported columns, defined as follows:
- ``: the name of the column returned by the query
- `usage`: one of the values described below
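
The `predicate_query` behaviour documented above (executed in the same transaction as the main query, returning at most one row with a single boolean column) can be sketched with plain `database/sql`; the driver, connection string, and queries below are illustrative assumptions rather than the exporter's actual code:

```go
// Sketch of the predicate_query idea: evaluate the predicate inside the same
// transaction as the metric query and skip collection unless it returns true.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver (illustrative choice)
)

// collectIfPredicate returns the metric value and whether it was collected.
func collectIfPredicate(db *sql.DB, predicateQuery, metricQuery string) (int64, bool, error) {
	tx, err := db.Begin()
	if err != nil {
		return 0, false, err
	}
	defer func() { _ = tx.Rollback() }()

	var predicate bool
	// At most one row with a single boolean column is expected here.
	if err := tx.QueryRow(predicateQuery).Scan(&predicate); err != nil && err != sql.ErrNoRows {
		return 0, false, err
	}
	if !predicate {
		return 0, false, nil // predicate false (or no rows): skip the metric query
	}

	var rows int64
	if err := tx.QueryRow(metricQuery).Scan(&rows); err != nil {
		return 0, false, err
	}
	return rows, true, tx.Commit()
}

func main() {
	db, err := sql.Open("pgx", "postgres://app@localhost:5432/app")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	value, collected, err := collectIfPredicate(db,
		"SELECT some_bool AS predicate FROM some_table",
		"SELECT count(*) AS rows FROM some_table")
	fmt.Println(value, collected, err)
}
```
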
diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md
index 074cec1cad..1a4aa0ff39 100644
--- a/docs/src/operator_capability_levels.md
+++ b/docs/src/operator_capability_levels.md
@@ -100,9 +100,10 @@ PVC template in the CR's `storage` parameter.
For better performance and finer control, you can also choose to host your
cluster's write-ahead log (WAL, also known as `pg_wal`) on a separate volume,
preferably on different storage.
-The [`cnp-bench`](https://github.com/EnterpriseDB/cnp-bench) open source
-project can be used to benchmark both the storage and the database prior to
-production.
+The ["Benchmarking"](benchmarking.md) section of the documentation provides
+detailed instructions on benchmarking both storage and the database before
+production. It relies on the `cnpg` plugin to ensure optimal performance and
+reliability.
### Replica configuration
diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md
index d1fee6ce7b..383375e923 100644
--- a/docs/src/quickstart.md
+++ b/docs/src/quickstart.md
@@ -1,7 +1,8 @@
# Quickstart
-This section describes how to test a PostgreSQL cluster on your laptop/computer
-using CloudNativePG on a local Kubernetes cluster in [Kind](https://kind.sigs.k8s.io/) or
+This section guides you through testing a PostgreSQL cluster on your local machine by
+deploying CloudNativePG on a local Kubernetes cluster
+using either [Kind](https://kind.sigs.k8s.io/) or
[Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/).
!!! Warning
@@ -204,7 +205,7 @@ After completion, you will have Prometheus, Grafana and Alert Manager installed
- The Grafana installation will be watching for a Grafana dashboard `ConfigMap`.
!!! Seealso
- For further information about the above command see the [helm install](https://helm.sh/docs/helm/helm_install/)
+ For further information about the above command, refer to the [helm install](https://helm.sh/docs/helm/helm_install/)
documentation.
You can see several Custom Resources have been created:
@@ -265,7 +266,7 @@ kubectl port-forward svc/prometheus-community-kube-prometheus 9090
Then access the Prometheus console locally at: [`http://localhost:9090/`](http://localhost:9090/)
-Assuming that the monitoring stack was successfully deployed, and you have a Cluster with `enablePodMonitor: true`
+Assuming that the monitoring stack was successfully deployed, and you have a Cluster with `enablePodMonitor: true`,
you should find a series of metrics relating to CloudNativePG clusters. Again, please
refer to the [*monitoring section*](monitoring.md) for more information.
@@ -314,5 +315,5 @@ file and manually importing it via the GUI.
![local grafana](images/grafana-local.png)
-Note that in our example setup, both Prometheus and Grafana will pick up
-any other CloudNativePG clusters deployed with Monitoring activated.
+Note that in our local setup, Prometheus and Grafana are configured to automatically discover
+and monitor any CloudNativePG clusters deployed with the Monitoring feature enabled.
diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index 1e61de4ed3..39d48c5222 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -2,6 +2,7 @@
History of user-visible changes for CloudNativePG, classified for each minor release.
+
- [CloudNativePG 1.23](release_notes/v1.23.md)
- [CloudNativePG 1.22](release_notes/v1.22.md)
- [CloudNativePG 1.21](release_notes/v1.21.md)
diff --git a/docs/src/release_notes/v1.21.md b/docs/src/release_notes/v1.21.md
index 02537dc6fb..a7110d33c2 100644
--- a/docs/src/release_notes/v1.21.md
+++ b/docs/src/release_notes/v1.21.md
@@ -6,16 +6,82 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.21)
on the release branch in GitHub.
-## Version 1.21.5
+## Version 1.21.6
-**Release date:** Apr 24, 2024
+**Release date:** Jun 12, 2024
!!! Warning
- Version 1.21 is approaching its End-of-Life (EOL) on May 24, 2024.
- If you haven't already, please begin planning for an upgrade promptly to
- ensure continued support and security.
+ This is expected to be the last release in the 1.21.X series.
+ Users are encouraged to update to a newer minor version soon.
+
+### Enhancements:
+
+- Enabled configuration of standby-sensitive parameters during recovery using a
+ physical backup (#4564)
+
+- Enabled the configuration of the liveness probe timeout via the
+ `.spec.livenessProbeTimeout` option (#4719)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Enhanced support for ANSI colors in the plugin by adding the `--color`
+ option, which accepts `always`, `never`, and `auto` (default) as values
+ (#4775)
+ - The plugin is now available on Homebrew for macOS users (#4602)
+
+### Fixes:
+
+- Prevented fenced instances from entering an unnecessary loop and consuming
+ all available CPU (#4625)
+
+- Resolved an issue where the instance manager on the primary would
+ indefinitely wait for the instance to start after encountering a failure
+ following a stop operation (#4434)
+
+- Fixed an issue where the interaction between `hot_standby_feedback` and
+ managed cluster-level replication slots was preventing the autovacuum from
+ operating correctly; this issue was causing disk space to remain occupied by
+ dead tuples (#4811)
+
+- Fixed a panic in the backup controller that occurred when pod container
+ statuses were missing (#4765)
+
+- Prevented unnecessary shutdown of the instance manager (#4670)
+
+- Prevented unnecessary reloads of PostgreSQL configuration when unchanged (#4531)
+
+- Prevented unnecessary reloads of the ident map by ensuring a consistent and
+ unique method of writing its content (#4648)
+
+- Avoided conflicts during phase registration by patching the status of the
+ resource instead of updating it (#4637)
+
+- Implemented a timeout when restarting PostgreSQL and lifting fencing (#4504)
+
+- Ensured that a replica cluster is restarted after promotion to properly set
+ the archive mode (#4399)
+
+- Removed an unneeded concurrent keep-alive routine that was causing random
+ failures in volume snapshot backups (#4768)
+
+- Ensured correct parsing of the additional rows field returned when the
+ `pgaudit.log_rows` option was enabled, preventing audit logs from being
+ incorrectly routed to the normal log stream (#4394)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Resolved an issue with listing PDBs using the `cnpg status` command (#4530)
+
+### Changes
+
+- Default operand image set to PostgreSQL 16.3 (#4584)
+- Removed all RBAC requirements on namespace objects (#4753)
+
+## Version 1.21.5
+
+**Release date:** Apr 24, 2024
-Enhancements:
+### Enhancements:
- Users can now configure the `wal_log_hints` PostgreSQL parameter (#4218)
(#4218)
@@ -26,7 +92,7 @@ Enhancements:
- Error detection when invoking `barman-cloud-wal-restore` in `recovery`
bootstrap (#4101)
-Fixes:
+### Fixes:
- Ensured that before a switchover, the elected replica is in streaming
replication (#4288)
@@ -43,7 +109,7 @@ Fixes:
- Gracefully handle databases with no sequences in `sync-sequences` command
(#4346)
-Changes:
+### Changes:
- The Grafana dashboard now resides at
https://github.com/cloudnative-pg/grafana-dashboards (#4154)
@@ -102,7 +168,7 @@ Changes:
**Release date:** Feb 2, 2024
-Enhancements:
+### Enhancements:
- Tailor ephemeral volume storage in a Postgres cluster using a claim template
through the `ephemeralVolumeSource` option (#3678)
@@ -113,7 +179,7 @@ Enhancements:
- Allow customization of PostgreSQL's ident map file via the
`.spec.postgresql.pg_ident` stanza, through a list of user name maps (#3534)
-Fixes:
+### Fixes:
- Prevent an unrecoverable issue with `pg_rewind` failing due to
`postgresql.auto.conf` being read-only on clusters where the `ALTER SYSTEM`
@@ -133,7 +199,7 @@ Fixes:
**Release date:** Dec 21, 2023
-Security:
+### Security:
- By default, TLSv1.3 is now enforced on all PostgreSQL 12 or higher
installations. Additionally, users can configure the `ssl_ciphers`,
@@ -141,7 +207,7 @@ Security:
- Integration of Docker image scanning with Dockle and Snyk to enhance security
measures (#3300).
-Enhancements:
+### Enhancements:
- Improved reconciliation of external clusters (#3533).
- Introduction of the ability to enable/disable the `ALTER SYSTEM` command (#3535).
@@ -162,7 +228,7 @@ Enhancements:
- Addition of the `cnpg.io/podRole` label with a value of 'pooler' to every
pooler deployment, differentiating them from instance pods (#3396).
-Fixes:
+### Fixes:
- Reconciliation of metadata, annotations, and labels of `PodDisruptionBudget`
resources (#3312 and #3434).
@@ -181,7 +247,7 @@ Fixes:
- Reconciliation of the service of a `Pooler` and addition of the required labels (#3349).
- Extension of `Pooler` labels to the deployment as well, not just the pods (#3350).
-Changes:
+### Changes:
- Default operand image set to PostgreSQL 16.1 (#3270).
@@ -189,7 +255,7 @@ Changes:
**Release date:** Nov 3, 2023
-Enhancements:
+### Enhancements:
- Introduce support for online/hot backups with volume snapshots by using the
PostgreSQL API for physical online base backups. Default configuration for
@@ -212,7 +278,7 @@ Enhancements:
- Allow the configuration of `max_prepared_statements` with the pgBouncer
`Pooler` resource (#3174)
-Fixes:
+### Fixes:
- Suspend WAL archiving during a switchover and resume it when it is completed
(#3227)
@@ -224,7 +290,7 @@ Fixes:
- Reduce the number of labels in `VolumeSnapshots` resources and render them
into more appropriate annotations (#3151)
-Changes:
+### Changes:
- Volume snapshot backups, introduced in 1.21.0, are now online/hot by default;
in order to restore offline/cold backups set `.spec.backup.volumeSnapshot` to
@@ -232,7 +298,7 @@ Changes:
- Stop using the `postgresql.auto.conf` file inside PGDATA to control Postgres
replication settings, and replace it with a file named `override.conf` (#2812)
-Technical enhancements:
+### Technical enhancements:
- Use extended query protocol for PostgreSQL in the instance manager (#3152)
@@ -247,7 +313,7 @@ Technical enhancements:
carefully read the "Important Changes" section below, as well as the
[upgrade documentation](../installation_upgrade.md).
-Features:
+### Features:
- **Volume Snapshot support for backup and recovery:** leverage the standard
Kubernetes API on Volume Snapshots to take advantage of capabilities like
@@ -260,7 +326,7 @@ Features:
through the *stable* channel. Many thanks to EDB for donating the bundle of
their "EDB Postgres for Kubernetes" operator and adapting it for CloudNativePG.
-Important Changes:
+### Important Changes:
- Change the default value of `stopDelay` to 1800 seconds instead of 30 seconds
(#2848)
@@ -279,11 +345,11 @@ Important Changes:
- Stop supporting the `postgresql` label - replaced by `cnpg.io/cluster` in
1.18 (#2744)
-Security:
+### Security:
- Add a default `seccompProfile` to the operator deployment (#2926)
-Enhancements:
+### Enhancements:
- Enable bootstrap of a replica cluster from a consistent set of volume
snapshots (#2647)
@@ -306,7 +372,7 @@ Enhancements:
- Add primary timestamp and uptime to the kubectl plugin's `status` command
(#2953)
-Fixes:
+### Fixes:
- Ensure that the primary instance is always recreated first by prioritizing
ready PVCs with a primary role (#2544)
@@ -340,7 +406,7 @@ Fixes:
Grafana dashboard
- Enforce `standard_conforming_strings` in metric collection (#2888)
-Changes:
+### Changes:
- Set the default operand image to PostgreSQL 16.0
- Fencing now uses PostgreSQL's fast shutdown instead of smart shutdown to halt
@@ -354,7 +420,7 @@ Changes:
- Add the `cnpg.io/instanceRole` label while deprecating the existing `role`
label (#2915)
-Technical enhancements:
+### Technical enhancements:
- Replace `k8s-api-docgen` with `gen-crd-api-reference-docs` to automatically
build the API reference documentation (#2606)
diff --git a/docs/src/release_notes/v1.22.md b/docs/src/release_notes/v1.22.md
index 0e6c7f8507..0004fb36ea 100644
--- a/docs/src/release_notes/v1.22.md
+++ b/docs/src/release_notes/v1.22.md
@@ -6,11 +6,83 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.22)
on the release branch in GitHub.
+## Version 1.22.4
+
+**Release date:** Jun 12, 2024
+
+!!! Warning
+ Version 1.22 is approaching its End-of-Life (EOL) on Jul 24, 2024.
+ If you haven't already, please begin planning for an upgrade promptly to
+ ensure continued support and security.
+
+### Enhancements:
+
+- Enabled configuration of standby-sensitive parameters during recovery using a
+ physical backup (#4564)
+
+- Enabled the configuration of the liveness probe timeout via the
+ `.spec.livenessProbeTimeout` option (#4719)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Enhanced support for ANSI colors in the plugin by adding the `--color`
+ option, which accepts `always`, `never`, and `auto` (default) as values
+ (#4775)
+ - The plugin is now available on Homebrew for macOS users (#4602)
+
+### Fixes:
+
+- Prevented fenced instances from entering an unnecessary loop and consuming
+ all available CPU (#4625)
+
+- Resolved an issue where the instance manager on the primary would
+ indefinitely wait for the instance to start after encountering a failure
+ following a stop operation (#4434)
+
+- Fixed an issue where the interaction between `hot_standby_feedback` and
+ managed cluster-level replication slots was preventing the autovacuum from
+ operating correctly; this issue was causing disk space to remain occupied by
+ dead tuples (#4811)
+
+- Fixed a panic in the backup controller that occurred when pod container
+ statuses were missing (#4765)
+
+- Prevented unnecessary shutdown of the instance manager (#4670)
+
+- Prevented unnecessary reloads of PostgreSQL configuration when unchanged (#4531)
+
+- Prevented unnecessary reloads of the ident map by ensuring a consistent and
+ unique method of writing its content (#4648)
+
+- Avoided conflicts during phase registration by patching the status of the
+ resource instead of updating it (#4637)
+
+- Implemented a timeout when restarting PostgreSQL and lifting fencing (#4504)
+
+- Ensured that a replica cluster is restarted after promotion to properly set
+ the archive mode (#4399)
+
+- Removed an unneeded concurrent keep-alive routine that was causing random
+ failures in volume snapshot backups (#4768)
+
+- Ensured correct parsing of the additional rows field returned when the
+ `pgaudit.log_rows` option was enabled, preventing audit logs from being
+ incorrectly routed to the normal log stream (#4394)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Resolved an issue with listing PDBs using the `cnpg status` command (#4530)
+
+### Changes
+
+- Default operand image set to PostgreSQL 16.3 (#4584)
+- Removed all RBAC requirements on namespace objects (#4753)
+
## Version 1.22.3
**Release date:** Apr 24, 2024
-Enhancements:
+### Enhancements:
- Users can now configure the `wal_log_hints` PostgreSQL parameter (#4218)
(#4218)
@@ -21,7 +93,7 @@ Enhancements:
- Error detection when invoking `barman-cloud-wal-restore` in `recovery`
bootstrap (#4101)
-Fixes:
+### Fixes:
- Ensured that before a switchover, the elected replica is in streaming
replication (#4288)
@@ -38,7 +110,7 @@ Fixes:
- Gracefully handle databases with no sequences in `sync-sequences` command
(#4346)
-Changes:
+### Changes:
- The Grafana dashboard now resides at
https://github.com/cloudnative-pg/grafana-dashboards (#4154)
@@ -136,7 +208,7 @@ Fixes:
previous version who wish to retain the old behavior: please refer to the
[upgrade documentation](../installation_upgrade.md) for detailed instructions.
-Features:
+### Features:
- **Declarative Tablespaces**: Introducing the `tablespaces` stanza in the
`Cluster` spec, enabling comprehensive lifecycle management of PostgreSQL
@@ -147,7 +219,7 @@ Features:
operations, by incorporating the name into the `temp_tablespaces` PostgreSQL
parameter (#3464).
-Security:
+### Security:
- By default, TLSv1.3 is now enforced on all PostgreSQL 12 or higher
installations. Additionally, users can configure the `ssl_ciphers`,
@@ -155,7 +227,7 @@ Security:
- Integration of Docker image scanning with Dockle and Snyk to enhance security
measures (#3300).
-Enhancements:
+### Enhancements:
- Improved reconciliation of external clusters (#3533).
- Introduction of the ability to enable/disable the `ALTER SYSTEM` command (#3535).
diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/v1.23.md
index 043cf13510..f1b96be524 100644
--- a/docs/src/release_notes/v1.23.md
+++ b/docs/src/release_notes/v1.23.md
@@ -6,11 +6,78 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.23)
on the release branch in GitHub.
+## Version 1.23.2
+
+**Release date:** Jun 12, 2024
+
+### Enhancements:
+
+- Enabled configuration of standby-sensitive parameters during recovery using a
+ physical backup (#4564)
+
+- Enabled the configuration of the liveness probe timeout via the
+ `.spec.livenessProbeTimeout` option (#4719)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Enhanced support for ANSI colors in the plugin by adding the `--color`
+ option, which accepts `always`, `never`, and `auto` (default) as values
+ (#4775)
+ - The plugin is now available on Homebrew for macOS users (#4602)
+
+### Fixes:
+
+- Prevented fenced instances from entering an unnecessary loop and consuming
+ all available CPU (#4625)
+
+- Resolved an issue where the instance manager on the primary would
+ indefinitely wait for the instance to start after encountering a failure
+ following a stop operation (#4434)
+
+- Fixed an issue where the interaction between `hot_standby_feedback` and
+ managed cluster-level replication slots was preventing the autovacuum from
+ operating correctly; this issue was causing disk space to remain occupied by
+ dead tuples (#4811)
+
+- Fixed a panic in the backup controller that occurred when pod container
+ statuses were missing (#4765)
+
+- Prevented unnecessary shutdown of the instance manager (#4670)
+
+- Prevented unnecessary reloads of PostgreSQL configuration when unchanged (#4531)
+
+- Prevented unnecessary reloads of the ident map by ensuring a consistent and
+ unique method of writing its content (#4648)
+
+- Avoided conflicts during phase registration by patching the status of the
+ resource instead of updating it (#4637)
+
+- Implemented a timeout when restarting PostgreSQL and lifting fencing (#4504)
+
+- Ensured that a replica cluster is restarted after promotion to properly set
+ the archive mode (#4399)
+
+- Removed an unneeded concurrent keep-alive routine that was causing random
+ failures in volume snapshot backups (#4768)
+
+- Ensured correct parsing of the additional rows field returned when the
+ `pgaudit.log_rows` option was enabled, preventing audit logs from being
+ incorrectly routed to the normal log stream (#4394)
+
+- `cnpg` plugin for `kubectl`:
+
+ - Resolved an issue with listing PDBs using the `cnpg status` command (#4530)
+
+### Changes:
+
+- Default operand image set to PostgreSQL 16.3 (#4584)
+- Removed all RBAC requirements on namespace objects (#4753)
+
## Version 1.23.1
**Release date:** Apr 29, 2024
-Fixes:
+### Fixes:
- Corrects the reconciliation of `PodMonitor` resources, which was
failing due to a regression (#4286)
@@ -24,7 +91,7 @@ Fixes:
minor release at a time, rather than two. Additionally, we've extended the
supplementary support period for the previous minor release to 3 months.
-Features:
+### Features:
- **PostgreSQL Image Catalogs:** Introduced `ClusterImageCatalog` and
`ImageCatalog` CRDs to manage operand container images based on PostgreSQL
@@ -41,7 +108,7 @@ Features:
useful for single-instance deployments. This feature is intended to replace the
node maintenance window feature.
-Enhancements:
+### Enhancements:
- Users now have the capability to transition an existing cluster into replica
mode, simplifying cross-datacenter switchover operations (#4261)
@@ -58,7 +125,7 @@ Enhancements:
- Error detection when invoking `barman-cloud-wal-restore` in `recovery`
bootstrap (#4101)
-Fixes:
+### Fixes:
- Ensured that before a switchover, the elected replica is in streaming
replication (#4288)
@@ -75,9 +142,10 @@ Fixes:
- Gracefully handle databases with no sequences in `sync-sequences` command
(#4346)
-Changes:
+### Changes:
- Operator images are now based on `gcr.io/distroless/static-debian12:nonroot`
(#4201)
- The Grafana dashboard now resides at
https://github.com/cloudnative-pg/grafana-dashboards (#4154)
+
diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md
new file mode 100644
index 0000000000..807126a551
--- /dev/null
+++ b/docs/src/release_notes/v1.24.md
@@ -0,0 +1,37 @@
+# Release notes for CloudNativePG 1.24
+
+History of user-visible changes in the 1.24 minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24)
+on the release branch in GitHub.
+
+## Version 1.24.0 RC1
+
+**Release date:** Jun 2x, 2024
+
+### Important changes:
+
+TODO
+
+### Features:
+
+- TODO: prevent failovers when disk space is exhausted (#4404)
+
+### Enhancements:
+
+- Enhanced control over exported metrics by making them subject to the value
+ returned by a custom query, which is run within the same transaction and
+ defined in the `predicate_query` field (#4503)
+
+### Fixes:
+
+TODO
+
+### Security:
+
+- TODO: add TLS communication between operator and instance manager (#4442)
+
+### Changes:
+
+TODO
diff --git a/docs/src/samples/cluster-restore-pvc.yaml b/docs/src/samples/cluster-restore-pvc.yaml
deleted file mode 100644
index 0caf0f1974..0000000000
--- a/docs/src/samples/cluster-restore-pvc.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore
-spec:
- instances: 3
-
- storage:
- size: 1Gi
-
- bootstrap:
- recovery:
- volumeSnapshots:
- storage:
- name: cluster-example-3
- kind: PersistentVolumeClaim
- apiGroup: ""
-
diff --git a/docs/src/security.md b/docs/src/security.md
index 2bd27e8126..265e2852b0 100644
--- a/docs/src/security.md
+++ b/docs/src/security.md
@@ -16,46 +16,78 @@ that are analyzed at 3 different layers: Code, Container and Cluster.
## Code
-Source code of CloudNativePG is *systematically scanned* for static analysis purposes,
-including **security problems**, using a popular open-source linter for Go called
-[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline.
-GolangCI-Lint can run several *linters* on the same source code.
-
-One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`,
-a linter that scans the abstract syntactic tree of the source against a set of rules aimed at
-the discovery of well-known vulnerabilities, threats, and weaknesses hidden in
-the code such as hard-coded credentials, integer overflows and SQL injections - to name a few.
+CloudNativePG's source code undergoes systematic static analysis, including
+checks for security vulnerabilities, using the popular open-source linter for
+Go, [GolangCI-Lint](https://github.com/golangci/golangci-lint), directly
+integrated into the CI/CD pipeline. GolangCI-Lint can run multiple linters on
+the same source code.
+
+The following tools are used to identify security issues:
+
+- **[Golang Security Checker](https://github.com/securego/gosec) (`gosec`):** A
+ linter that scans the abstract syntax tree of the source code against a set
+ of rules designed to detect known vulnerabilities, threats, and weaknesses,
+ such as hard-coded credentials, integer overflows, and SQL injections.
+ GolangCI-Lint runs `gosec` as part of its suite.
+
+- **[govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck):** This
+ tool runs in the CI/CD pipeline and reports known vulnerabilities affecting
+ Go code or the compiler. If the operator is built with a version of the Go
+ compiler containing a known vulnerability, `govulncheck` will detect it.
+
+- **[CodeQL](https://codeql.github.com/):** Provided by GitHub, this tool scans
+ for security issues and blocks any pull request with detected
+ vulnerabilities. CodeQL is configured to review only Go code, excluding other
+ languages in the repository such as Python or Bash.
+
+- **[Snyk](https://snyk.io/):** Conducts nightly code scans in a scheduled job
+ and generates weekly reports highlighting any new findings related to code
+ security and licensing issues.
+
+The CloudNativePG repository has the *"Private vulnerability reporting"* option
+enabled in the [Security section](https://github.com/cloudnative-pg/cloudnative-pg/security).
+This feature allows users to safely report security issues that require careful
+handling before being publicly disclosed. If you discover any security bug,
+please use this medium to report it.
!!! Important
- A failure in the static code analysis phase of the CI/CD pipeline is a blocker
- for the entire delivery of CloudNativePG, meaning that each commit is validated
- against all the linters defined by GolangCI-Lint.
+ A failure in the static code analysis phase of the CI/CD pipeline will
+ block the entire delivery process of CloudNativePG. Every commit must pass all
+ the linters defined by GolangCI-Lint.
## Container
-Every container image that is part of CloudNativePG is automatically built via CI/CD pipelines following every commit.
-Such images include not only the operator's, but also the operands' - specifically every supported PostgreSQL version.
-Within the pipelines, images are scanned with:
+Every container image in CloudNativePG is automatically built via CI/CD
+pipelines following every commit. These images include not only the operator's
+image but also the operands' images, specifically for every supported
+PostgreSQL version. During the CI/CD process, images undergo scanning with the
+following tools:
-- [Dockle](https://github.com/goodwithtech/dockle): for best practices in terms
- of the container build process
+- **[Dockle](https://github.com/goodwithtech/dockle):** Ensures best practices
+ in the container build process.
+- **[Snyk](https://snyk.io/):** Detects security issues within the container
+ and reports findings via the GitHub interface.
!!! Important
- All operand images are automatically rebuilt once a day by our pipelines in case
- of security updates at the base image and package level, providing **patch level updates**
- for the container images that the community distributes.
+ All operand images are automatically rebuilt daily by our pipelines to
+ incorporate security updates at the base image and package level, providing
+ **patch-level updates** for the container images distributed to the community.
+
+### Guidelines and Frameworks for Container Security
-The following guidelines and frameworks have been taken into account for container-level security:
+The following guidelines and frameworks have been considered for ensuring
+container-level security:
-- the ["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf),
- developed by the Defense Information Systems Agency (DISA) of the United States Department of Defense (DoD)
-- the ["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/),
- developed by the Center for Internet Security (CIS)
+- **["Container Image Creation and Deployment Guide"](https://dl.dod.cyber.mil/wp-content/uploads/devsecops/pdf/DevSecOps_Enterprise_Container_Image_Creation_and_Deployment_Guide_2.6-Public-Release.pdf):**
+ Developed by the Defense Information Systems Agency (DISA) of the United States
+ Department of Defense (DoD).
+- **["CIS Benchmark for Docker"](https://www.cisecurity.org/benchmark/docker/):**
+ Developed by the Center for Internet Security (CIS).
-!!! Seealso "About the Container level security"
- Please refer to ["Security and Containers in CloudNativePG"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql)
- blog article for more information about the approach that EDB has taken on
- security at the container level in CloudNativePG.
+!!! Seealso "About Container-Level Security"
+ For more information on the approach that EDB has taken regarding security
+ at the container level in CloudNativePG, please refer to the blog article
+ ["Security and Containers in CloudNativePG"](https://www.enterprisedb.com/blog/security-and-containers-cloud-native-postgresql).
## Cluster
@@ -297,13 +329,13 @@ section of the Kubernetes documentation for further information.
CloudNativePG exposes ports at operator, instance manager and operand
levels, as listed in the table below:
-System | Port number | Exposing | Name | Certificates | Authentication
-:--------------- | :----------- | :------------------ | :------------------ | :------------ | :--------------
-operator | 9443 | webhook server | `webhook-server` | TLS | Yes
-operator | 8080 | metrics | `metrics` | no TLS | No
-instance manager | 9187 | metrics | `metrics` | no TLS | No
-instance manager | 8000 | status | `status` | no TLS | No
-operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes
+System | Port number | Exposing | Name | Certificates | Authentication
+:--------------- | :----------- | :------------------ | :------------------ | :---------------------------- | :--------------
+operator | 9443 | webhook server | `webhook-server` | TLS | Yes
+operator | 8080 | metrics | `metrics` | no TLS | No
+instance manager | 9187 | metrics | `metrics` | no TLS | No
+instance manager | 8000 | status | `status` | TLS (no TLS in old releases) | No
+operand | 5432 | PostgreSQL instance | `postgresql` | optional TLS | Yes
### PostgreSQL
@@ -367,3 +399,4 @@ For further detail on how `pg_ident.conf` is managed by the operator, see the
CloudNativePG delegates encryption at rest to the underlying storage class. For
data protection in production environments, we highly recommend that you choose
a storage class that supports encryption at rest.
+
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index 4f9c6b7c7a..32781b0497 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -68,7 +68,6 @@ Git tags for versions are prepended with `v`.
|-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
| 1.23.x | Yes | April 24, 2024 | ~ October, 2024 | 1.27, 1.28, 1.29 | | 12 - 16 |
| 1.22.x | Yes | December 21, 2023 | July 24, 2024 | 1.26, 1.27, 1.28 | 1.29 | 12 - 16 |
-| 1.21.x | Yes | October 12, 2023 | May 24, 2024 | 1.25, 1.26, 1.27, 1.28 | 1.29 | 12 - 16 |
| main | No, development only | | | | | 12 - 16 |
The list of supported Kubernetes versions in the table depends on what
@@ -98,10 +97,11 @@ version of PostgreSQL, we might not be able to help you.
## Upcoming releases
-| Version | Release date | End of life | Supported Kubernetes versions |
-|-----------------|-----------------------|---------------------------|-------------------------------|
-| 1.23.0 | April 23, 2024 | - | - |
-| 1.24.0 | June/July, 2024 | - | - |
+| Version | Release date | End of life |
+|-----------------|-----------------------|---------------------------|
+| 1.24.0 | July 16, 2024 | Jan/Feb, 2025 |
+| 1.25.0 | Oct/Nov, 2024 | Apr/May, 2025 |
+| 1.26.0 | Feb, 2025 | Jul/Aug, 2025 |
!!! Note
Feature freeze happens one week before the release
@@ -116,6 +116,7 @@ version of PostgreSQL, we might not be able to help you.
| Version | Release date | End of life | Compatible Kubernetes versions |
|-----------------|-------------------|---------------------|--------------------------------|
+| 1.21.x | October 12, 2023 | Jun 12, 2024 | 1.25, 1.26, 1.27, 1.28 |
| 1.20.x | April 27, 2023 | January 21, 2024 | 1.24, 1.25, 1.26, 1.27 |
| 1.19.x | February 14, 2023 | November 3, 2023 | 1.23, 1.24, 1.25, 1.26 |
| 1.18.x | Nov 10, 2022 | June 12, 2023 | 1.23, 1.24, 1.25, 1.26, 1.27 |
diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md
index fff326d7a3..6b628337ae 100644
--- a/docs/src/troubleshooting.md
+++ b/docs/src/troubleshooting.md
@@ -629,14 +629,25 @@ kubectl cp POD:/var/lib/postgresql/data/pgdata/core.14177 core.14177
You now have the file. Make sure you free the space on the server by
removing the core dumps.
-## Some common issues
+## Some known issues
### Storage is full
-If one or more pods in the cluster are in `CrashloopBackoff` and logs
-suggest this could be due to a full disk, you probably have to increase the
-size of the instance's `PersistentVolumeClaim`. Please look at the
-["Volume expansion" section](storage.md#volume-expansion) in the documentation.
+If the storage is full, the PostgreSQL pods will not be able to write new
+data; if the disk containing the WAL segments fills up, PostgreSQL will shut
+down.
+
+If the logs report that the disk is full, increase the size of the affected
+PVC by editing its `spec.resources.requests.storage` field. Then update the
+`Cluster` resource with the new size so that the same change is applied to all
+the pods (see the example below). For more details, see the
+["Volume expansion" section](storage.md#volume-expansion) of the documentation.
+
+If the space for WAL segments is exhausted, the pod will be crash-looping and
+the cluster status will report `Not enough disk space`. Increasing the size of
+the PVC, and then of the `Cluster` resource, resolves the issue. See also the
+["Disk Full Failure" section](instance_manager.md#disk-full-failure).
+
### Pods are stuck in `Pending` state
diff --git a/go.mod b/go.mod
index 0e340d8005..f259efbc16 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg
go 1.22.0
-toolchain go1.22.3
+toolchain go1.22.4
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
@@ -16,13 +16,13 @@ require (
github.com/go-logr/logr v1.4.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
- github.com/jackc/pgx/v5 v5.5.5
+ github.com/jackc/pgx/v5 v5.6.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
- github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
+ github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
- github.com/onsi/ginkgo/v2 v2.18.0
+ github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0
github.com/prometheus/client_golang v1.19.1
@@ -34,8 +34,8 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/sys v0.20.0
+ golang.org/x/term v0.20.0
google.golang.org/grpc v1.64.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.30.1
@@ -45,7 +45,7 @@ require (
k8s.io/client-go v0.30.1
k8s.io/klog/v2 v2.120.1
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
- sigs.k8s.io/controller-runtime v0.18.2
+ sigs.k8s.io/controller-runtime v0.18.3
sigs.k8s.io/yaml v1.4.0
)
@@ -97,10 +97,10 @@ require (
github.com/xlab/treeprint v1.2.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sync v0.7.0 // indirect
- golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.0 // indirect
diff --git a/go.sum b/go.sum
index 8f757713b4..f9ccecf1b8 100644
--- a/go.sum
+++ b/go.sum
@@ -113,8 +113,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
-github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
+github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -133,8 +133,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 h1:j3YK74myEQRxR/srciTpOrm221SAvz6J5OVWbyfeXFo=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0/go.mod h1:FlyYFe32mPxKEPaRXKNxfX576d1AoCzstYDoOOnyMA4=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
@@ -160,8 +160,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.18.0 h1:W9Y7IWXxPUpAit9ieMOLI7PJZGaW22DTKgiVAuhDTLc=
-github.com/onsi/ginkgo/v2 v2.18.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -231,8 +231,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc h1:O9NuF4s+E/PvMIy+9IUZB9znFwUIXEWSstNjek6VpVg=
+golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -361,8 +361,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q=
-sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw=
+sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4=
+sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
diff --git a/hack/e2e/run-e2e-local.sh b/hack/e2e/run-e2e-local.sh
index 590cb86515..0dbf4f6a71 100755
--- a/hack/e2e/run-e2e-local.sh
+++ b/hack/e2e/run-e2e-local.sh
@@ -83,6 +83,7 @@ RC_GINKGO=0
export TEST_SKIP_UPGRADE=true
ginkgo --nodes=4 --timeout 3h --poll-progress-after=1200s --poll-progress-interval=150s \
${LABEL_FILTERS:+--label-filter "${LABEL_FILTERS}"} \
+ --force-newlines \
--output-dir "${ROOT_DIR}/tests/e2e/out/" \
--json-report "report.json" -v "${ROOT_DIR}/tests/e2e/..." || RC_GINKGO=$?
diff --git a/hack/e2e/run-e2e.sh b/hack/e2e/run-e2e.sh
index 268c0292c0..e5e839878c 100755
--- a/hack/e2e/run-e2e.sh
+++ b/hack/e2e/run-e2e.sh
@@ -108,6 +108,7 @@ if [[ "${TEST_UPGRADE_TO_V1}" != "false" ]] && [[ "${TEST_CLOUD_VENDOR}" != "ocp
unset DEBUG
unset TEST_SKIP_UPGRADE
ginkgo --nodes=1 --timeout 90m --poll-progress-after=1200s --poll-progress-interval=150s --label-filter "${LABEL_FILTERS}" \
+ --github-output --force-newlines \
--focus-file "${ROOT_DIR}/tests/e2e/upgrade_test.go" --output-dir "${ROOT_DIR}/tests/e2e/out" \
--json-report "upgrade_report.json" -v "${ROOT_DIR}/tests/e2e/..." || RC_GINKGO1=$?
@@ -144,6 +145,7 @@ RC_GINKGO2=0
export TEST_SKIP_UPGRADE=true
ginkgo --nodes=4 --timeout 3h --poll-progress-after=1200s --poll-progress-interval=150s \
${LABEL_FILTERS:+--label-filter "${LABEL_FILTERS}"} \
+ --github-output --force-newlines \
--output-dir "${ROOT_DIR}/tests/e2e/out/" \
--json-report "report.json" -v "${ROOT_DIR}/tests/e2e/..." || RC_GINKGO2=$?
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index af92167bc5..50243c995e 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -27,10 +27,10 @@ fi
KIND_NODE_DEFAULT_VERSION=v1.30.0
K3D_NODE_DEFAULT_VERSION=v1.30.0
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.13.0
-EXTERNAL_SNAPSHOTTER_VERSION=v8.0.0
-EXTERNAL_PROVISIONER_VERSION=v5.0.0
-EXTERNAL_RESIZER_VERSION=v1.11.0
-EXTERNAL_ATTACHER_VERSION=v4.6.0
+EXTERNAL_SNAPSHOTTER_VERSION=v8.0.1
+EXTERNAL_PROVISIONER_VERSION=v5.0.1
+EXTERNAL_RESIZER_VERSION=v1.11.1
+EXTERNAL_ATTACHER_VERSION=v4.6.1
K8S_VERSION=${K8S_VERSION-}
KUBECTL_VERSION=${KUBECTL_VERSION-}
CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION}
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index 683666dad0..66b8d8efe1 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -206,12 +206,6 @@ func RunController(
return err
}
- // Retrieve the Kubernetes cluster system UID
- if err = utils.DetectKubeSystemUID(ctx, kubeClient); err != nil {
- setupLog.Error(err, "unable to retrieve the Kubernetes cluster system UID")
- return err
- }
-
// Detect the available architectures
if err = utils.DetectAvailableArchitectures(); err != nil {
setupLog.Error(err, "unable to detect the available instance's architectures")
@@ -219,7 +213,6 @@ func RunController(
}
setupLog.Info("Kubernetes system metadata",
- "systemUID", utils.GetKubeSystemUID(),
"haveSCC", utils.HaveSecurityContextConstraints(),
"haveSeccompProfile", utils.HaveSeccompSupport(),
"haveVolumeSnapshot", utils.HaveVolumeSnapshot(),
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go
index ee0115589d..676a3c9a18 100644
--- a/internal/cmd/manager/instance/run/cmd.go
+++ b/internal/cmd/manager/instance/run/cmd.go
@@ -19,6 +19,8 @@ package run
import (
"context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
@@ -54,7 +56,13 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
)
-var scheme = runtime.NewScheme()
+var (
+ scheme = runtime.NewScheme()
+
+ // errNoFreeWALSpace is raised when there's not enough disk space
+ // to store two WAL files
+ errNoFreeWALSpace = fmt.Errorf("no free disk space for WALs")
+)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
@@ -67,6 +75,7 @@ func NewCmd() *cobra.Command {
var podName string
var clusterName string
var namespace string
+ var statusPortTLS bool
cmd := &cobra.Command{
Use: "run [flags]",
@@ -84,10 +93,17 @@ func NewCmd() *cobra.Command {
instance.Namespace = namespace
instance.PodName = podName
instance.ClusterName = clusterName
+ instance.StatusPortTLS = statusPortTLS
- return retry.OnError(retry.DefaultRetry, isRunSubCommandRetryable, func() error {
+ err := retry.OnError(retry.DefaultRetry, isRunSubCommandRetryable, func() error {
return runSubCommand(ctx, instance)
})
+
+ if errors.Is(err, errNoFreeWALSpace) {
+ os.Exit(apiv1.MissingWALDiskSpaceExitCode)
+ }
+
+ return err
},
PostRunE: func(cmd *cobra.Command, _ []string) error {
if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil {
@@ -105,7 +121,8 @@ func NewCmd() *cobra.Command {
"current cluster in k8s, used to coordinate switchover and failover")
cmd.Flags().StringVar(&namespace, "namespace", os.Getenv("NAMESPACE"), "The namespace of "+
"the cluster and of the Pod in k8s")
-
+ cmd.Flags().BoolVar(&statusPortTLS, "status-port-tls", false,
+ "Enable TLS for communicating with the operator")
return cmd
}
@@ -117,6 +134,15 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
"version", versions.Version,
"build", versions.Info)
+ setupLog.Info("Checking for free disk space for WALs before starting PostgreSQL")
+ hasDiskSpaceForWals, err := instance.CheckHasDiskSpaceForWAL(ctx)
+ if err != nil {
+ setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping")
+ } else if !hasDiskSpaceForWals {
+		setupLog.Info("Detected low disk space condition, not starting the instance")
+ return errNoFreeWALSpace
+ }
+
mgr, err := ctrl.NewManager(config.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Cache: cache.Options{
@@ -269,5 +295,14 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
return makeUnretryableError(err)
}
+ setupLog.Info("Checking for free disk space for WALs after PostgreSQL finished")
+ hasDiskSpaceForWals, err = instance.CheckHasDiskSpaceForWAL(ctx)
+ if err != nil {
+ setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping")
+ } else if !hasDiskSpaceForWals {
+		setupLog.Info("Detected low disk space condition")
+ return errNoFreeWALSpace
+ }
+
return nil
}
diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go
index 9bdca9cdcc..8c0dc276a0 100644
--- a/internal/cmd/manager/instance/run/lifecycle/run.go
+++ b/internal/cmd/manager/instance/run/lifecycle/run.go
@@ -20,7 +20,6 @@ import (
"context"
"database/sql"
"fmt"
- "os"
"sync"
"github.com/jackc/pgx/v5"
@@ -145,10 +144,9 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan
}
contextLogger.Debug("Verifying connection to DB")
- err = instance.WaitForSuperuserConnectionAvailable(ctx)
- if err != nil {
+ if err := instance.WaitForSuperuserConnectionAvailable(ctx); err != nil {
contextLogger.Error(err, "DB not available")
- os.Exit(1)
+ return fmt.Errorf("while verifying super user DB connection: %w", err)
}
contextLogger.Debug("Validating DB configuration")
diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go
index ee1cbc602a..a5d35058ba 100644
--- a/internal/cmd/plugin/backup/cmd.go
+++ b/internal/cmd/plugin/backup/cmd.go
@@ -19,11 +19,11 @@ package backup
import (
"context"
"fmt"
+ "slices"
"strconv"
"time"
"github.com/spf13/cobra"
- "golang.org/x/exp/slices"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/internal/cmd/plugin/color.go b/internal/cmd/plugin/color.go
new file mode 100644
index 0000000000..913b3d3545
--- /dev/null
+++ b/internal/cmd/plugin/color.go
@@ -0,0 +1,101 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/logrusorgru/aurora/v4"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
+// colorConfiguration represents how the output should be colorized.
+// It is a `pflag.Value`, therefore implements String(), Set(), Type()
+type colorConfiguration string
+
+const (
+ // colorAlways configures the output to always be colorized
+ colorAlways colorConfiguration = "always"
+	// colorAuto configures the output to be colorized only when attached to a terminal
+ colorAuto colorConfiguration = "auto"
+	// colorNever configures the output never to be colorized
+ colorNever colorConfiguration = "never"
+)
+
+// String returns the string representation
+func (e colorConfiguration) String() string {
+ return string(e)
+}
+
+// Set sets the color configuration
+func (e *colorConfiguration) Set(val string) error {
+ colorVal := colorConfiguration(val)
+ switch colorVal {
+ case colorAlways, colorAuto, colorNever:
+ *e = colorVal
+ return nil
+ default:
+ return fmt.Errorf("should be one of 'always', 'auto', or 'never'")
+ }
+}
+
+// Type returns the data type of the flag used for the color configuration
+func (e *colorConfiguration) Type() string {
+ return "string"
+}
+
+// ConfigureColor renews aurora.DefaultColorizer based on flags and TTY
+func ConfigureColor(cmd *cobra.Command) {
+ configureColor(cmd, term.IsTerminal(int(os.Stdout.Fd())))
+}
+
+func configureColor(cmd *cobra.Command, isTTY bool) {
+ colorFlag := cmd.Flag("color")
+ colorConfig := colorAuto // default config
+ if colorFlag != nil {
+ colorConfig = colorConfiguration(colorFlag.Value.String())
+ }
+
+ var shouldColorize bool
+ switch colorConfig {
+ case colorAlways:
+ shouldColorize = true
+ case colorNever:
+ shouldColorize = false
+ case colorAuto:
+ shouldColorize = isTTY
+ }
+
+ aurora.DefaultColorizer = aurora.New(
+ aurora.WithColors(shouldColorize),
+ aurora.WithHyperlinks(true),
+ )
+}
+
+// AddColorControlFlag adds color control flags to the command
+func AddColorControlFlag(cmd *cobra.Command) {
+ // By default, color is set to 'auto'
+ colorValue := colorAuto
+ cmd.Flags().Var(&colorValue, "color", "Control color output; options include 'always', 'auto', or 'never'")
+ _ = cmd.RegisterFlagCompletionFunc("color",
+ func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+ return []string{colorAlways.String(), colorAuto.String(), colorNever.String()},
+ cobra.ShellCompDirectiveDefault | cobra.ShellCompDirectiveKeepOrder
+ })
+}
diff --git a/internal/cmd/plugin/color_test.go b/internal/cmd/plugin/color_test.go
new file mode 100644
index 0000000000..6fabdb6e10
--- /dev/null
+++ b/internal/cmd/plugin/color_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "github.com/logrusorgru/aurora/v4"
+ "github.com/spf13/cobra"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Configure color", func() {
+ var cmd *cobra.Command
+ BeforeEach(func() {
+ cmd = &cobra.Command{
+ Use: "test",
+ }
+
+ AddColorControlFlag(cmd)
+ })
+
+ It("errors when the flag is invalid", func() {
+ err := cmd.ParseFlags([]string{"--color", "invalid"})
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("when set to auto, turns colorization on or off depending on the terminal attached", func() {
+ err := cmd.ParseFlags([]string{"--color", "auto"})
+ Expect(err).NotTo(HaveOccurred())
+
+ configureColor(cmd, false)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeFalse())
+
+ configureColor(cmd, true)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeTrue())
+ })
+
+ It("if the color flag is not set, defaults to auto", func() {
+ err := cmd.ParseFlags(nil)
+ Expect(err).NotTo(HaveOccurred())
+
+ configureColor(cmd, true)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeTrue())
+
+ configureColor(cmd, false)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeFalse())
+ })
+
+ It("enables color even on a non-terminal if the flag is set to always", func() {
+ err := cmd.ParseFlags([]string{"--color", "always"})
+ Expect(err).NotTo(HaveOccurred())
+
+ configureColor(cmd, false)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeTrue())
+ })
+
+ It("disables color if the flag is set to never", func() {
+ err := cmd.ParseFlags([]string{"--color", "never"})
+ Expect(err).NotTo(HaveOccurred())
+
+ configureColor(cmd, true)
+ Expect(aurora.DefaultColorizer.Config().Colors).To(BeFalse())
+ })
+})
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index 8bd2e7cc4b..ba593533d7 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -24,7 +24,7 @@ import (
"strings"
"time"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go
index 286aec0def..220c6bf3b9 100644
--- a/internal/cmd/plugin/report/logs.go
+++ b/internal/cmd/plugin/report/logs.go
@@ -62,9 +62,13 @@ func streamOperatorLogsToZip(
Options: podLogOptions,
Previous: true,
}
- fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n")
+ if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil {
+ return err
+ }
_ = streamPodLogs.Stream(ctx, writer)
- fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n")
+ if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil {
+ return err
+ }
streamPodLogs.Previous = false
if err := streamPodLogs.Stream(ctx, writer); err != nil {
@@ -118,15 +122,18 @@ func streamClusterLogsToZip(
podPointer := pod
streamPodLogs.Pod = &podPointer
- fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n")
+ if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil {
+ return err
+ }
// We ignore the error because it will error if there are no previous logs
_ = streamPodLogs.Stream(ctx, writer)
- fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n")
+ if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil {
+ return err
+ }
streamPodLogs.Previous = false
- err = streamPodLogs.Stream(ctx, writer)
- if err != nil {
+ if err := streamPodLogs.Stream(ctx, writer); err != nil {
return err
}
}
diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
index fc91d80aa9..b605605d21 100644
--- a/internal/controller/backup_controller.go
+++ b/internal/controller/backup_controller.go
@@ -23,7 +23,7 @@ import (
"reflect"
"time"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,6 +41,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
@@ -142,6 +143,17 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
contextLogger.Debug("Found cluster for backup", "cluster", clusterName)
+	// Store in the context the TLS configuration required to communicate with the Pods
+ ctx, err := certs.NewTLSConfigForContext(
+ ctx,
+ r.Client,
+ cluster.GetServerCASecretObjectKey(),
+ cluster.GetServiceReadWriteName(),
+ )
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
isRunning, err := r.isValidBackupRunning(ctx, &backup, &cluster)
if err != nil {
contextLogger.Error(err, "while running isValidBackupRunning")
@@ -209,7 +221,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
"cluster", cluster.Name,
"pod", pod.Name)
- // This backup has been started
+ // This backup can be started
if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil {
r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err)
tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("encountered an error while taking the backup: %w", err))
@@ -287,7 +299,8 @@ func (r *BackupReconciler) isValidBackupRunning(
return false, fmt.Errorf("unknown.spec.target received: %s", backup.Spec.Target)
}
- containerIsNotRestarted := backup.Status.InstanceID.ContainerID == pod.Status.ContainerStatuses[0].ContainerID
+ containerIsNotRestarted := utils.PodHasContainerStatuses(pod) &&
+ backup.Status.InstanceID.ContainerID == pod.Status.ContainerStatuses[0].ContainerID
isPodActive := utils.IsPodActive(pod)
if isCorrectPodElected && containerIsNotRestarted && isPodActive {
contextLogger.Info("Backup is already running on",
@@ -366,8 +379,16 @@ func (r *BackupReconciler) reconcileSnapshotBackup(
return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil
}
+ if !utils.PodHasContainerStatuses(*targetPod) {
+ return nil, fmt.Errorf("target pod lacks container statuses")
+ }
+
if len(backup.Status.Phase) == 0 || backup.Status.Phase == apiv1.BackupPhasePending {
- backup.Status.SetAsStarted(targetPod, apiv1.BackupMethodVolumeSnapshot)
+ backup.Status.SetAsStarted(
+ targetPod.Name,
+ targetPod.Status.ContainerStatuses[0].ContainerID,
+ apiv1.BackupMethodVolumeSnapshot,
+ )
// given that we use only kubernetes resources we can use the backup name as ID
backup.Status.BackupID = backup.Name
backup.Status.BackupName = backup.Name
@@ -562,7 +583,7 @@ func startInstanceManagerBackup(
) error {
// This backup has been started
status := backup.GetStatus()
- status.SetAsStarted(pod, backup.Spec.Method)
+ status.SetAsStarted(pod.Name, pod.Status.ContainerStatuses[0].ContainerID, backup.Spec.Method)
if err := postgres.PatchBackupStatusAndRetry(ctx, client, backup); err != nil {
return err
diff --git a/internal/controller/backup_controller_test.go b/internal/controller/backup_controller_test.go
index 0e29ab6db0..6890e2ad46 100644
--- a/internal/controller/backup_controller_test.go
+++ b/internal/controller/backup_controller_test.go
@@ -20,7 +20,7 @@ import (
"context"
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/internal/controller/backup_predicates.go b/internal/controller/backup_predicates.go
index 6e8f3d4775..3771ad4d81 100644
--- a/internal/controller/backup_predicates.go
+++ b/internal/controller/backup_predicates.go
@@ -19,7 +19,7 @@ package controller
import (
"context"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
diff --git a/internal/controller/backup_predicates_test.go b/internal/controller/backup_predicates_test.go
index 0ca8304baa..e7715ab37c 100644
--- a/internal/controller/backup_predicates_test.go
+++ b/internal/controller/backup_predicates_test.go
@@ -17,7 +17,7 @@ limitations under the License.
package controller
import (
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/event"
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 3014929e23..ad43549e70 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -44,6 +44,7 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/operatorclient"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
@@ -107,7 +108,6 @@ var ErrNextLoop = utils.ErrNextLoop
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;watch;delete;patch
// +kubebuilder:rbac:groups="",resources=configmaps/status,verbs=get;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;create;watch;delete;patch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;delete;patch;create;watch
@@ -248,6 +248,17 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return ctrl.Result{RequeueAfter: 1 * time.Second}, nil
}
+	// Store in the context the TLS configuration required to communicate with the Pods
+ ctx, err = certs.NewTLSConfigForContext(
+ ctx,
+ r.Client,
+ cluster.GetServerCASecretObjectKey(),
+ cluster.GetServiceReadWriteName(),
+ )
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
// Get the replication status
instancesStatus := r.StatusClient.GetStatusFromInstances(ctx, resources.instances)
@@ -304,6 +315,10 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr
}
+ if res, err := r.ensureNoFailoverOnFullDisk(ctx, cluster, instancesStatus); err != nil || !res.IsZero() {
+ return res, err
+ }
+
if res, err := replicaclusterswitch.Reconcile(ctx, r.Client, cluster, instancesStatus); res != nil || err != nil {
if res != nil {
return *res, nil
@@ -394,6 +409,39 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return hookResult.Result, hookResult.Err
}
+func (r *ClusterReconciler) ensureNoFailoverOnFullDisk(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ instances postgres.PostgresqlStatusList,
+) (ctrl.Result, error) {
+ contextLogger := log.FromContext(ctx).WithName("ensure_sufficient_disk_space")
+
+ var instanceNames []string
+ for _, state := range instances.Items {
+ if !isWALSpaceAvailableOnPod(state.Pod) {
+ instanceNames = append(instanceNames, state.Pod.Name)
+ }
+ }
+ if len(instanceNames) == 0 {
+ return ctrl.Result{}, nil
+ }
+
+ contextLogger = contextLogger.WithValues("instanceNames", instanceNames)
+ contextLogger.Warning(
+ "Insufficient disk space detected in a pod. PostgreSQL cannot proceed until the PVC group is enlarged",
+ )
+
+	reason := "Insufficient disk space detected in one or more pods is preventing PostgreSQL from running. " +
+		"Please verify your storage settings. Further information is available in .status.instancesReportedState"
+ registerPhaseErr := r.RegisterPhase(
+ ctx,
+ cluster,
+ "Not enough disk space",
+ reason,
+ )
+ return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr
+}
+
func (r *ClusterReconciler) handleSwitchover(
ctx context.Context,
cluster *apiv1.Cluster,
@@ -476,18 +524,6 @@ func (r *ClusterReconciler) getCluster(
return nil, fmt.Errorf("cannot get the managed resource: %w", err)
}
- var namespace corev1.Namespace
- if err := r.Get(ctx, client.ObjectKey{Namespace: "", Name: req.Namespace}, &namespace); err != nil {
- // This is a real error, maybe the RBAC configuration is wrong?
- return nil, fmt.Errorf("cannot get the containing namespace: %w", err)
- }
-
- if !namespace.DeletionTimestamp.IsZero() {
- // This happens when you delete a namespace containing a Cluster resource. If that's the case,
- // let's just wait for the Kubernetes to remove all object in the namespace.
- return nil, nil
- }
-
return cluster, nil
}
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go
index 222dbbf542..62711339c4 100644
--- a/internal/controller/cluster_create.go
+++ b/internal/controller/cluster_create.go
@@ -20,12 +20,12 @@ import (
"context"
"fmt"
"reflect"
+ "slices"
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/sethvargo/go-password/password"
- "golang.org/x/exp/slices"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go
index 9659e50ac3..4cb83d5937 100644
--- a/internal/controller/cluster_create_test.go
+++ b/internal/controller/cluster_create_test.go
@@ -19,7 +19,7 @@ package controller
import (
"context"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go
index 4d71749a59..8476891375 100644
--- a/internal/controller/cluster_status.go
+++ b/internal/controller/cluster_status.go
@@ -37,6 +37,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
)
@@ -810,3 +811,41 @@ func getPodsTopology(
return apiv1.Topology{SuccessfullyExtracted: true, Instances: data, NodesUsed: int32(len(nodesMap))}
}
+
+// isWALSpaceAvailableOnPod returns false when the Pod's PostgreSQL container
+// was terminated because it ran out of disk space for WALs
+func isWALSpaceAvailableOnPod(pod *corev1.Pod) bool {
+ isTerminatedForMissingWALDiskSpace := func(state *corev1.ContainerState) bool {
+ return state.Terminated != nil && state.Terminated.ExitCode == apiv1.MissingWALDiskSpaceExitCode
+ }
+
+ var pgContainerStatus *corev1.ContainerStatus
+ for i := range pod.Status.ContainerStatuses {
+ status := pod.Status.ContainerStatuses[i]
+ if status.Name == specs.PostgresContainerName {
+ pgContainerStatus = &pod.Status.ContainerStatuses[i]
+ break
+ }
+ }
+
+ // This is not an instance Pod as there's no PostgreSQL
+ // container
+ if pgContainerStatus == nil {
+ return true
+ }
+
+ // If the Pod was terminated because it didn't have enough disk
+ // space, then we have no disk space
+ if isTerminatedForMissingWALDiskSpace(&pgContainerStatus.State) {
+ return false
+ }
+
+	// The Pod is running but not ready yet, and its last termination was
+	// caused by missing disk space. Wait for it to become ready before
+	// classifying it as having enough disk space
+ if !pgContainerStatus.Ready && isTerminatedForMissingWALDiskSpace(&pgContainerStatus.LastTerminationState) {
+ return false
+ }
+
+ return true
+}
diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go
index fc0f5bae02..69c1d48efb 100644
--- a/internal/controller/cluster_upgrade.go
+++ b/internal/controller/cluster_upgrade.go
@@ -21,9 +21,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "io"
- "net/http"
- neturl "net/url"
"reflect"
corev1 "k8s.io/api/core/v1"
@@ -33,9 +30,9 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -192,8 +189,8 @@ func (r *ClusterReconciler) updatePrimaryPod(
contextLogger.Info("The primary needs to be restarted, we'll trigger a switchover to do that",
"reason", reason,
"currentPrimary", primaryPod.Name,
- "targetPrimary", targetInstance.Pod.Name,
- "podList", podList)
+ "targetPrimary", targetInstance.Pod.Name)
+ podList.LogStatus(ctx)
r.Recorder.Eventf(cluster, "Normal", "Switchover",
"Initiating switchover to %s to upgrade %s", targetInstance.Pod.Name, primaryPod.Name)
return true, r.setPrimaryInstance(ctx, cluster, targetInstance.Pod.Name)
@@ -612,7 +609,8 @@ func checkPodSpecIsOutdated(
}
envConfig := specs.CreatePodEnvConfig(*cluster, status.Pod.Name)
gracePeriod := int64(cluster.GetMaxStopDelay())
- targetPodSpec := specs.CreateClusterPodSpec(status.Pod.Name, *cluster, envConfig, gracePeriod)
+ tlsEnabled := instance.GetStatusSchemeFromPod(status.Pod).IsHTTPS()
+ targetPodSpec := specs.CreateClusterPodSpec(status.Pod.Name, *cluster, envConfig, gracePeriod, tlsEnabled)
// the bootstrap init-container could change image after an operator upgrade.
// If in-place upgrades of the instance manager are enabled, we don't need rollout.
@@ -742,7 +740,7 @@ func (r *ClusterReconciler) upgradeInstanceManager(
}
}
- err = upgradeInstanceManagerOnPod(ctx, postgresqlStatus.Pod, targetManager)
+ err = r.StatusClient.UpgradeInstanceManager(ctx, postgresqlStatus.Pod, targetManager)
if err != nil {
enrichedError := fmt.Errorf("while upgrading instance manager on %s (hash: %s): %w",
postgresqlStatus.Pod.Name,
@@ -766,54 +764,3 @@ func (r *ClusterReconciler) upgradeInstanceManager(
return nil
}
-
-// upgradeInstanceManagerOnPod upgrades an instance manager of a Pod via an HTTP PUT request.
-func upgradeInstanceManagerOnPod(
- ctx context.Context,
- pod *corev1.Pod,
- targetManager *utils.AvailableArchitecture,
-) error {
- binaryFileStream, err := targetManager.FileStream()
- if err != nil {
- return err
- }
- defer func() {
- err = binaryFileStream.Close()
- }()
-
- updateURL := url.Build(pod.Status.PodIP, url.PathUpdate, url.StatusPort)
- req, err := http.NewRequestWithContext(ctx, http.MethodPut, updateURL, nil)
- if err != nil {
- return err
- }
- req.Body = binaryFileStream
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- if errors.Is(err.(*neturl.Error).Err, io.EOF) {
- // This is perfectly fine as the instance manager will
- // synchronously update and this call won't return.
- return nil
- }
-
- return err
- }
-
- if resp.StatusCode == http.StatusOK {
- // This should not happen. See previous block.
- return nil
- }
-
- var body []byte
- body, err = io.ReadAll(resp.Body)
- if err != nil {
- return err
- }
-
- err = resp.Body.Close()
- if err != nil {
- return err
- }
-
- return fmt.Errorf(string(body))
-}
diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go
index 8f14430793..3190b00bcd 100644
--- a/internal/controller/replicas.go
+++ b/internal/controller/replicas.go
@@ -19,9 +19,9 @@ package controller
import (
"context"
"fmt"
+ "slices"
"time"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 81100f7322..c50087dd47 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+ "crypto/tls"
"database/sql"
"errors"
"fmt"
@@ -298,7 +299,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles(
return false, err
}
- reloadIdent, err := r.instance.RefreshPGIdent(cluster)
+ reloadIdent, err := r.instance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent)
if err != nil {
return false, err
}
@@ -394,30 +395,25 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster)
return err
}
log.Info("Found previous run flag", "filename", filename)
- enforcedParams, err := postgresManagement.GetEnforcedParametersThroughPgControldata(r.instance.PgData)
+ controldataParams, err := postgresManagement.LoadEnforcedParametersFromPgControldata(r.instance.PgData)
+ if err != nil {
+ return err
+ }
+ clusterParams, err := postgresManagement.LoadEnforcedParametersFromCluster(cluster)
if err != nil {
return err
}
- clusterParams := cluster.Spec.PostgresConfiguration.Parameters
options := make(map[string]string)
- for key, enforcedparam := range enforcedParams {
+ for key, enforcedparam := range controldataParams {
clusterparam, found := clusterParams[key]
if !found {
continue
}
- enforcedparamInt, err := strconv.Atoi(enforcedparam)
- if err != nil {
- return err
- }
- clusterparamInt, err := strconv.Atoi(clusterparam)
- if err != nil {
- return err
- }
// if the values from `pg_controldata` are higher than the cluster spec,
// they are the safer choice, so set them in config
- if enforcedparamInt > clusterparamInt {
- options[key] = enforcedparam
+ if enforcedparam > clusterparam {
+ options[key] = strconv.Itoa(enforcedparam)
}
}
if len(options) == 0 {
@@ -1005,6 +1001,31 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con
return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster))
}
+// refreshInstanceCertificateFromSecret receives a secret and refreshes the
+// in-memory server certificate held by the instance
+func (r *InstanceReconciler) refreshInstanceCertificateFromSecret(
+ secret *corev1.Secret,
+) error {
+ certData, ok := secret.Data[corev1.TLSCertKey]
+ if !ok {
+ return fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey)
+ }
+
+ keyData, ok := secret.Data[corev1.TLSPrivateKeyKey]
+ if !ok {
+ return fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey)
+ }
+
+ certificate, err := tls.X509KeyPair(certData, keyData)
+ if err != nil {
+ return fmt.Errorf("failed decoding Secret: %w", err)
+ }
+
+ r.instance.ServerCertificate = &certificate
+
+ return err
+}
+
// refreshCertificateFilesFromSecret receive a secret and rewrite the file
// corresponding to the server certificate
func (r *InstanceReconciler) refreshCertificateFilesFromSecret(
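The hunk above only stores the decoded keypair on the instance; how it is served is outside this diff. Purely as an illustration (the helper below is hypothetical and not part of this patch), an in-memory certificate such as `r.instance.ServerCertificate` can be exposed through a `tls.Config` `GetCertificate` callback, so a refreshed Secret is picked up without recreating the listener:

```go
package example

import (
	"crypto/tls"
	"fmt"
)

// buildServerTLSConfig is a hypothetical sketch: it serves whatever
// certificate getCert currently returns, so callers that refresh the
// certificate in memory never need to restart the TLS listener.
func buildServerTLSConfig(getCert func() *tls.Certificate) *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS13,
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert := getCert()
			if cert == nil {
				return nil, fmt.Errorf("server certificate not loaded yet")
			}
			return cert, nil
		},
	}
}
```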
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index 45ae366872..5c0dfab926 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -37,7 +37,8 @@ import (
)
// refreshServerCertificateFiles gets the latest server certificates files from the
-// secrets. Returns true if configuration has been changed
+// secrets, and may set the instance certificate if it was missing or outdated.
+// Returns true if the configuration has changed or the instance certificate has been updated
func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context, cluster *apiv1.Cluster) (bool, error) {
contextLogger := log.FromContext(ctx)
@@ -60,11 +61,20 @@ func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context,
return false, err
}
- return r.refreshCertificateFilesFromSecret(
+ changed, err := r.refreshCertificateFilesFromSecret(
ctx,
&secret,
postgresSpec.ServerCertificateLocation,
postgresSpec.ServerKeyLocation)
+ if err != nil {
+ return changed, err
+ }
+
+ if r.instance.ServerCertificate == nil || changed {
+ return changed, r.refreshInstanceCertificateFromSecret(&secret)
+ }
+
+ return changed, nil
}
// refreshReplicationUserCertificate gets the latest replication certificates from the
diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go
index 35b4e90dc6..1ee151a170 100644
--- a/internal/management/controller/roles/reconciler_test.go
+++ b/internal/management/controller/roles/reconciler_test.go
@@ -56,8 +56,9 @@ var _ = Describe("Role reconciler test", func() {
},
}
pgStringError := "while listing DB roles for DRM: " +
- "failed to connect to `host=/controller/run user=postgres database=postgres`: dial " +
- "error (dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory)"
+ "failed to connect to `user=postgres database=postgres`: " +
+ "/controller/run/.s.PGSQL.5432 (/controller/run): " +
+ "dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory"
result, err := Reconcile(context.TODO(), instance, cluster, mockClient)
Expect(err.Error()).To(BeEquivalentTo(pgStringError))
Expect(result).To(BeEquivalentTo(reconcile.Result{}))
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go
index 6ab9cd82d1..689439f1ab 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager.go
@@ -53,7 +53,9 @@ func (sm PostgresManager) List(
rows, err := db.QueryContext(
ctx,
- `SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn FROM pg_replication_slots
+ `SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn,
+ xmin IS NOT NULL OR catalog_xmin IS NOT NULL AS holds_xmin
+ FROM pg_replication_slots
WHERE NOT temporary AND slot_type = 'physical'`,
)
if err != nil {
@@ -71,6 +73,7 @@ func (sm PostgresManager) List(
&slot.Type,
&slot.Active,
&slot.RestartLSN,
+ &slot.HoldsXmin,
)
if err != nil {
return ReplicationSlotList{}, err
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
index 7441cae725..251832847c 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
@@ -87,9 +87,9 @@ var _ = Describe("PostgresManager", func() {
})
It("should successfully list replication slots", func() {
- rows := sqlmock.NewRows([]string{"slot_name", "slot_type", "active", "restart_lsn"}).
- AddRow("_cnpg_slot1", string(SlotTypePhysical), true, "lsn1").
- AddRow("slot2", string(SlotTypePhysical), true, "lsn2")
+ rows := sqlmock.NewRows([]string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}).
+ AddRow("_cnpg_slot1", string(SlotTypePhysical), true, "lsn1", false).
+ AddRow("slot2", string(SlotTypePhysical), true, "lsn2", false)
mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots").
WillReturnRows(rows)
diff --git a/internal/management/controller/slots/infrastructure/replicationslot.go b/internal/management/controller/slots/infrastructure/replicationslot.go
index 30992b969e..9cda0b2971 100644
--- a/internal/management/controller/slots/infrastructure/replicationslot.go
+++ b/internal/management/controller/slots/infrastructure/replicationslot.go
@@ -29,6 +29,7 @@ type ReplicationSlot struct {
Active bool `json:"active"`
RestartLSN string `json:"restartLSN,omitempty"`
IsHA bool `json:"isHA,omitempty"`
+ HoldsXmin bool `json:"holdsXmin,omitempty"`
}
// ReplicationSlotList contains a list of replication slots
diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go
index 15af422be7..aa15229684 100644
--- a/internal/management/controller/slots/runner/runner.go
+++ b/internal/management/controller/slots/runner/runner.go
@@ -177,8 +177,12 @@ func synchronizeReplicationSlots(
}
}
for _, slot := range slotsInLocal.Items {
- // We delete the slots not present on the primary or old HA replication slots.
- if !slotsInPrimary.Has(slot.SlotName) || slot.SlotName == mySlotName {
+ // Delete slots on standby with wrong state:
+ // * slots not present on the primary
+ // * the slot used by this node
+ // * slots holding xmin (this can happen on a former primary, and will prevent VACUUM from
+ //   removing tuples deleted by any later transaction)
+ if !slotsInPrimary.Has(slot.SlotName) || slot.SlotName == mySlotName || slot.HoldsXmin {
if err := localSlotManager.Delete(ctx, slot); err != nil {
return err
}
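Restating the rule above as a standalone predicate (a throwaway sketch, not a helper that exists in the codebase):

```go
package example

// shouldDropLocalSlot mirrors the condition used in synchronizeReplicationSlots:
// a local physical slot is dropped when it no longer exists on the primary, when
// it is this node's own slot, or when it pins xmin/catalog_xmin and would keep
// VACUUM from removing dead tuples.
func shouldDropLocalSlot(existsOnPrimary bool, slotName, mySlotName string, holdsXmin bool) bool {
	return !existsOnPrimary || slotName == mySlotName || holdsXmin
}
```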
diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go
index 2bd747e691..de1df33e0b 100644
--- a/internal/management/controller/slots/runner/runner_test.go
+++ b/internal/management/controller/slots/runner/runner_test.go
@@ -32,6 +32,7 @@ import (
type fakeSlot struct {
name string
restartLSN string
+ holdsXmin bool
}
type fakeSlotManager struct {
@@ -52,6 +53,7 @@ func (sm *fakeSlotManager) List(
RestartLSN: slot.restartLSN,
Type: infrastructure.SlotTypePhysical,
Active: false,
+ HoldsXmin: slot.holdsXmin,
})
}
return slotList, nil
@@ -88,7 +90,6 @@ func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.Replica
}
var _ = Describe("Slot synchronization", func() {
- ctx := context.TODO()
localPodName := "cluster-2"
localSlotName := "_cnpg_cluster_2"
slot3 := "cluster-3"
@@ -111,12 +112,12 @@ var _ = Describe("Slot synchronization", func() {
},
}
- It("can create slots in local from those on primary", func() {
+ It("can create slots in local from those on primary", func(ctx SpecContext) {
localSlotsBefore, err := local.List(ctx, &config)
Expect(err).ShouldNot(HaveOccurred())
Expect(localSlotsBefore.Items).Should(BeEmpty())
- err = synchronizeReplicationSlots(context.TODO(), primary, local, localPodName, &config)
+ err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
localSlotsAfter, err := local.List(ctx, &config)
@@ -126,13 +127,13 @@ var _ = Describe("Slot synchronization", func() {
Expect(localSlotsAfter.Has(slot4)).To(BeTrue())
Expect(local.slotsCreated).To(Equal(2))
})
- It("can update slots in local when ReplayLSN in primary advanced", func() {
+ It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) {
// advance slot3 in primary
newLSN := "0/308C4D8"
err := primary.Update(ctx, infrastructure.ReplicationSlot{SlotName: slot3, RestartLSN: newLSN})
Expect(err).ShouldNot(HaveOccurred())
- err = synchronizeReplicationSlots(context.TODO(), primary, local, localPodName, &config)
+ err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
localSlotsAfter, err := local.List(ctx, &config)
@@ -143,11 +144,11 @@ var _ = Describe("Slot synchronization", func() {
Expect(slot.RestartLSN).To(Equal(newLSN))
Expect(local.slotsUpdated).To(Equal(1))
})
- It("can drop slots in local when they are no longer in primary", func() {
+ It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) {
err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4})
Expect(err).ShouldNot(HaveOccurred())
- err = synchronizeReplicationSlots(context.TODO(), primary, local, localPodName, &config)
+ err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
Expect(err).ShouldNot(HaveOccurred())
localSlotsAfter, err := local.List(ctx, &config)
@@ -156,4 +157,22 @@ var _ = Describe("Slot synchronization", func() {
Expect(localSlotsAfter.Has(slot3)).To(BeTrue())
Expect(local.slotsDeleted).To(Equal(1))
})
+ It("can drop slots in local that hold xmin", func(ctx SpecContext) {
+ slotWithXmin := "_cnpg_xmin"
+ err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin})
+ Expect(err).ShouldNot(HaveOccurred())
+ local.slots[slotWithXmin] = fakeSlot{name: slotWithXmin, holdsXmin: true}
+ localSlotsBefore, err := local.List(ctx, &config)
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(localSlotsBefore.Has(slotWithXmin)).To(BeTrue())
+
+ err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config)
+ Expect(err).ShouldNot(HaveOccurred())
+
+ localSlotsAfter, err := local.List(ctx, &config)
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(localSlotsAfter.Has(slotWithXmin)).To(BeFalse())
+ Expect(localSlotsAfter.Items).Should(HaveLen(1))
+ Expect(local.slotsDeleted).To(Equal(2))
+ })
})
diff --git a/internal/scheme/scheme.go b/internal/scheme/scheme.go
index 06d59fb5af..a26858459f 100644
--- a/internal/scheme/scheme.go
+++ b/internal/scheme/scheme.go
@@ -17,7 +17,7 @@ limitations under the License.
package scheme
import (
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
diff --git a/pkg/certs/tls.go b/pkg/certs/tls.go
new file mode 100644
index 0000000000..32b1cfdf65
--- /dev/null
+++ b/pkg/certs/tls.go
@@ -0,0 +1,87 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certs
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type contextKey string
+
+// contextKeyTLSConfig is the context key holding the TLS configuration
+const contextKeyTLSConfig contextKey = "tlsConfig"
+
+// newTLSConfigFromSecret creates a tls.Config from the given CA secret and serverName pair
+func newTLSConfigFromSecret(
+ ctx context.Context,
+ cli client.Client,
+ caSecret types.NamespacedName,
+ serverName string,
+) (*tls.Config, error) {
+ secret := &v1.Secret{}
+ err := cli.Get(ctx, caSecret, secret)
+ if err != nil {
+ return nil, fmt.Errorf("while getting caSecret %s: %w", caSecret.Name, err)
+ }
+
+ caCertificate, ok := secret.Data[CACertKey]
+ if !ok {
+ return nil, fmt.Errorf("missing %s entry in secret %s", CACertKey, caSecret.Name)
+ }
+
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCertificate)
+ tlsConfig := tls.Config{
+ MinVersion: tls.VersionTLS13,
+ ServerName: serverName,
+ RootCAs: caCertPool,
+ }
+
+ return &tlsConfig, nil
+}
+
+// NewTLSConfigForContext creates a tls.Config with the provided data and returns an expanded context that contains
+// the *tls.Config
+func NewTLSConfigForContext(
+ ctx context.Context,
+ cli client.Client,
+ caSecret types.NamespacedName,
+ serverName string,
+) (context.Context, error) {
+ conf, err := newTLSConfigFromSecret(ctx, cli, caSecret, serverName)
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(ctx, contextKeyTLSConfig, conf), nil
+}
+
+// GetTLSConfigFromContext returns the *tls.Config contained by the context or any error encountered
+func GetTLSConfigFromContext(ctx context.Context) (*tls.Config, error) {
+ conf, ok := ctx.Value(contextKeyTLSConfig).(*tls.Config)
+ if !ok || conf == nil {
+ return nil, fmt.Errorf("context does not contain TLSConfig")
+ }
+ return conf, nil
+}
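A usage sketch for the new helpers (the client, secret name, and server name below are placeholders): the CA-aware `*tls.Config` is built once, stashed in the context, and later retrieved to talk to the instances over HTTPS.

```go
package example

import (
	"context"
	"net/http"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
)

// newStatusHTTPClient loads the cluster CA into the context and then pulls the
// resulting *tls.Config back out to configure an HTTPS client.
func newStatusHTTPClient(ctx context.Context, cli client.Client) (*http.Client, error) {
	ctx, err := certs.NewTLSConfigForContext(ctx, cli,
		types.NamespacedName{Namespace: "default", Name: "cluster-example-ca"},
		"cluster-example-rw")
	if err != nil {
		return nil, err
	}

	tlsConfig, err := certs.GetTLSConfigFromContext(ctx)
	if err != nil {
		return nil, err
	}

	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}, nil
}
```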
diff --git a/pkg/certs/tls_test.go b/pkg/certs/tls_test.go
new file mode 100644
index 0000000000..7602e23adb
--- /dev/null
+++ b/pkg/certs/tls_test.go
@@ -0,0 +1,107 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certs
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("newTLSConfigFromSecret", func() {
+ var (
+ ctx context.Context
+ c client.Client
+ caSecret types.NamespacedName
+ serverName string
+ )
+
+ BeforeEach(func() {
+ ctx = context.TODO()
+ caSecret = types.NamespacedName{Name: "test-secret", Namespace: "default"}
+ serverName = "test-server"
+ })
+
+ Context("when the secret is found and valid", func() {
+ BeforeEach(func() {
+ secretData := map[string][]byte{
+ CACertKey: []byte(`-----BEGIN CERTIFICATE-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7Qe3X7Q6WZpXqlXkq0Bd
+... (rest of the CA certificate) ...
+-----END CERTIFICATE-----`),
+ }
+ secret := &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: caSecret.Name,
+ Namespace: caSecret.Namespace,
+ },
+ Data: secretData,
+ }
+ c = fake.NewClientBuilder().WithObjects(secret).Build()
+ })
+
+ It("should return a valid tls.Config", func() {
+ tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret, serverName)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(tlsConfig).NotTo(BeNil())
+ Expect(tlsConfig.MinVersion).To(Equal(uint16(tls.VersionTLS13)))
+ Expect(tlsConfig.ServerName).To(Equal(serverName))
+ Expect(tlsConfig.RootCAs).ToNot(BeNil())
+ })
+ })
+
+ Context("when the secret is not found", func() {
+ BeforeEach(func() {
+ c = fake.NewClientBuilder().Build()
+ })
+
+ It("should return an error", func() {
+ tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret, serverName)
+ Expect(err).To(HaveOccurred())
+ Expect(tlsConfig).To(BeNil())
+ Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("while getting caSecret %s", caSecret.Name)))
+ })
+ })
+
+ Context("when the ca.crt entry is missing in the secret", func() {
+ BeforeEach(func() {
+ secret := &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: caSecret.Name,
+ Namespace: caSecret.Namespace,
+ },
+ }
+ c = fake.NewClientBuilder().WithObjects(secret).Build()
+ })
+
+ It("should return an error", func() {
+ tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret, serverName)
+ Expect(err).To(HaveOccurred())
+ Expect(tlsConfig).To(BeNil())
+ Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("missing %s entry in secret %s", CACertKey, caSecret.Name)))
+ })
+ })
+})
diff --git a/pkg/fileutils/directory.go b/pkg/fileutils/directory.go
new file mode 100644
index 0000000000..a7a612a9ba
--- /dev/null
+++ b/pkg/fileutils/directory.go
@@ -0,0 +1,139 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileutils
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "syscall"
+
+ "github.com/thoas/go-funk"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+const (
+ createFileBlockSize = 262144
+ probeFileName = "_cnpg_probe_"
+)
+
+type fileCreatorFunc = func(ctx context.Context, name string, size int) error
+
+// DiskProbe represents a filesystem directory and provides methods to interact
+// with it, such as checking for available disk space by attempting to create
+// a file of a specified size.
+type DiskProbe struct {
+ path string
+ createFileFunc fileCreatorFunc
+}
+
+// NewDiskProbe creates and returns a new DiskProbe instance for the specified
+// path.
+func NewDiskProbe(path string) *DiskProbe {
+ return &DiskProbe{
+ path: path,
+ createFileFunc: createFileWithSize,
+ }
+}
+
+// createFileWithSize creates a file with a certain name and
+// a certain size. It will fail if the file already exists.
+//
+// To allocate the file, the specified number of bytes will
+// be written, set to zero.
+//
+// The code of this function is modeled after the `XLogFileInitInternal`
+// PostgreSQL function, which can be found in `src/backend/access/transam/xlog.c`
+func createFileWithSize(ctx context.Context, name string, size int) error {
+ contextLogger := log.FromContext(ctx).WithValues("probeFileName", name)
+
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0o600) // nolint:gosec
+ if err != nil {
+ return fmt.Errorf("while opening size probe file: %w", err)
+ }
+ defer func() {
+ if closeErr := f.Close(); closeErr != nil {
+ if err != nil {
+ contextLogger.Error(
+ closeErr,
+ "Detected error while closing probe file while managing a write error",
+ "originalError", err)
+ } else {
+ err = closeErr
+ }
+ }
+ }()
+
+ buf := make([]byte, createFileBlockSize)
+ var writtenBytes int
+
+ for writtenBytes < size {
+ b, err := f.Write(buf[:min(len(buf), size-writtenBytes)])
+ if err != nil {
+ return fmt.Errorf("while writing to size probe file: %w", err)
+ }
+ writtenBytes += b
+ }
+
+ return nil
+}
+
+// HasStorageAvailable checks if there's enough disk space to store a
+// file with a specified size inside the directory. It does that
+// by using createFileFunc to create such a file in the directory
+// and then removing it.
+func (d DiskProbe) HasStorageAvailable(ctx context.Context, size int) (bool, error) {
+ var err error
+
+ probeFileName := path.Join(d.path, probeFileName+funk.RandomString(4))
+ contextLogger := log.FromContext(ctx).WithValues("probeFileName", probeFileName)
+
+ defer func() {
+ if removeErr := RemoveFile(probeFileName); removeErr != nil {
+ if err == nil {
+ err = removeErr
+ } else {
+ contextLogger.Error(
+ removeErr,
+ "Detected error while removing free disk space probe file",
+ "originalError", err)
+ }
+ }
+ }()
+
+ err = d.createFileFunc(ctx, probeFileName, size)
+ if isNoSpaceLeftOnDevice(err) {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// isNoSpaceLeftOnDevice returns true when there's no more
+// space left
+func isNoSpaceLeftOnDevice(err error) bool {
+ var pathError *os.PathError
+ if errors.As(err, &pathError) {
+ return pathError.Err == syscall.ENOSPC
+ }
+
+ return false
+}
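A short usage sketch (the path and size below are placeholders): probing a directory for room to hold one 16 MiB WAL segment.

```go
package example

import (
	"context"
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/fileutils"
)

// ensureRoomForOneSegment probes pgWalDir by creating and then removing a
// 16 MiB temporary file, which is exactly how DiskProbe detects ENOSPC.
func ensureRoomForOneSegment(ctx context.Context, pgWalDir string) error {
	const walSegmentSize = 16 * 1024 * 1024

	ok, err := fileutils.NewDiskProbe(pgWalDir).HasStorageAvailable(ctx, walSegmentSize)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("not enough free space in %q for a new WAL segment", pgWalDir)
	}
	return nil
}
```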
diff --git a/pkg/fileutils/directory_test.go b/pkg/fileutils/directory_test.go
new file mode 100644
index 0000000000..91c2c8ec5f
--- /dev/null
+++ b/pkg/fileutils/directory_test.go
@@ -0,0 +1,109 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fileutils
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path"
+ "syscall"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Size probe functions", func() {
+ testFileName := path.Join(tempDir1, "_test_")
+
+ AfterEach(func() {
+ err := RemoveFile(testFileName)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("creates a file with a specific size", func(ctx SpecContext) {
+ expectedSize := createFileBlockSize + 400
+
+ err := createFileWithSize(ctx, testFileName, expectedSize)
+ Expect(err).ToNot(HaveOccurred())
+
+ info, err := os.Stat(testFileName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(int(info.Size())).To(Equal(expectedSize))
+ })
+
+ It("can create an empty file", func(ctx SpecContext) {
+ err := createFileWithSize(ctx, testFileName, 0)
+ Expect(err).ToNot(HaveOccurred())
+
+ info, err := os.Stat(testFileName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(int(info.Size())).To(Equal(0))
+ })
+
+ It("can detect free space in a directory", func(ctx SpecContext) {
+ result, err := NewDiskProbe(tempDir1).HasStorageAvailable(ctx, 100)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).To(BeTrue())
+ })
+
+ It("errors out when the directory doesn't exist", func(ctx SpecContext) {
+ result, err := NewDiskProbe(path.Join(tempDir1, "_not_existing_")).HasStorageAvailable(ctx, 100)
+ Expect(err).To(HaveOccurred())
+ Expect(result).To(BeFalse())
+ })
+
+ It("can detect when there is no more free space in a directory", func(ctx SpecContext) {
+ creatorFunction := func(_ context.Context, _ string, _ int) error {
+ return &os.PathError{
+ Err: syscall.ENOSPC,
+ }
+ }
+
+ dir := NewDiskProbe(tempDir1)
+ dir.createFileFunc = creatorFunction
+ result, err := dir.HasStorageAvailable(ctx, 100)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).To(BeFalse())
+ })
+})
+
+var _ = Describe("ENOSPC error checking", func() {
+ It("does not detect a nil error as ENOSPC", func() {
+ Expect(isNoSpaceLeftOnDevice(nil)).To(BeFalse())
+ })
+
+ It("does not detect a generic error as ENOSPC", func() {
+ Expect(isNoSpaceLeftOnDevice(fmt.Errorf("a generic error"))).To(BeFalse())
+ })
+
+ It("detects ENOSPC errors", func() {
+ testError := &os.PathError{
+ Err: syscall.ENOSPC,
+ }
+ Expect(isNoSpaceLeftOnDevice(testError)).To(BeTrue())
+ })
+
+ It("detects ENOSPC errors when they're wrapped in other errors", func() {
+ var testError error
+ testError = &os.PathError{
+ Err: syscall.ENOSPC,
+ }
+ testError = fmt.Errorf("something bad happened: %w", testError)
+ Expect(isNoSpaceLeftOnDevice(testError)).To(BeTrue())
+ })
+})
diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 638f5eddae..8cc34e6310 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -198,16 +198,22 @@ func quoteHbaLiteral(literal string) string {
return fmt.Sprintf(`"%s"`, literal)
}
-// GeneratePostgresqlIdent generates the pg_ident.conf content
-func (instance *Instance) GeneratePostgresqlIdent(cluster *apiv1.Cluster) (string, error) {
- return postgres.CreateIdentRules(cluster.Spec.PostgresConfiguration.PgIdent,
- getCurrentUserOrDefaultToInsecureMapping())
+// generatePostgresqlIdent generates the pg_ident.conf content given
+// a set of additional pg_ident lines that is usually taken from the
+// Cluster configuration
+func (instance *Instance) generatePostgresqlIdent(additionalLines []string) (string, error) {
+ return postgres.CreateIdentRules(
+ additionalLines,
+ getCurrentUserOrDefaultToInsecureMapping(),
+ )
}
-// RefreshPGIdent generates and writes down the pg_ident.conf file
-func (instance *Instance) RefreshPGIdent(cluster *apiv1.Cluster) (postgresIdentChanged bool, err error) {
- // Generate pg_hba.conf file
- pgIdentContent, err := instance.GeneratePostgresqlIdent(cluster)
+// RefreshPGIdent generates and writes down the pg_ident.conf file given
+// a set of additional pg_ident lines that is usually taken from the
+// Cluster configuration
+func (instance *Instance) RefreshPGIdent(additionalLines []string) (postgresIdentChanged bool, err error) {
+ // Generate pg_ident.conf file
+ pgIdentContent, err := instance.generatePostgresqlIdent(additionalLines)
if err != nil {
return false, nil
}
diff --git a/pkg/management/postgres/consts.go b/pkg/management/postgres/consts.go
new file mode 100644
index 0000000000..244c174d1f
--- /dev/null
+++ b/pkg/management/postgres/consts.go
@@ -0,0 +1,21 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+// pgWalDirectory is the name of the pg_wal directory inside
+// PGDATA
+const pgWalDirectory = "pg_wal"
diff --git a/pkg/management/postgres/ident.go b/pkg/management/postgres/ident.go
index d080215d2d..4b3d07a032 100644
--- a/pkg/management/postgres/ident.go
+++ b/pkg/management/postgres/ident.go
@@ -17,28 +17,11 @@ limitations under the License.
package postgres
import (
- "fmt"
"os/user"
- "path/filepath"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/fileutils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
)
-// WritePostgresUserMaps creates a pg_ident.conf file containing only one map called "local" that
-// maps the current user to "postgres" user.
-func WritePostgresUserMaps(pgData string) error {
- username := getCurrentUserOrDefaultToInsecureMapping()
- _, err := fileutils.WriteStringToFile(filepath.Join(pgData, constants.PostgresqlIdentFile),
- fmt.Sprintf("local %s postgres\n", username))
- if err != nil {
- return err
- }
-
- return nil
-}
-
// getCurrentUserOrDefaultToInsecureMapping retrieves the current system user's username.
// If the retrieval fails, it falls back to an insecure mapping using the root ("/") as the default username.
//
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index f26b5e6cb7..b4016527e6 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -18,6 +18,7 @@ package postgres
import (
"context"
+ "crypto/tls"
"database/sql"
"errors"
"fmt"
@@ -42,9 +43,10 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/logpipe"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
+ postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
// this is needed to correctly open the sql connection with the pgx driver
_ "github.com/jackc/pgx/v5/stdlib"
@@ -199,6 +201,12 @@ type Instance struct {
// tablespaceSynchronizerChan is used to send tablespace configuration to the tablespace synchronizer
tablespaceSynchronizerChan chan map[string]apiv1.TablespaceConfiguration
+
+ // StatusPortTLS enables TLS on the status port used to communicate with the operator
+ StatusPortTLS bool
+
+ // ServerCertificate is the certificate we use to serve https connections
+ ServerCertificate *tls.Certificate
}
// SetAlterSystemEnabled allows or deny the usage of the
@@ -247,6 +255,31 @@ func (instance *Instance) SetCanCheckReadiness(enabled bool) {
instance.canCheckReadiness.Store(enabled)
}
+// CheckHasDiskSpaceForWAL checks whether there is enough free disk space in the WAL
+// directory to allocate a new WAL segment, returning true if so and false otherwise
+func (instance *Instance) CheckHasDiskSpaceForWAL(ctx context.Context) (bool, error) {
+ pgControlDataString, err := instance.GetPgControldata()
+ if err != nil {
+ return false, fmt.Errorf("while running pg_controldata to detect WAL segment size: %w", err)
+ }
+
+ pgControlData := utils.ParsePgControldataOutput(pgControlDataString)
+ walSegmentSizeString, ok := pgControlData["Bytes per WAL segment"]
+ if !ok {
+ return false, fmt.Errorf("no 'Bytes per WAL segment' entry in the pg_controldata output")
+ }
+
+ walSegmentSize, err := strconv.Atoi(walSegmentSizeString)
+ if err != nil {
+ return false, fmt.Errorf(
+ "wrong 'Bytes per WAL segment' pg_controldata value (not an integer): '%s' %w",
+ walSegmentSizeString, err)
+ }
+
+ walDirectory := path.Join(instance.PgData, pgWalDirectory)
+ return fileutils.NewDiskProbe(walDirectory).HasStorageAvailable(ctx, walSegmentSize)
+}
+
// SetMightBeUnavailable marks whether the instance being down should be tolerated
func (instance *Instance) SetMightBeUnavailable(enabled bool) {
instance.mightBeUnavailable.Store(enabled)
@@ -299,7 +332,9 @@ func (instance *Instance) VerifyPgDataCoherence(ctx context.Context) error {
return err
}
- return WritePostgresUserMaps(instance.PgData)
+ // creates a bare pg_ident.conf that only grants local access
+ _, err := instance.RefreshPGIdent(nil)
+ return err
}
// InstanceCommand are commands for the goroutine managing postgres
@@ -679,7 +714,7 @@ func (instance *Instance) GetPgVersion() (semver.Version, error) {
return semver.Version{}, err
}
- parsedVersion, err := utils.GetPgVersion(db)
+ parsedVersion, err := postgresutils.GetPgVersion(db)
if err != nil {
return semver.Version{}, err
}
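For reference, a sketch of the parsing step CheckHasDiskSpaceForWAL relies on before probing pg_wal (the sample pg_controldata line in the comment is illustrative):

```go
package example

import (
	"fmt"
	"strconv"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// walSegmentSizeFromControldata extracts the WAL segment size from raw
// pg_controldata output, e.g. "Bytes per WAL segment:  16777216".
func walSegmentSizeFromControldata(pgControlData string) (int, error) {
	parsed := utils.ParsePgControldataOutput(pgControlData)
	raw, ok := parsed["Bytes per WAL segment"]
	if !ok {
		return 0, fmt.Errorf("no 'Bytes per WAL segment' entry in the pg_controldata output")
	}
	return strconv.Atoi(raw)
}
```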
diff --git a/pkg/management/postgres/logpipe/CSVReadWriter.go b/pkg/management/postgres/logpipe/CSVReadWriter.go
index 4ff56641d4..606f9b7e79 100644
--- a/pkg/management/postgres/logpipe/CSVReadWriter.go
+++ b/pkg/management/postgres/logpipe/CSVReadWriter.go
@@ -19,6 +19,7 @@ package logpipe
import (
"bytes"
"encoding/csv"
+ "errors"
"io"
)
@@ -33,17 +34,57 @@ type CSVReadWriter interface {
type CSVRecordReadWriter struct {
io.Writer
*csv.Reader
+ allowedFieldsPerRecord []int
+}
+
+// Read reads a CSV record from the underlying reader, returning the record or any error
+// encountered. Records matching any of the allowed field counts are accepted.
+func (r *CSVRecordReadWriter) Read() ([]string, error) {
+ record, err := r.Reader.Read()
+ if err == nil {
+ return record, nil
+ }
+
+ var parseError *csv.ParseError
+ if !errors.As(err, &parseError) {
+ return nil, err
+ }
+
+ if !errors.Is(parseError.Err, csv.ErrFieldCount) {
+ return nil, err
+ }
+
+ for _, allowedFields := range r.allowedFieldsPerRecord {
+ if len(record) == allowedFields {
+ r.Reader.FieldsPerRecord = allowedFields
+ return record, nil
+ }
+ }
+
+ return nil, err
}
// NewCSVRecordReadWriter returns a new CSVRecordReadWriter which parses CSV lines
// with an expected number of fields. It uses a single record for memory efficiency.
-func NewCSVRecordReadWriter(fieldsPerRecord int) *CSVRecordReadWriter {
+// If no fieldsPerRecord are provided, it allows variable fields per record.
+// If fieldsPerRecord are provided, it will only allow those numbers of fields per record.
+func NewCSVRecordReadWriter(fieldsPerRecord ...int) *CSVRecordReadWriter {
recordBuffer := new(bytes.Buffer)
reader := csv.NewReader(recordBuffer)
reader.ReuseRecord = true
- reader.FieldsPerRecord = fieldsPerRecord
+
+ if len(fieldsPerRecord) == 0 {
+ // Allow variable fields per record as we don't have an opinion
+ reader.FieldsPerRecord = -1
+ } else {
+ // Optimistically set the first value as the default, so that the first line is
+ // validated too. Leaving it at 0 would let the first line pass and set the
+ // fields per record for all the following lines without us checking it.
+ reader.FieldsPerRecord = fieldsPerRecord[0]
+ }
+
return &CSVRecordReadWriter{
- Writer: recordBuffer,
- Reader: reader,
+ Writer: recordBuffer,
+ Reader: reader,
+ allowedFieldsPerRecord: fieldsPerRecord,
}
}
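A usage sketch of the variable field-count behaviour: the reader is primed with both accepted widths and switches between them as records arrive (the payloads are whatever pgaudit emits, 9 or 10 comma-separated fields).

```go
package example

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/logpipe"
)

// parseAuditPayloads pushes pgaudit payloads of either width through the same
// reader; a record is accepted when its field count matches one of the allowed
// values passed to NewCSVRecordReadWriter.
func parseAuditPayloads(payloads []string) error {
	rw := logpipe.NewCSVRecordReadWriter(
		logpipe.PgAuditFieldsPerRecord,
		logpipe.PgAuditFieldsPerRecordWithRows,
	)

	for _, payload := range payloads {
		if _, err := rw.Write([]byte(payload)); err != nil {
			return err
		}
		record, err := rw.Read()
		if err != nil {
			return err
		}
		fmt.Println(len(record), "fields")
	}
	return nil
}
```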
diff --git a/pkg/management/postgres/logpipe/pgaudit.go b/pkg/management/postgres/logpipe/pgaudit.go
index 370cabcc37..45a6bc8ed3 100644
--- a/pkg/management/postgres/logpipe/pgaudit.go
+++ b/pkg/management/postgres/logpipe/pgaudit.go
@@ -19,6 +19,10 @@ package logpipe
// PgAuditFieldsPerRecord is the number of fields in a pgaudit log line
const PgAuditFieldsPerRecord int = 9
+// PgAuditFieldsPerRecordWithRows is the number of fields in a pgaudit log line
+// when "pgaudit.log_rows" is set to "on"
+const PgAuditFieldsPerRecordWithRows int = 10
+
// PgAuditRecordName is the value of the logger field for pgaudit
const PgAuditRecordName = "pgaudit"
@@ -34,7 +38,7 @@ func NewPgAuditLoggingDecorator() *PgAuditLoggingDecorator {
return &PgAuditLoggingDecorator{
LoggingRecord: &LoggingRecord{},
Audit: &PgAuditRecord{},
- CSVReadWriter: NewCSVRecordReadWriter(PgAuditFieldsPerRecord),
+ CSVReadWriter: NewCSVRecordReadWriter(PgAuditFieldsPerRecord, PgAuditFieldsPerRecordWithRows),
}
}
@@ -84,6 +88,11 @@ func (r *PgAuditRecord) fromCSV(auditContent []string) {
r.ObjectName = auditContent[6]
r.Statement = auditContent[7]
r.Parameter = auditContent[8]
+ if len(auditContent) >= PgAuditFieldsPerRecordWithRows {
+ r.Rows = auditContent[9]
+ } else {
+ r.Rows = ""
+ }
}
// PgAuditRecord stores all the fields of a pgaudit log line
@@ -97,4 +106,5 @@ type PgAuditRecord struct {
ObjectName string `json:"object_name,omitempty"`
Statement string `json:"statement,omitempty"`
Parameter string `json:"parameter,omitempty"`
+ Rows string `json:"rows,omitempty"`
}
diff --git a/pkg/management/postgres/logpipe/pgaudit_test.go b/pkg/management/postgres/logpipe/pgaudit_test.go
index 5f805a81d9..5cf9729162 100644
--- a/pkg/management/postgres/logpipe/pgaudit_test.go
+++ b/pkg/management/postgres/logpipe/pgaudit_test.go
@@ -51,7 +51,7 @@ var _ = Describe("pgAudit CSV log record", func() {
})
var _ = Describe("PgAudit CVS logging decorator", func() {
- Context("Given a CSV record embedding pgAudit", func() {
+ Context("Given a CSV record embedding pgAudit without rows", func() {
It("fills the fields for PostgreSQL 13", func() { // nolint:dupl
values := make([]string, FieldsPerRecord12)
for i := range values {
@@ -159,6 +159,116 @@ var _ = Describe("PgAudit CVS logging decorator", func() {
})
})
+ Context("Given a CSV record embedding pgAudit with rows", func() {
+ It("fills the fields for PostgreSQL 13", func() { // nolint:dupl
+ values := make([]string, FieldsPerRecord12)
+ for i := range values {
+ values[i] = fmt.Sprintf("%d", i)
+ }
+ auditValues := make([]string, PgAuditFieldsPerRecordWithRows)
+ for i := range auditValues {
+ auditValues[i] = fmt.Sprintf("A%d", i)
+ }
+ values[13] = writePgAuditMessage(auditValues)
+ r := NewPgAuditLoggingDecorator()
+ result := r.FromCSV(values)
+ Expect(result.GetName()).To(Equal(PgAuditRecordName))
+ typedResult := result.(*PgAuditLoggingDecorator)
+ Expect(*typedResult.LoggingRecord).To(Equal(LoggingRecord{
+ LogTime: "0",
+ Username: "1",
+ DatabaseName: "2",
+ ProcessID: "3",
+ ConnectionFrom: "4",
+ SessionID: "5",
+ SessionLineNum: "6",
+ CommandTag: "7",
+ SessionStartTime: "8",
+ VirtualTransactionID: "9",
+ TransactionID: "10",
+ ErrorSeverity: "11",
+ SQLStateCode: "12",
+ Message: "",
+ Detail: "14",
+ Hint: "15",
+ InternalQuery: "16",
+ InternalQueryPos: "17",
+ Context: "18",
+ Query: "19",
+ QueryPos: "20",
+ Location: "21",
+ ApplicationName: "22",
+ BackendType: "",
+ }))
+ Expect(*typedResult.Audit).To(Equal(PgAuditRecord{
+ AuditType: "A0",
+ StatementID: "A1",
+ SubstatementID: "A2",
+ Class: "A3",
+ Command: "A4",
+ ObjectType: "A5",
+ ObjectName: "A6",
+ Statement: "A7",
+ Parameter: "A8",
+ Rows: "A9",
+ }))
+ })
+
+ It("fills the fields for PostgreSQL 13", func() { // nolint:dupl
+ values := make([]string, FieldsPerRecord13)
+ for i := range values {
+ values[i] = fmt.Sprintf("%d", i)
+ }
+ auditValues := make([]string, PgAuditFieldsPerRecordWithRows)
+ for i := range auditValues {
+ auditValues[i] = fmt.Sprintf("A%d", i)
+ }
+ values[13] = writePgAuditMessage(auditValues)
+ r := NewPgAuditLoggingDecorator()
+ result := r.FromCSV(values)
+ Expect(result.GetName()).To(Equal(PgAuditRecordName))
+ typedResult := result.(*PgAuditLoggingDecorator)
+ Expect(*typedResult.LoggingRecord).To(Equal(LoggingRecord{
+ LogTime: "0",
+ Username: "1",
+ DatabaseName: "2",
+ ProcessID: "3",
+ ConnectionFrom: "4",
+ SessionID: "5",
+ SessionLineNum: "6",
+ CommandTag: "7",
+ SessionStartTime: "8",
+ VirtualTransactionID: "9",
+ TransactionID: "10",
+ ErrorSeverity: "11",
+ SQLStateCode: "12",
+ Message: "",
+ Detail: "14",
+ Hint: "15",
+ InternalQuery: "16",
+ InternalQueryPos: "17",
+ Context: "18",
+ Query: "19",
+ QueryPos: "20",
+ Location: "21",
+ ApplicationName: "22",
+ BackendType: "23",
+ }))
+ Expect(*typedResult.Audit).To(Equal(PgAuditRecord{
+ AuditType: "A0",
+ StatementID: "A1",
+ SubstatementID: "A2",
+ Class: "A3",
+ Command: "A4",
+ ObjectType: "A5",
+ ObjectName: "A6",
+ Statement: "A7",
+ Parameter: "A8",
+ Rows: "A9",
+ }))
+ })
+ })
+
Context("Given a CSV record not embedding pgAudit", func() {
It("fills the fields for PostgreSQL 13", func() {
values := make([]string, FieldsPerRecord12)
@@ -237,7 +347,7 @@ var _ = Describe("PgAudit CVS logging decorator", func() {
})
var _ = Describe("pgAudit parsing internals", func() {
- When("a message contains a pgAudit formatted record", func() {
+ When("a message contains a pgAudit formatted record without rows", func() {
writer := NewCSVRecordReadWriter(PgAuditFieldsPerRecord)
pgAuditRecord := &PgAuditRecord{}
validRecords := []*LoggingRecord{
@@ -268,6 +378,44 @@ var _ = Describe("pgAudit parsing internals", func() {
}
})
})
+ When("a message contains a pgAudit formatted record with rows", func() {
+ writer := NewCSVRecordReadWriter(PgAuditFieldsPerRecord, PgAuditFieldsPerRecordWithRows)
+ pgAuditRecord := &PgAuditRecord{}
+ validRecords := []*LoggingRecord{
+ {Message: "AUDIT: SESSION,1,1,READ,SELECT,,,\"SELECT pg_last_wal_receive_lsn()," +
+ " pg_last_wal_replay_lsn(), pg_is_wal_replay_paused()\","},
+ {Message: "AUDIT: SESSION,1,1,DDL,CREATE TABLE,TABLE,public.account,\"create table account\n(" +
+ "\n id int,\n name text,\n password text,\n description text\n);\",,2"},
+ {Message: "AUDIT: SESSION,1,1,READ,SELECT,,,\"SELECT pg_last_wal_receive_lsn()," +
+ " pg_last_wal_replay_lsn(), pg_is_wal_replay_paused()\","},
+ }
+ It("identifies the message as pgAudit generated", func() {
+ for _, record := range validRecords {
+ tag, content := getTagAndContent(record)
+ Expect(tag).To(BeEquivalentTo("AUDIT"))
+ Expect(content).NotTo(BeEmpty())
+ }
+ })
+ It("decodes the messages correctly switching between lengths as needed", func() {
+ for i, record := range validRecords {
+ tag, rawContent := getTagAndContent(record)
+ Expect(tag).To(BeEquivalentTo("AUDIT"))
+ n, err := writer.Write([]byte(rawContent))
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(n).To(BeEquivalentTo(len(rawContent)))
+ content, err := writer.Read()
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(content).NotTo(BeEmpty())
+ pgAuditRecord.fromCSV(content)
+ Expect(pgAuditRecord.AuditType).To(BeEquivalentTo("SESSION"))
+ if i == 1 {
+ Expect(pgAuditRecord.Rows).To(BeEquivalentTo("2"))
+ } else {
+ Expect(pgAuditRecord.Rows).To(BeEmpty())
+ }
+ }
+ })
+ })
})
func writePgAuditMessage(content []string) string {
diff --git a/pkg/management/postgres/metrics/collector.go b/pkg/management/postgres/metrics/collector.go
index 0e33b51017..523a8cec52 100644
--- a/pkg/management/postgres/metrics/collector.go
+++ b/pkg/management/postgres/metrics/collector.go
@@ -330,6 +330,15 @@ func (c QueryCollector) collect(conn *sql.DB, ch chan<- prometheus.Metric) error
}
}()
+ shouldBeCollected, err := c.userQuery.isCollectable(tx)
+ if err != nil {
+ return err
+ }
+
+ if !shouldBeCollected {
+ return nil
+ }
+
rows, err := tx.Query(c.userQuery.Query)
if err != nil {
return err
diff --git a/pkg/management/postgres/metrics/parser.go b/pkg/management/postgres/metrics/parser.go
index 94a4246b63..b3aa614c84 100644
--- a/pkg/management/postgres/metrics/parser.go
+++ b/pkg/management/postgres/metrics/parser.go
@@ -18,6 +18,8 @@ limitations under the License.
package metrics
import (
+ "database/sql"
+ "errors"
"fmt"
"gopkg.in/yaml.v3"
@@ -29,6 +31,7 @@ type UserQueries map[string]UserQuery
// UserQuery represent a query created by the user
type UserQuery struct {
Query string `yaml:"query"`
+ PredicateQuery string `yaml:"predicate_query"`
Metrics []Mapping `yaml:"metrics"`
Master bool `yaml:"master"` // wokeignore:rule=master
Primary bool `yaml:"primary"`
@@ -88,3 +91,29 @@ func ParseQueries(content []byte) (UserQueries, error) {
return result, nil
}
+
+// isCollectable checks if a query to collect metrics should be executed.
+// The method tests the query provided in the PredicateQuery property within the same transaction
+// used to collect metrics.
+// PredicateQuery should return at most one row, with a single boolean column.
+// If no PredicateQuery is provided, the query is considered collectable by default.
+func (userQuery UserQuery) isCollectable(tx *sql.Tx) (bool, error) {
+ if userQuery.PredicateQuery == "" {
+ return true, nil
+ }
+
+ var isCollectable sql.NullBool
+ if err := tx.QueryRow(userQuery.PredicateQuery).Scan(&isCollectable); err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return false, nil
+ }
+
+ return false, err
+ }
+
+ if !isCollectable.Valid {
+ return false, nil
+ }
+
+ return isCollectable.Bool, nil
+}
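A sketch of how a user-defined metric can opt out of collection through the new knob (the queries below are placeholders; only the `predicate_query` key is the point):

```go
package example

import (
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics"
)

// loadConditionalQuery parses a custom-queries definition whose metrics are
// collected only when the predicate returns a single true boolean.
func loadConditionalQuery() error {
	const definition = `
pg_stat_statements:
  query: "SELECT calls FROM pg_stat_statements LIMIT 1"
  predicate_query: "SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements')"
  metrics:
    - calls:
        usage: "COUNTER"
        description: "Number of times the statement was executed"
`
	queries, err := metrics.ParseQueries([]byte(definition))
	if err != nil {
		return err
	}
	fmt.Println(queries["pg_stat_statements"].PredicateQuery)
	return nil
}
```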
diff --git a/pkg/management/postgres/metrics/parser_test.go b/pkg/management/postgres/metrics/parser_test.go
index 2e210e6a5b..c185aede05 100644
--- a/pkg/management/postgres/metrics/parser_test.go
+++ b/pkg/management/postgres/metrics/parser_test.go
@@ -17,6 +17,10 @@ limitations under the License.
package metrics
import (
+ "database/sql"
+
+ "github.com/DATA-DOG/go-sqlmock"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -47,6 +51,8 @@ var _ = Describe("Metrics parser", func() {
some_query:
query: |
SELECT current_database() as datname, count(*) as rows FROM some_table
+ predicate_query: |
+ SELECT 1 as row FROM some_table WHERE some filters
cache_seconds: 100
metrics:
- datname:
@@ -67,6 +73,8 @@ some_query:
Expect(result["some_query"].Query).To(Equal("SELECT current_database() as datname, count(*)" +
" as rows FROM some_table\n"))
+ Expect(result["some_query"].PredicateQuery).To(Equal("SELECT 1 as row" +
+ " FROM some_table WHERE some filters\n"))
Expect(result["some_query"].Primary).To(BeFalse())
Expect(result["some_query"].TargetDatabases).To(ContainElements("test", "app"))
Expect(result["some_query"].CacheSeconds).To(BeEquivalentTo(100))
@@ -88,6 +96,74 @@ test:
})
})
+var _ = Describe("userQuery", func() {
+ var uq *UserQuery
+ var db *sql.DB
+ var mock sqlmock.Sqlmock
+ BeforeEach(func() {
+ var err error
+ uq = &UserQuery{}
+ db, mock, err = sqlmock.New()
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("should evaluate true correctly (isCollectable)", func(ctx SpecContext) {
+ const predicate = "SELECT TRUE"
+ uq.PredicateQuery = predicate
+ rows := sqlmock.NewRows([]string{"result"}).AddRow(true)
+ mock.ExpectBegin()
+ mock.ExpectQuery(predicate).WithoutArgs().WillReturnRows(rows)
+
+ tx, err := db.BeginTx(ctx, nil)
+ Expect(err).ToNot(HaveOccurred())
+ res, err := uq.isCollectable(tx)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeTrue())
+ })
+
+ It("should evaluate false correctly (isCollectable)", func(ctx SpecContext) {
+ const predicate = "SELECT FALSE"
+ uq.PredicateQuery = predicate
+ rows := sqlmock.NewRows([]string{"result"}).AddRow(false)
+ mock.ExpectBegin()
+ mock.ExpectQuery(predicate).WithoutArgs().WillReturnRows(rows)
+
+ tx, err := db.BeginTx(ctx, nil)
+ Expect(err).ToNot(HaveOccurred())
+ res, err := uq.isCollectable(tx)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeFalse())
+ })
+
+ It("should evaluate nil correctly (isCollectable)", func(ctx SpecContext) {
+ const predicate = "SELECT NIL"
+ uq.PredicateQuery = predicate
+ rows := sqlmock.NewRows([]string{"result"}).AddRow(nil)
+ mock.ExpectBegin()
+ mock.ExpectQuery(predicate).WithoutArgs().WillReturnRows(rows)
+
+ tx, err := db.BeginTx(ctx, nil)
+ Expect(err).ToNot(HaveOccurred())
+ res, err := uq.isCollectable(tx)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeFalse())
+ })
+
+ It("should evaluate ErrNoRows correctly (isCollectable)", func(ctx SpecContext) {
+ const predicate = "SELECT TEST_EMPTY"
+ uq.PredicateQuery = predicate
+ rows := sqlmock.NewRows([]string{"result"}).RowError(0, sql.ErrNoRows)
+ mock.ExpectBegin()
+ mock.ExpectQuery(predicate).WithoutArgs().WillReturnRows(rows)
+
+ tx, err := db.BeginTx(ctx, nil)
+ Expect(err).ToNot(HaveOccurred())
+ res, err := uq.isCollectable(tx)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeFalse())
+ })
+})
+
const pgExporterQueries = `
pg_replication:
query: "SELECT CASE WHEN NOT pg_is_in_recovery() [...]"
diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go
index 7558957c75..48fbc6c1f3 100644
--- a/pkg/management/postgres/probes.go
+++ b/pkg/management/postgres/probes.go
@@ -161,7 +161,7 @@ func updateResultForDecrease(
if !result.IsPrimary {
// in case of hot standby parameters being decreased,
// followers need to wait for the new value to be present in the PGDATA before being restarted.
- pgControldataParams, err := GetEnforcedParametersThroughPgControldata(instance.PgData)
+ pgControldataParams, err := LoadEnforcedParametersFromPgControldata(instance.PgData)
if err != nil {
return err
}
@@ -172,7 +172,7 @@ func updateResultForDecrease(
return nil
}
-func areAllParamsUpdated(decreasedValues map[string]string, pgControldataParams map[string]string) bool {
+func areAllParamsUpdated(decreasedValues map[string]int, pgControldataParams map[string]int) bool {
var readyParams int
for setting, newValue := range decreasedValues {
if pgControldataParams[setting] == newValue {
@@ -185,12 +185,12 @@ func areAllParamsUpdated(decreasedValues map[string]string, pgControldataParams
// GetDecreasedSensibleSettings tries to get all decreased hot standby sensible parameters from the instance.
// Returns a map containing all the decreased hot standby sensible parameters with their new value.
// See https://www.postgresql.org/docs/current/hot-standby.html#HOT-STANDBY-ADMIN for more details.
-func (instance *Instance) GetDecreasedSensibleSettings(superUserDB *sql.DB) (map[string]string, error) {
+func (instance *Instance) GetDecreasedSensibleSettings(superUserDB *sql.DB) (map[string]int, error) {
// We check whether all parameters with a pending restart from pg_settings
// have a decreased value reported as not applied from pg_file_settings.
rows, err := superUserDB.Query(
`
-SELECT pending_settings.name, coalesce(new_setting,default_setting) as new_setting
+SELECT pending_settings.name, CAST(coalesce(new_setting,default_setting) AS INTEGER) as new_setting
FROM
(
SELECT name,
@@ -231,9 +231,10 @@ WHERE pending_settings.name IN (
}
}()
- decreasedSensibleValues := make(map[string]string)
+ decreasedSensibleValues := make(map[string]int)
for rows.Next() {
- var newValue, name string
+ var name string
+ var newValue int
if err = rows.Scan(&name, &newValue); err != nil {
return nil, err
}
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index e9ff25b86c..285fa4161f 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -27,6 +27,7 @@ import (
"os/exec"
"path"
"path/filepath"
+ "strconv"
"strings"
"time"
@@ -346,7 +347,7 @@ func (info InitInfo) restoreCustomWalDir(ctx context.Context) (bool, error) {
}
contextLogger := log.FromContext(ctx)
- pgDataWal := path.Join(info.PgData, "pg_wal")
+ pgDataWal := path.Join(info.PgData, pgWalDirectory)
// if the link is already present we have nothing to do.
if linkInfo, _ := os.Readlink(pgDataWal); linkInfo == info.PgWal {
@@ -579,10 +580,10 @@ func (info InitInfo) writeRestoreWalConfig(backup *apiv1.Backup, cluster *apiv1.
strings.Join(cmd, " "),
cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions())
- return info.writeRecoveryConfiguration(recoveryFileContents)
+ return info.writeRecoveryConfiguration(cluster, recoveryFileContents)
}
-func (info InitInfo) writeRecoveryConfiguration(recoveryFileContents string) error {
+func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recoveryFileContents string) error {
// Ensure restore_command is used to correctly recover WALs
// from the object storage
major, err := postgresutils.GetMajorVersion(info.PgData)
@@ -601,21 +602,47 @@ func (info InitInfo) writeRecoveryConfiguration(recoveryFileContents string) err
return fmt.Errorf("cannot write recovery config: %w", err)
}
- enforcedParams, err := GetEnforcedParametersThroughPgControldata(info.PgData)
+ // Now we need to choose which parameters to use to complete the recovery
+ // of this PostgreSQL instance.
+ // We know the values that these parameters had when the backup was started
+ // from the `pg_controldata` output.
+ // We don't know how these values were set in the newer WALs.
+ //
+ // The only way to proceed is to rely on the user-defined configuration,
+ // with the caveat of ensuring that the values are high enough to be
+ // able to start recovering the backup.
+ //
+ // To be on the safe side, we'll use the largest setting we find
+ // from `pg_controldata` and the Cluster definition.
+ //
+ // https://www.postgresql.org/docs/16/hot-standby.html#HOT-STANDBY-ADMIN
+ controldataParams, err := LoadEnforcedParametersFromPgControldata(info.PgData)
if err != nil {
return err
}
- if enforcedParams != nil {
- changed, err := configfile.UpdatePostgresConfigurationFile(
- path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile),
- enforcedParams,
+ clusterParams, err := LoadEnforcedParametersFromCluster(cluster)
+ if err != nil {
+ return err
+ }
+ enforcedParams := make(map[string]string)
+ for _, param := range pgControldataSettingsToParamsMap {
+ value := max(clusterParams[param], controldataParams[param])
+ enforcedParams[param] = strconv.Itoa(value)
+ }
+ changed, err := configfile.UpdatePostgresConfigurationFile(
+ path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile),
+ enforcedParams,
+ )
+ if changed {
+ log.Info(
+ "Aligned PostgreSQL configuration to satisfy both pg_controldata and cluster spec",
+ "enforcedParams", enforcedParams,
+ "controldataParams", controldataParams,
+ "clusterParams", clusterParams,
)
- if changed {
- log.Info("enforcing parameters found in pg_controldata", "parameters", enforcedParams)
- }
- if err != nil {
- return fmt.Errorf("cannot write recovery config for enforced parameters: %w", err)
- }
+ }
+ if err != nil {
+ return fmt.Errorf("cannot write recovery config for enforced parameters: %w", err)
}
if major >= 12 {
@@ -650,9 +677,9 @@ func (info InitInfo) writeRecoveryConfiguration(recoveryFileContents string) err
0o600)
}
-// GetEnforcedParametersThroughPgControldata will parse the output of pg_controldata in order to get
+// LoadEnforcedParametersFromPgControldata will parse the output of pg_controldata in order to get
// the values of all the hot standby sensible parameters
-func GetEnforcedParametersThroughPgControldata(pgData string) (map[string]string, error) {
+func LoadEnforcedParametersFromPgControldata(pgData string) (map[string]int, error) {
var stdoutBuffer bytes.Buffer
var stderrBuffer bytes.Buffer
pgControlDataCmd := exec.Command(pgControlDataName,
@@ -671,12 +698,44 @@ func GetEnforcedParametersThroughPgControldata(pgData string) (map[string]string
log.Debug("pg_controldata stdout", "stdout", stdoutBuffer.String())
- enforcedParams := map[string]string{}
+ enforcedParams := make(map[string]int)
for key, value := range utils.ParsePgControldataOutput(stdoutBuffer.String()) {
if param, ok := pgControldataSettingsToParamsMap[key]; ok {
- enforcedParams[param] = value
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ log.Error(err, "while parsing pg_controldata content",
+ "key", key,
+ "value", value)
+ return nil, err
+ }
+ enforcedParams[param] = intValue
}
}
+
+ return enforcedParams, nil
+}
+
+// LoadEnforcedParametersFromCluster loads the enforced parameters that are defined in the cluster spec
+func LoadEnforcedParametersFromCluster(
+ cluster *apiv1.Cluster,
+) (map[string]int, error) {
+ clusterParams := cluster.Spec.PostgresConfiguration.Parameters
+ enforcedParams := map[string]int{}
+ for _, param := range pgControldataSettingsToParamsMap {
+ value, found := clusterParams[param]
+ if !found {
+ continue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ log.Error(err, "while parsing enforced postgres parameter",
+ "param", param,
+ "value", value)
+ return nil, err
+ }
+ enforcedParams[param] = intValue
+ }
return enforcedParams, nil
}
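
To make the merge rule above concrete, here is a minimal, standalone sketch of the "take the larger of pg_controldata and cluster spec" logic. The parameter list and map names are illustrative only, not the operator's real identifiers.

```go
// Sketch: for every hot-standby-sensitive parameter, keep the larger of the
// value recorded by pg_controldata at backup time and the value requested in
// the Cluster spec, then render it as a string for postgresql.conf.
package main

import (
	"fmt"
	"strconv"
)

func mergeEnforcedParams(controldata, cluster map[string]int, params []string) map[string]string {
	merged := make(map[string]string, len(params))
	for _, p := range params {
		v := controldata[p]
		if cluster[p] > v {
			v = cluster[p]
		}
		merged[p] = strconv.Itoa(v)
	}
	return merged
}

func main() {
	params := []string{"max_connections", "max_wal_senders"}
	controldata := map[string]int{"max_connections": 100, "max_wal_senders": 10}
	cluster := map[string]int{"max_connections": 200, "max_wal_senders": 5}
	fmt.Println(mergeEnforcedParams(controldata, cluster, params))
	// map[max_connections:200 max_wal_senders:10]
}
```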
@@ -715,36 +774,36 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error {
_, err = temporaryInstance.RefreshPGHBA(cluster, "")
if err != nil {
- return fmt.Errorf("while reading configuration files from ConfigMap: %w", err)
+ return fmt.Errorf("while generating pg_hba.conf: %w", err)
}
- _, err = temporaryInstance.RefreshPGIdent(cluster)
+ _, err = temporaryInstance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent)
if err != nil {
- return fmt.Errorf("while reading configuration files from ConfigMap: %w", err)
+ return fmt.Errorf("while generating pg_ident.conf: %w", err)
}
_, err = temporaryInstance.RefreshConfigurationFilesFromCluster(cluster, false)
if err != nil {
- return fmt.Errorf("while reading configuration files from ConfigMap: %w", err)
+ return fmt.Errorf("while generating Postgres configuration: %w", err)
}
err = fileutils.CopyFile(
path.Join(temporaryInitInfo.PgData, "postgresql.conf"),
path.Join(info.PgData, "postgresql.conf"))
if err != nil {
- return fmt.Errorf("while creating postgresql.conf: %w", err)
+ return fmt.Errorf("while installing postgresql.conf: %w", err)
}
err = fileutils.CopyFile(
path.Join(temporaryInitInfo.PgData, constants.PostgresqlCustomConfigurationFile),
path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile))
if err != nil {
- return fmt.Errorf("while creating custom.conf: %w", err)
+ return fmt.Errorf("while installing %v: %w", constants.PostgresqlCustomConfigurationFile, err)
}
err = fileutils.CopyFile(
path.Join(temporaryInitInfo.PgData, constants.PostgresqlOverrideConfigurationFile),
path.Join(info.PgData, constants.PostgresqlOverrideConfigurationFile))
if err != nil {
- return fmt.Errorf("while creating %v: %w", constants.PostgresqlOverrideConfigurationFile, err)
+ return fmt.Errorf("while installing %v: %w", constants.PostgresqlOverrideConfigurationFile, err)
}
// Disable SSL as we still don't have the required certificates
@@ -758,8 +817,8 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error {
return err
}
-// WriteRestoreHbaConf writes a pg_hba.conf allowing access without password from localhost.
-// this is needed to set the PostgreSQL password after the postgres server is started and active
+// WriteRestoreHbaConf writes a basic pg_hba.conf and pg_ident.conf allowing passwordless access from localhost.
+// This is needed to set the PostgreSQL password after the postgres server is started and active
func (info InitInfo) WriteRestoreHbaConf() error {
// We allow every access from localhost, and this is needed to correctly restore
// the database
@@ -770,8 +829,9 @@ func (info InitInfo) WriteRestoreHbaConf() error {
return err
}
- // Create the local map referred in the HBA configuration
- return WritePostgresUserMaps(info.PgData)
+ // Create only the local map referenced in the HBA configuration
+ _, err = info.GetInstance().RefreshPGIdent(nil)
+ return err
}
// ConfigureInstanceAfterRestore changes the superuser password
diff --git a/pkg/management/postgres/restore_test.go b/pkg/management/postgres/restore_test.go
index 4bdde0b42f..df5557e750 100644
--- a/pkg/management/postgres/restore_test.go
+++ b/pkg/management/postgres/restore_test.go
@@ -24,6 +24,7 @@ import (
"github.com/thoas/go-funk"
"k8s.io/utils/strings/slices"
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/fileutils"
. "github.com/onsi/ginkgo/v2"
@@ -35,8 +36,8 @@ var _ = Describe("testing restore InitInfo methods", func() {
Expect(err).ToNot(HaveOccurred())
pgData := path.Join(tempDir, "postgres", "data", "pgdata")
- pgWal := path.Join(pgData, "pg_wal")
- newPgWal := path.Join(tempDir, "postgres", "wal", "pg_wal")
+ pgWal := path.Join(pgData, pgWalDirectory)
+ newPgWal := path.Join(tempDir, "postgres", "wal", pgWalDirectory)
AfterEach(func() {
_ = fileutils.RemoveDirectoryContent(tempDir)
@@ -147,4 +148,60 @@ var _ = Describe("testing restore InitInfo methods", func() {
Expect(err).ToNot(HaveOccurred())
Expect(chg).To(BeFalse())
})
+
+ It("should parse enforced params from cluster", func() {
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Parameters: map[string]string{
+ "max_connections": "200",
+ "max_wal_senders": "20",
+ "max_worker_processes": "18",
+ "max_prepared_transactions": "50",
+ },
+ },
+ },
+ }
+ enforcedParamsInPGData, err := LoadEnforcedParametersFromCluster(cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(enforcedParamsInPGData).To(HaveLen(4))
+ Expect(enforcedParamsInPGData["max_connections"]).To(Equal(200))
+ Expect(enforcedParamsInPGData["max_wal_senders"]).To(Equal(20))
+ Expect(enforcedParamsInPGData["max_worker_processes"]).To(Equal(18))
+ Expect(enforcedParamsInPGData["max_prepared_transactions"]).To(Equal(50))
+ })
+
+ It("report error if user given one in incorrect value in the cluster", func() {
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Parameters: map[string]string{
+ "max_connections": "200s",
+ "max_wal_senders": "20",
+ "max_worker_processes": "18",
+ "max_prepared_transactions": "50",
+ },
+ },
+ },
+ }
+ _, err := LoadEnforcedParametersFromCluster(cluster)
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("ignore the non-enforced params user give", func() {
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresConfiguration: apiv1.PostgresConfiguration{
+ Parameters: map[string]string{
+ "max_connections": "200",
+ "wal_sender_timeout": "10min",
+ },
+ },
+ },
+ }
+ enforcedParamsInPGData, err := LoadEnforcedParametersFromCluster(cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(enforcedParamsInPGData).To(HaveLen(1))
+ Expect(enforcedParamsInPGData["max_connections"]).To(Equal(200))
+ })
})
diff --git a/pkg/management/postgres/webserver/backup_client.go b/pkg/management/postgres/webserver/backup_client.go
index 3d8ea554c6..c47bd5c50b 100644
--- a/pkg/management/postgres/webserver/backup_client.go
+++ b/pkg/management/postgres/webserver/backup_client.go
@@ -19,6 +19,7 @@ package webserver
import (
"bytes"
"context"
+ "crypto/tls"
"encoding/json"
"fmt"
"io"
@@ -26,8 +27,12 @@ import (
"net/http"
"time"
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance"
)
// backupClient a client to interact with the instance backup endpoints
@@ -37,13 +42,9 @@ type backupClient struct {
// BackupClient is a struct capable of interacting with the instance backup endpoints
type BackupClient interface {
- StatusWithErrors(ctx context.Context, podIP string) (*Response[BackupResultData], error)
- Start(
- ctx context.Context,
- podIP string,
- sbq StartBackupRequest,
- ) error
- Stop(ctx context.Context, podIP string, sbq StopBackupRequest) error
+ StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error)
+ Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error
+ Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error
}
// NewBackupClient creates a client capable of interacting with the instance backup endpoints
@@ -53,11 +54,23 @@ func NewBackupClient() BackupClient {
// We want a connection timeout to prevent waiting for the default
// TCP connection timeout (30 seconds) on lost SYN packets
+ dialer := &net.Dialer{
+ Timeout: connectionTimeout,
+ }
timeoutClient := &http.Client{
Transport: &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: connectionTimeout,
- }).DialContext,
+ DialContext: dialer.DialContext,
+ DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ tlsConfig, err := certs.GetTLSConfigFromContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ tlsDialer := tls.Dialer{
+ NetDialer: dialer,
+ Config: tlsConfig,
+ }
+ return tlsDialer.DialContext(ctx, network, addr)
+ },
},
Timeout: requestTimeout,
}
@@ -66,8 +79,9 @@ func NewBackupClient() BackupClient {
// StatusWithErrors retrieves the current status of the backup.
// Returns the response body in case there is an error in the request
-func (c *backupClient) StatusWithErrors(ctx context.Context, podIP string) (*Response[BackupResultData], error) {
- httpURL := url.Build(podIP, url.PathPgModeBackup, url.StatusPort)
+func (c *backupClient) StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) {
+ scheme := instance.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil)
if err != nil {
return nil, err
@@ -77,12 +91,9 @@ func (c *backupClient) StatusWithErrors(ctx context.Context, podIP string) (*Res
}
// Start runs the pg_start_backup
-func (c *backupClient) Start(
- ctx context.Context,
- podIP string,
- sbq StartBackupRequest,
-) error {
- httpURL := url.Build(podIP, url.PathPgModeBackup, url.StatusPort)
+func (c *backupClient) Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error {
+ scheme := instance.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
// Marshalling the payload to JSON
jsonBody, err := json.Marshal(sbq)
@@ -101,8 +112,9 @@ func (c *backupClient) Start(
}
// Stop runs the command pg_stop_backup
-func (c *backupClient) Stop(ctx context.Context, podIP string, sbq StopBackupRequest) error {
- httpURL := url.Build(podIP, url.PathPgModeBackup, url.StatusPort)
+func (c *backupClient) Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error {
+ scheme := instance.GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
// Marshalling the payload to JSON
jsonBody, err := json.Marshal(sbq)
if err != nil {
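
The transport change above resolves the TLS configuration per request rather than per client. The following is a minimal sketch of that pattern using only the standard library; `tlsConfigFromContext` and the context key are hypothetical stand-ins for `certs.GetTLSConfigFromContext`.

```go
// Sketch of a per-request TLS dialer: the TLS configuration travels in the
// request context, so one shared client can talk to instances with different CAs.
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"
)

type tlsConfigKey struct{}

func tlsConfigFromContext(ctx context.Context) (*tls.Config, error) {
	cfg, ok := ctx.Value(tlsConfigKey{}).(*tls.Config)
	if !ok {
		return nil, fmt.Errorf("no TLS configuration in context")
	}
	return cfg, nil
}

func newClient() *http.Client {
	dialer := &net.Dialer{Timeout: 2 * time.Second}
	return &http.Client{
		Transport: &http.Transport{
			DialContext: dialer.DialContext,
			// TLS connections read their configuration from the context at dial time.
			DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				cfg, err := tlsConfigFromContext(ctx)
				if err != nil {
					return nil, err
				}
				tlsDialer := tls.Dialer{NetDialer: dialer, Config: cfg}
				return tlsDialer.DialContext(ctx, network, addr)
			},
		},
		Timeout: 30 * time.Second,
	}
}

func main() {
	_ = newClient()
}
```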
diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go
index 5a171ebdc6..2f38166359 100644
--- a/pkg/management/postgres/webserver/remote.go
+++ b/pkg/management/postgres/webserver/remote.go
@@ -18,12 +18,12 @@ package webserver
import (
"context"
+ "crypto/tls"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
- "time"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -74,7 +74,6 @@ func NewRemoteWebServer(
typedClient: typedClient,
instance: instance,
}
- go endpoints.keepBackupAliveConn()
serveMux := http.NewServeMux()
serveMux.HandleFunc(url.PathPgModeBackup, endpoints.backup)
@@ -91,6 +90,15 @@ func NewRemoteWebServer(
ReadHeaderTimeout: DefaultReadHeaderTimeout,
}
+ if instance.StatusPortTLS {
+ server.TLSConfig = &tls.Config{
+ MinVersion: tls.VersionTLS13,
+ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ return instance.ServerCertificate, nil
+ },
+ }
+ }
+
return NewWebServer(instance, server), nil
}
@@ -328,15 +336,3 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ
return
}
}
-
-// TODO: no need to active ping, we are connected locally
-func (ws *remoteWebserverEndpoints) keepBackupAliveConn() {
- for {
- if ws.currentBackup != nil && ws.currentBackup.conn != nil &&
- ws.currentBackup.err == nil && ws.currentBackup.data.Phase != Completed {
- log.Trace("keeping current backup connection alive")
- _ = ws.currentBackup.conn.PingContext(context.Background())
- }
- time.Sleep(3 * time.Second)
- }
-}
diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go
index 1b74c59846..41070a3ed2 100644
--- a/pkg/management/postgres/webserver/webserver.go
+++ b/pkg/management/postgres/webserver/webserver.go
@@ -86,7 +86,12 @@ func (ws *Webserver) Start(ctx context.Context) error {
go func() {
log.Info("Starting webserver", "address", ws.server.Addr)
- err := ws.server.ListenAndServe()
+ var err error
+ if ws.server.TLSConfig != nil {
+ err = ws.server.ListenAndServeTLS("", "")
+ } else {
+ err = ws.server.ListenAndServe()
+ }
if err != nil {
errChan <- err
}
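
A short sketch of the conditional TLS startup used above: when a `TLSConfig` with a `GetCertificate` callback is present, `ListenAndServeTLS` is called with empty file arguments so the certificate comes from the callback rather than from disk. The certificate paths below are illustrative assumptions.

```go
// Sketch: serve HTTPS only when a TLS configuration has been installed on the server.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func start(server *http.Server) error {
	if server.TLSConfig != nil {
		// Empty cert/key paths: the certificate is provided by GetCertificate.
		return server.ListenAndServeTLS("", "")
	}
	return server.ListenAndServe()
}

func main() {
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key") // hypothetical paths
	server := &http.Server{Addr: ":8000"}
	if err == nil {
		server.TLSConfig = &tls.Config{
			MinVersion: tls.VersionTLS13,
			GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
				return &cert, nil
			},
		}
	}
	fmt.Println(start(server))
}
```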
diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go
index a8429fe649..6a43e4e856 100644
--- a/pkg/management/url/url.go
+++ b/pkg/management/url/url.go
@@ -23,13 +23,13 @@ import (
const (
// LocalPort is the port for only available from Postgres.
- LocalPort int = 8010
+ LocalPort int32 = 8010
// PostgresMetricsPort is the port for the exporter of PostgreSQL related metrics (HTTP)
- PostgresMetricsPort int = 9187
+ PostgresMetricsPort int32 = 9187
// PgBouncerMetricsPort is the port for the exporter of PgBouncer related metrics (HTTP)
- PgBouncerMetricsPort int = 9127
+ PgBouncerMetricsPort int32 = 9127
// PathHealth is the URL path for Health State
PathHealth string = "/healthz"
@@ -59,19 +59,19 @@ const (
PathCache string = "/cache/"
// StatusPort is the port for status HTTP requests
- StatusPort int = 8000
+ StatusPort int32 = 8000
)
// Local builds an http request pointing to localhost
-func Local(path string, port int) string {
- return Build("localhost", path, port)
+func Local(path string, port int32) string {
+ return Build("http", "localhost", path, port)
}
// Build builds an url given the hostname and the path, pointing to the status web server
-func Build(hostname, path string, port int) string {
+func Build(scheme, hostname, path string, port int32) string {
// If path already starts with '/' we remove it
if path[0] == '/' {
path = path[1:]
}
- return fmt.Sprintf("http://%s:%d/%s", hostname, port, path)
+ return fmt.Sprintf("%s://%s:%d/%s", scheme, hostname, port, path)
}
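
For illustration, a standalone version of the scheme-aware URL builder introduced above (with an extra guard for empty paths that the sketch adds on its own):

```go
// Sketch of the scheme-aware status URL builder.
package main

import "fmt"

func build(scheme, hostname, path string, port int32) string {
	if len(path) > 0 && path[0] == '/' {
		path = path[1:]
	}
	return fmt.Sprintf("%s://%s:%d/%s", scheme, hostname, port, path)
}

func main() {
	fmt.Println(build("https", "10.0.0.12", "/pg/status", 8000))
	// https://10.0.0.12:8000/pg/status
}
```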
diff --git a/pkg/postgres/booleans.go b/pkg/postgres/booleans.go
new file mode 100644
index 0000000000..365bff9a9a
--- /dev/null
+++ b/pkg/postgres/booleans.go
@@ -0,0 +1,38 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ParsePostgresConfigBoolean returns the boolean value parsed from a string as a postgres boolean.
+// It returns an error if the input string is not a valid postgres boolean
+// See: https://www.postgresql.org/docs/current/config-setting.html
+// Boolean: Values can be written as on, off, true, false, yes, no, 1, 0 (all case-insensitive)
+// or any unambiguous prefix of one of these.
+func ParsePostgresConfigBoolean(in string) (bool, error) {
+ switch strings.ToLower(in) {
+ case "1", "on", "y", "ye", "yes", "t", "tr", "tru", "true":
+ return true, nil
+ case "0", "of", "off", "n", "no", "f", "fa", "fal", "fals", "false":
+ return false, nil
+ default:
+ return false, fmt.Errorf("configuration value is not a postgres boolean: %s", in)
+ }
+}
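
For reference, a self-contained copy of the parser with a few sample inputs; note that "o" is rejected because it is an ambiguous prefix of both "on" and "off".

```go
// Usage sketch of the PostgreSQL-style boolean parser: any unambiguous prefix
// of on/off/true/false/yes/no (plus 0 and 1) is accepted, case-insensitively.
package main

import (
	"fmt"
	"strings"
)

func parsePostgresConfigBoolean(in string) (bool, error) {
	switch strings.ToLower(in) {
	case "1", "on", "y", "ye", "yes", "t", "tr", "tru", "true":
		return true, nil
	case "0", "of", "off", "n", "no", "f", "fa", "fal", "fals", "false":
		return false, nil
	default:
		return false, fmt.Errorf("configuration value is not a postgres boolean: %s", in)
	}
}

func main() {
	for _, v := range []string{"ON", "ye", "of", "o"} {
		b, err := parsePostgresConfigBoolean(v)
		fmt.Printf("%q -> %v (err: %v)\n", v, b, err)
	}
}
```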
diff --git a/pkg/postgres/booleans_test.go b/pkg/postgres/booleans_test.go
new file mode 100644
index 0000000000..d4733355f7
--- /dev/null
+++ b/pkg/postgres/booleans_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postgres
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = DescribeTable("Test parsing of PostgreSQL configuration booleans",
+ func(input string, expectedValue, expectError bool) {
+ value, err := ParsePostgresConfigBoolean(input)
+ if expectError {
+ Expect(err).Should(HaveOccurred())
+ } else {
+ Expect(err).ShouldNot(HaveOccurred())
+ }
+ Expect(value).To(Equal(expectedValue))
+ },
+ Entry("foo", "foo", false, true),
+ Entry("on", "on", true, false),
+ Entry("ON", "ON", true, false),
+ Entry("off", "off", false, false),
+ Entry("true", "true", true, false),
+ Entry("false", "false", false, false),
+ Entry("0", "0", false, false),
+ Entry("1", "1", true, false),
+ Entry("n", "n", false, false),
+ Entry("y", "y", true, false),
+ Entry("t", "t", true, false),
+ Entry("f", "f", false, false),
+ Entry("o", "o", false, true),
+ Entry("ye", "ye", true, false),
+ Entry("tr", "tr", true, false),
+ Entry("fa", "fa", false, false),
+)
diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go
index 1b338ef6e7..655be4ecfe 100644
--- a/pkg/postgres/configuration.go
+++ b/pkg/postgres/configuration.go
@@ -457,7 +457,7 @@ var (
"wal_sender_timeout": "5s",
"wal_receiver_timeout": "5s",
"wal_level": "logical",
- "wal_log_hints": "on",
+ ParameterWalLogHints: "on",
// Workaround for PostgreSQL not behaving correctly when
// a default value is not explicit in the postgresql.conf and
// the parameter cannot be changed without a restart.
diff --git a/pkg/reconciler/backup/volumesnapshot/catalog.go b/pkg/reconciler/backup/volumesnapshot/catalog.go
index da72152b61..38048b4803 100644
--- a/pkg/reconciler/backup/volumesnapshot/catalog.go
+++ b/pkg/reconciler/backup/volumesnapshot/catalog.go
@@ -20,7 +20,7 @@ import (
"context"
"time"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go
index 61c8496320..e1d74097a1 100644
--- a/pkg/reconciler/backup/volumesnapshot/online.go
+++ b/pkg/reconciler/backup/volumesnapshot/online.go
@@ -42,7 +42,7 @@ func (o *onlineExecutor) finalize(
backup *apiv1.Backup,
targetPod *corev1.Pod,
) (*ctrl.Result, error) {
- body, err := o.backupClient.StatusWithErrors(ctx, targetPod.Status.PodIP)
+ body, err := o.backupClient.StatusWithErrors(ctx, targetPod)
if err != nil {
return nil, fmt.Errorf("while getting status while finalizing: %w", err)
}
@@ -71,9 +71,7 @@ func (o *onlineExecutor) finalize(
switch status.Phase {
case webserver.Started:
- if err := o.backupClient.Stop(ctx,
- targetPod.Status.PodIP,
- *webserver.NewStopBackupRequest(backup.Name)); err != nil {
+ if err := o.backupClient.Stop(ctx, targetPod, *webserver.NewStopBackupRequest(backup.Name)); err != nil {
return nil, fmt.Errorf("while stopping the backup client: %w", err)
}
return &ctrl.Result{RequeueAfter: time.Second * 5}, nil
@@ -96,7 +94,7 @@ func (o *onlineExecutor) prepare(
volumeSnapshotConfig := backup.GetVolumeSnapshotConfiguration(*cluster.Spec.Backup.VolumeSnapshot)
// Handle hot snapshots
- body, err := o.backupClient.StatusWithErrors(ctx, targetPod.Status.PodIP)
+ body, err := o.backupClient.StatusWithErrors(ctx, targetPod)
if err != nil {
return nil, fmt.Errorf("while getting status while preparing: %w", err)
}
@@ -114,7 +112,7 @@ func (o *onlineExecutor) prepare(
BackupName: backup.Name,
Force: true,
}
- if err := o.backupClient.Start(ctx, targetPod.Status.PodIP, req); err != nil {
+ if err := o.backupClient.Start(ctx, targetPod, req); err != nil {
return nil, fmt.Errorf("while trying to start the backup: %w", err)
}
return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil
diff --git a/pkg/reconciler/backup/volumesnapshot/online_test.go b/pkg/reconciler/backup/volumesnapshot/online_test.go
index 8c98d19230..c72d951e02 100644
--- a/pkg/reconciler/backup/volumesnapshot/online_test.go
+++ b/pkg/reconciler/backup/volumesnapshot/online_test.go
@@ -46,17 +46,17 @@ type fakeBackupClient struct {
func (f *fakeBackupClient) StatusWithErrors(
_ context.Context,
- _ string,
+ _ *corev1.Pod,
) (*webserver.Response[webserver.BackupResultData], error) {
return f.response, f.injectStatusError
}
-func (f *fakeBackupClient) Start(_ context.Context, _ string, _ webserver.StartBackupRequest) error {
+func (f *fakeBackupClient) Start(_ context.Context, _ *corev1.Pod, _ webserver.StartBackupRequest) error {
f.startCalled = true
return f.injectStartError
}
-func (f *fakeBackupClient) Stop(_ context.Context, _ string, _ webserver.StopBackupRequest) error {
+func (f *fakeBackupClient) Stop(_ context.Context, _ *corev1.Pod, _ webserver.StopBackupRequest) error {
f.stopCalled = true
return f.injectStopError
}
diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go
index 152e9212f2..1e3081828a 100644
--- a/pkg/reconciler/backup/volumesnapshot/reconciler.go
+++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go
@@ -24,7 +24,7 @@ import (
"strconv"
"time"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
index ad66e6f451..8ce7adc393 100644
--- a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
+++ b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
@@ -21,7 +21,7 @@ import (
"fmt"
"time"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go
index 785e69aaf9..5e8231f94e 100644
--- a/pkg/reconciler/backup/volumesnapshot/resources.go
+++ b/pkg/reconciler/backup/volumesnapshot/resources.go
@@ -21,7 +21,7 @@ import (
"fmt"
"strings"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
diff --git a/pkg/reconciler/backup/volumesnapshot/resources_test.go b/pkg/reconciler/backup/volumesnapshot/resources_test.go
index c6c9f6bff9..ea65460e57 100644
--- a/pkg/reconciler/backup/volumesnapshot/resources_test.go
+++ b/pkg/reconciler/backup/volumesnapshot/resources_test.go
@@ -19,7 +19,7 @@ package volumesnapshot
import (
"errors"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
diff --git a/pkg/reconciler/persistentvolumeclaim/calculator.go b/pkg/reconciler/persistentvolumeclaim/calculator.go
index e152716268..87a21458fb 100644
--- a/pkg/reconciler/persistentvolumeclaim/calculator.go
+++ b/pkg/reconciler/persistentvolumeclaim/calculator.go
@@ -19,7 +19,7 @@ package persistentvolumeclaim
import (
"fmt"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
diff --git a/pkg/reconciler/persistentvolumeclaim/instance.go b/pkg/reconciler/persistentvolumeclaim/instance.go
index 362f5578b6..3dd3ad388f 100644
--- a/pkg/reconciler/persistentvolumeclaim/instance.go
+++ b/pkg/reconciler/persistentvolumeclaim/instance.go
@@ -18,9 +18,9 @@ package persistentvolumeclaim
import (
"context"
+ "slices"
"time"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go
index 9f1dcbfcac..28894f3df0 100644
--- a/pkg/reconciler/persistentvolumeclaim/storagesource.go
+++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go
@@ -19,7 +19,7 @@ package persistentvolumeclaim
import (
"context"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
diff --git a/pkg/reconciler/persistentvolumeclaim/validation.go b/pkg/reconciler/persistentvolumeclaim/validation.go
index 7ec872ac6b..2b48b0bf7e 100644
--- a/pkg/reconciler/persistentvolumeclaim/validation.go
+++ b/pkg/reconciler/persistentvolumeclaim/validation.go
@@ -20,7 +20,7 @@ import (
"context"
"fmt"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/pkg/reconciler/persistentvolumeclaim/validation_test.go b/pkg/reconciler/persistentvolumeclaim/validation_test.go
index a76dc00d59..fded9877c4 100644
--- a/pkg/reconciler/persistentvolumeclaim/validation_test.go
+++ b/pkg/reconciler/persistentvolumeclaim/validation_test.go
@@ -17,7 +17,7 @@ limitations under the License.
package persistentvolumeclaim
import (
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
diff --git a/pkg/resources/instance/client.go b/pkg/resources/instance/client.go
index 920aff3ff5..6bb1a2adca 100644
--- a/pkg/resources/instance/client.go
+++ b/pkg/resources/instance/client.go
@@ -18,12 +18,15 @@ package instance
import (
"context"
+ "crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
+ neturl "net/url"
+ "slices"
"sort"
"time"
@@ -31,12 +34,19 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
+const (
+ defaultRequestTimeout = 30 * time.Second
+ noRequestTimeout = 0
+)
+
// requestRetry is the default backoff used to query the instance manager
// for the status of each PostgreSQL instance.
var requestRetry = wait.Backoff{
@@ -63,27 +73,37 @@ func (i StatusError) Error() string {
// NewStatusClient returns a client capable of querying the instance HTTP endpoints
func NewStatusClient() *StatusClient {
- const connectionTimeout = 2 * time.Second
- const requestTimeout = 30 * time.Second
+ const defaultConnectionTimeout = 2 * time.Second
// We want a connection timeout to prevent waiting for the default
// TCP connection timeout (30 seconds) on lost SYN packets
+ dialer := &net.Dialer{
+ Timeout: defaultConnectionTimeout,
+ }
timeoutClient := &http.Client{
Transport: &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: connectionTimeout,
- }).DialContext,
+ DialContext: dialer.DialContext,
+ DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ tlsConfig, err := certs.GetTLSConfigFromContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ tlsDialer := tls.Dialer{
+ NetDialer: dialer,
+ Config: tlsConfig,
+ }
+ return tlsDialer.DialContext(ctx, network, addr)
+ },
},
- Timeout: requestTimeout,
}
- return &StatusClient{timeoutClient}
+ return &StatusClient{Client: timeoutClient}
}
// extractInstancesStatus extracts the status of the underlying PostgreSQL instance from
// the requested Pod, via the instance manager. In case of failure, errors are passed
// in the result list
-func (r StatusClient) extractInstancesStatus(
+func (r *StatusClient) extractInstancesStatus(
ctx context.Context,
activePods []corev1.Pod,
) postgres.PostgresqlStatusList {
@@ -127,7 +147,7 @@ func (r *StatusClient) getReplicaStatusFromPodViaHTTP(
// online upgrades. It is not intended to wait for recovering from any
// other remote failure.
_ = retry.OnError(requestRetry, isErrorRetryable, func() error {
- result = r.rawInstanceStatusRequest(ctx, r.Client, pod)
+ result = r.rawInstanceStatusRequest(ctx, pod)
return result.Error
})
@@ -169,12 +189,13 @@ func (r *StatusClient) GetPgControlDataFromInstance(
) (string, error) {
contextLogger := log.FromContext(ctx)
- httpURL := url.Build(pod.Status.PodIP, url.PathPGControlData, url.StatusPort)
+ scheme := GetStatusSchemeFromPod(pod)
+ httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPGControlData, url.StatusPort)
req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil)
if err != nil {
return "", err
}
-
+ r.Client.Timeout = defaultRequestTimeout
resp, err := r.Client.Do(req)
if err != nil {
return "", err
@@ -210,20 +231,83 @@ func (r *StatusClient) GetPgControlDataFromInstance(
return result.Data, result.Error
}
+// UpgradeInstanceManager upgrades the instance manager to the passed availableArchitecture
+func (r *StatusClient) UpgradeInstanceManager(
+ ctx context.Context,
+ pod *corev1.Pod,
+ availableArchitecture *utils.AvailableArchitecture,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ binaryFileStream, err := availableArchitecture.FileStream()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if binaryErr := binaryFileStream.Close(); binaryErr != nil {
+ contextLogger.Error(binaryErr, "while closing the binaryFileStream")
+ }
+ }()
+
+ scheme := GetStatusSchemeFromPod(pod)
+ updateURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathUpdate, url.StatusPort)
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, updateURL, nil)
+ if err != nil {
+ return err
+ }
+ req.Body = binaryFileStream
+
+ r.Client.Timeout = noRequestTimeout
+ resp, err := r.Client.Do(req)
+ // This is the desired response. The instance manager will
+ // synchronously update and this call won't return.
+ if isEOF(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode == http.StatusOK {
+ // Currently the instance manager should never return StatusOK
+ return errors.New("instance manager has returned an unexpected status code")
+ }
+
+ var body []byte
+ body, err = io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err = resp.Body.Close(); err != nil {
+ return err
+ }
+
+ return fmt.Errorf("the instance manager upgrade path returned the following error: '%s", string(body))
+}
+
+func isEOF(err error) bool {
+ if err == nil {
+ return false
+ }
+ return errors.Is(err.(*neturl.Error).Err, io.EOF)
+}
+
// rawInstanceStatusRequest retrieves the status of PostgreSQL pods via an HTTP request with GET method.
func (r *StatusClient) rawInstanceStatusRequest(
ctx context.Context,
- client *http.Client,
pod corev1.Pod,
) (result postgres.PostgresqlStatus) {
- statusURL := url.Build(pod.Status.PodIP, url.PathPgStatus, url.StatusPort)
+ scheme := GetStatusSchemeFromPod(&pod)
+ statusURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgStatus, url.StatusPort)
req, err := http.NewRequestWithContext(ctx, "GET", statusURL, nil)
if err != nil {
result.Error = err
return result
}
- resp, err := client.Do(req)
+ r.Client.Timeout = defaultRequestTimeout
+ resp, err := r.Client.Do(req)
if err != nil {
result.Error = err
return result
@@ -255,3 +339,40 @@ func (r *StatusClient) rawInstanceStatusRequest(
return result
}
+
+// HTTPScheme identifies a valid scheme: http, https
+type HTTPScheme string
+
+const (
+ schemeHTTP HTTPScheme = "http"
+ schemeHTTPS HTTPScheme = "https"
+)
+
+// IsHTTPS returns true if the scheme is HTTPS
+func (h HTTPScheme) IsHTTPS() bool {
+ return h == schemeHTTPS
+}
+
+// ToString returns the scheme as a string value
+func (h HTTPScheme) ToString() string {
+ return string(h)
+}
+
+// GetStatusSchemeFromPod detects if a Pod is exposing the status via HTTP or HTTPS
+func GetStatusSchemeFromPod(pod *corev1.Pod) HTTPScheme {
+ // Detect the scheme by inspecting the postgres container command
+ for _, container := range pod.Spec.Containers {
+ // skip any container that is not the postgres one
+ if container.Name != specs.PostgresContainerName {
+ continue
+ }
+
+ if slices.Contains(container.Command, "--status-port-tls") {
+ return schemeHTTPS
+ }
+
+ break
+ }
+
+ return schemeHTTP
+}
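
A minimal sketch of the scheme detection above: a pod serves its status endpoint over HTTPS when its postgres container was started with `--status-port-tls`. The container name and command below are illustrative, not the operator's real constants.

```go
// Sketch: decide http vs https by looking for the TLS flag on the postgres container.
package main

import (
	"fmt"
	"slices"

	corev1 "k8s.io/api/core/v1"
)

func statusScheme(pod *corev1.Pod) string {
	for _, container := range pod.Spec.Containers {
		if container.Name != "postgres" {
			continue
		}
		if slices.Contains(container.Command, "--status-port-tls") {
			return "https"
		}
		break
	}
	return "http"
}

func main() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{Name: "postgres", Command: []string{"instance-manager", "run", "--status-port-tls"}},
			},
		},
	}
	fmt.Println(statusScheme(pod)) // https
}
```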
diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go
index 5cecaaa981..e392ab1216 100644
--- a/pkg/specs/jobs.go
+++ b/pkg/specs/jobs.go
@@ -20,7 +20,7 @@ import (
"fmt"
"github.com/kballard/go-shellquote"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go
index 7895ea740d..364e8d6324 100644
--- a/pkg/specs/pgbouncer/deployments.go
+++ b/pkg/specs/pgbouncer/deployments.go
@@ -81,7 +81,7 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen
}).
WithContainerPort("pgbouncer", &corev1.ContainerPort{
Name: "metrics",
- ContainerPort: int32(url.PgBouncerMetricsPort),
+ ContainerPort: url.PgBouncerMetricsPort,
}).
WithInitContainerImage(specs.BootstrapControllerContainerName, config.Current.OperatorImageName, true).
WithInitContainerCommand(specs.BootstrapControllerContainerName,
@@ -112,7 +112,7 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen
TimeoutSeconds: 5,
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
- Port: intstr.FromInt(pgBouncerConfig.PgBouncerPort),
+ Port: intstr.FromInt32(pgBouncerConfig.PgBouncerPort),
},
},
}, false).
diff --git a/pkg/specs/pgbouncer/deployments_test.go b/pkg/specs/pgbouncer/deployments_test.go
index 4b95bb7dc1..b00777b106 100644
--- a/pkg/specs/pgbouncer/deployments_test.go
+++ b/pkg/specs/pgbouncer/deployments_test.go
@@ -154,6 +154,6 @@ var _ = Describe("Deployment", func() {
Expect(deployment).ToNot(BeNil())
Expect(deployment.Spec.Template.Spec.Containers[0].ReadinessProbe.TimeoutSeconds).To(Equal(int32(5)))
Expect(deployment.Spec.Template.Spec.Containers[0].ReadinessProbe.TCPSocket.Port).
- To(Equal(intstr.FromInt(pgBouncerConfig.PgBouncerPort)))
+ To(Equal(intstr.FromInt32(pgBouncerConfig.PgBouncerPort)))
})
})
diff --git a/pkg/specs/poddisruptionbudget.go b/pkg/specs/poddisruptionbudget.go
index ce81c683b6..b7319fee10 100644
--- a/pkg/specs/poddisruptionbudget.go
+++ b/pkg/specs/poddisruptionbudget.go
@@ -33,8 +33,8 @@ func BuildReplicasPodDisruptionBudget(cluster *apiv1.Cluster) *policyv1.PodDisru
if cluster == nil || cluster.Spec.Instances < 3 {
return nil
}
- minAvailableReplicas := cluster.Spec.Instances - 2
- allReplicasButOne := intstr.FromInt(minAvailableReplicas)
+ minAvailableReplicas := int32(cluster.Spec.Instances - 2)
+ allReplicasButOne := intstr.FromInt32(minAvailableReplicas)
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
@@ -63,7 +63,7 @@ func BuildPrimaryPodDisruptionBudget(cluster *apiv1.Cluster) *policyv1.PodDisrup
if cluster == nil {
return nil
}
- one := intstr.FromInt(1)
+ one := intstr.FromInt32(1)
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index 6e68b705ff..ee25b421ca 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -23,9 +23,9 @@ import (
"fmt"
"math"
"reflect"
+ "slices"
"strconv"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -159,6 +159,7 @@ func CreateClusterPodSpec(
cluster apiv1.Cluster,
envConfig EnvConfig,
gracePeriod int64,
+ enableHTTPS bool,
) corev1.PodSpec {
return corev1.PodSpec{
Hostname: podName,
@@ -166,7 +167,7 @@ func CreateClusterPodSpec(
createBootstrapContainer(cluster),
},
SchedulerName: cluster.Spec.SchedulerName,
- Containers: createPostgresContainers(cluster, envConfig),
+ Containers: createPostgresContainers(cluster, envConfig, enableHTTPS),
Volumes: createPostgresVolumes(&cluster, podName),
SecurityContext: CreatePodSecurityContext(
cluster.GetSeccompProfile(),
@@ -183,7 +184,7 @@ func CreateClusterPodSpec(
// createPostgresContainers create the PostgreSQL containers that are
// used for every instance
-func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []corev1.Container {
+func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enableHTTPS bool) []corev1.Container {
containers := []corev1.Container{
{
Name: PostgresContainerName,
@@ -199,7 +200,7 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []core
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: url.PathHealth,
- Port: intstr.FromInt32(int32(url.StatusPort)),
+ Port: intstr.FromInt32(url.StatusPort),
},
},
},
@@ -209,7 +210,7 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []core
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: url.PathReady,
- Port: intstr.FromInt(url.StatusPort),
+ Port: intstr.FromInt32(url.StatusPort),
},
},
},
@@ -219,7 +220,7 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []core
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: url.PathHealth,
- Port: intstr.FromInt(url.StatusPort),
+ Port: intstr.FromInt32(url.StatusPort),
},
},
},
@@ -237,12 +238,12 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []core
},
{
Name: "metrics",
- ContainerPort: int32(url.PostgresMetricsPort),
+ ContainerPort: url.PostgresMetricsPort,
Protocol: "TCP",
},
{
Name: "status",
- ContainerPort: int32(url.StatusPort),
+ ContainerPort: url.StatusPort,
Protocol: "TCP",
},
},
@@ -250,11 +251,29 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig) []core
},
}
+ if enableHTTPS {
+ containers[0].StartupProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+ containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+ containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+ containers[0].Command = append(containers[0].Command, "--status-port-tls")
+ }
+
addManagerLoggingOptions(cluster, &containers[0])
+ // if the user customizes the liveness probe timeout, adjust the failure threshold accordingly
+ addLivenessProbeFailureThreshold(cluster, &containers[0])
+
return containers
}
+// addLivenessProbeFailureThreshold adjusts the liveness probe failure threshold based on the `spec.livenessProbeTimeout` value
+func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.Container) {
+ if cluster.Spec.LivenessProbeTimeout != nil {
+ timeout := *cluster.Spec.LivenessProbeTimeout
+ container.LivenessProbe.FailureThreshold = getLivenessProbeFailureThreshold(timeout)
+ }
+}
+
// getStartupProbeFailureThreshold get the startup probe failure threshold
// FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1
func getStartupProbeFailureThreshold(startupDelay int32) int32 {
@@ -264,6 +283,15 @@ func getStartupProbeFailureThreshold(startupDelay int32) int32 {
return int32(math.Ceil(float64(startupDelay) / float64(StartupProbePeriod)))
}
+// getLivenessProbeFailureThreshold gets the liveness probe failure threshold
+// FAILURE_THRESHOLD = ceil(livenessTimeout / periodSeconds) and minimum value is 1
+func getLivenessProbeFailureThreshold(livenessTimeout int32) int32 {
+ if livenessTimeout <= LivenessProbePeriod {
+ return 1
+ }
+ return int32(math.Ceil(float64(livenessTimeout) / float64(LivenessProbePeriod)))
+}
+
// CreateAffinitySection creates the affinity sections for Pods, given the configuration
// from the user
func CreateAffinitySection(clusterName string, config apiv1.AffinityConfiguration) *corev1.Affinity {
@@ -381,7 +409,8 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod {
envConfig := CreatePodEnvConfig(cluster, podName)
- podSpec := CreateClusterPodSpec(podName, cluster, envConfig, gracePeriod)
+ tlsEnabled := true
+ podSpec := CreateClusterPodSpec(podName, cluster, envConfig, gracePeriod, tlsEnabled)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
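
A worked example of the failure-threshold formula used above: threshold = ceil(timeout / period), never below 1. The probe period is assumed to be 10 seconds here, which is consistent with the unit tests added in this change (a timeout of 31 yields a threshold of 4).

```go
// Sketch of the liveness failure-threshold arithmetic.
package main

import (
	"fmt"
	"math"
)

const livenessProbePeriod = 10 // assumed period, in seconds

func livenessFailureThreshold(timeout int32) int32 {
	if timeout <= livenessProbePeriod {
		return 1
	}
	return int32(math.Ceil(float64(timeout) / float64(livenessProbePeriod)))
}

func main() {
	fmt.Println(livenessFailureThreshold(5))  // 1
	fmt.Println(livenessFailureThreshold(31)) // 4
}
```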
diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go
index 3585ffe1e9..1930fea5bf 100644
--- a/pkg/specs/pods_test.go
+++ b/pkg/specs/pods_test.go
@@ -929,3 +929,13 @@ var _ = Describe("Compute startup probe failure threshold", func() {
Expect(getStartupProbeFailureThreshold(109)).To(BeNumerically("==", 11))
})
})
+
+var _ = Describe("Compute liveness probe failure threshold", func() {
+ It("should take the minimum value 1", func() {
+ Expect(getLivenessProbeFailureThreshold(5)).To(BeNumerically("==", 1))
+ })
+
+ It("should take the value from 'startDelay / periodSeconds'", func() {
+ Expect(getLivenessProbeFailureThreshold(31)).To(BeNumerically("==", 4))
+ })
+})
diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go
index fb69f20f22..19dc793c78 100644
--- a/pkg/specs/roles.go
+++ b/pkg/specs/roles.go
@@ -17,7 +17,8 @@ limitations under the License.
package specs
import (
- "golang.org/x/exp/slices"
+ "slices"
+
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/pkg/specs/services.go b/pkg/specs/services.go
index f8fff97845..a49e27a85e 100644
--- a/pkg/specs/services.go
+++ b/pkg/specs/services.go
@@ -31,7 +31,7 @@ func buildInstanceServicePorts() []corev1.ServicePort {
{
Name: PostgresContainerName,
Protocol: corev1.ProtocolTCP,
- TargetPort: intstr.FromInt(postgres.ServerPort),
+ TargetPort: intstr.FromInt32(postgres.ServerPort),
Port: postgres.ServerPort,
},
}
diff --git a/pkg/stringset/stringset.go b/pkg/stringset/stringset.go
index 32672222df..f5678ec4a0 100644
--- a/pkg/stringset/stringset.go
+++ b/pkg/stringset/stringset.go
@@ -18,7 +18,7 @@ limitations under the License.
package stringset
import (
- "golang.org/x/exp/slices"
+ "slices"
)
// Data represent a set of strings
diff --git a/pkg/utils/exec.go b/pkg/utils/exec.go
index 429bbbe46b..f9844f5365 100644
--- a/pkg/utils/exec.go
+++ b/pkg/utils/exec.go
@@ -102,7 +102,8 @@ func ExecCommand(
Stderr: &stderr,
})
if err != nil {
- return stdout.String(), stderr.String(), fmt.Errorf("%w - %v", err, stderr.String())
+ retErr := fmt.Errorf("cmd: %s\nerror: %w\nstdErr: %v", command, err, stderr.String())
+ return stdout.String(), stderr.String(), retErr
}
return stdout.String(), stderr.String(), nil
diff --git a/pkg/utils/fencing.go b/pkg/utils/fencing.go
index 7ead694e7a..7cedb1cbf5 100644
--- a/pkg/utils/fencing.go
+++ b/pkg/utils/fencing.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "slices"
"sort"
corev1 "k8s.io/api/core/v1"
@@ -139,9 +140,9 @@ func removeFencedInstance(instanceName string, object metav1.Object) (bool, erro
// FencingMetadataExecutor executes the logic regarding adding and removing the fencing annotation for a kubernetes
// object
type FencingMetadataExecutor struct {
- fenceFunc func(string, metav1.Object) (appliedChange bool, err error)
- instanceName string
- cli client.Client
+ fenceFunc func(string, metav1.Object) (appliedChange bool, err error)
+ cli client.Client
+ instanceNames []string
}
// NewFencingMetadataExecutor creates a fluent client for FencingMetadataExecutor
@@ -165,37 +166,46 @@ func (fb *FencingMetadataExecutor) RemoveFencing() *FencingMetadataExecutor {
// ForAllInstances applies the logic to all cluster instances
func (fb *FencingMetadataExecutor) ForAllInstances() *FencingMetadataExecutor {
- fb.instanceName = FenceAllInstances
+ fb.instanceNames = []string{FenceAllInstances}
return fb
}
// ForInstance applies the logic to the specified instance
-func (fb *FencingMetadataExecutor) ForInstance(instanceName string) *FencingMetadataExecutor {
- fb.instanceName = instanceName
+func (fb *FencingMetadataExecutor) ForInstance(instanceNames ...string) *FencingMetadataExecutor {
+ fb.instanceNames = instanceNames
return fb
}
// Execute executes the instructions given with the fluent builder, returns any error encountered
func (fb *FencingMetadataExecutor) Execute(ctx context.Context, key types.NamespacedName, obj client.Object) error {
- if fb.instanceName == "" {
+ if len(fb.instanceNames) == 0 {
return errors.New("chose an operation to execute")
}
+ if len(fb.instanceNames) > 1 && slices.Contains(fb.instanceNames, FenceAllInstances) {
+ return errors.New("the fence-all-instances token (*) cannot be used along other instances")
+ }
if err := fb.cli.Get(ctx, key, obj); err != nil {
return err
}
- if fb.instanceName != FenceAllInstances {
- var pod corev1.Pod
- if err := fb.cli.Get(ctx, client.ObjectKey{Namespace: key.Namespace, Name: fb.instanceName}, &pod); err != nil {
- return fmt.Errorf("node %s not found in namespace %s", fb.instanceName, key.Namespace)
+ for _, name := range fb.instanceNames {
+ if name != FenceAllInstances {
+ var pod corev1.Pod
+ if err := fb.cli.Get(ctx, client.ObjectKey{Namespace: key.Namespace, Name: name}, &pod); err != nil {
+ return fmt.Errorf("node %s not found in namespace %s", name, key.Namespace)
+ }
}
}
+ var appliedChange bool
fencedObject := obj.DeepCopyObject().(client.Object)
- appliedChange, err := fb.fenceFunc(fb.instanceName, fencedObject)
- if err != nil {
- return err
+ for _, name := range fb.instanceNames {
+ changed, err := fb.fenceFunc(name, fencedObject)
+ if err != nil {
+ return err
+ }
+ appliedChange = appliedChange || changed
}
if !appliedChange {
return nil
diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/utils/logs/cluster_logs.go
index 04fb6399bd..84e45f47eb 100644
--- a/pkg/utils/logs/cluster_logs.go
+++ b/pkg/utils/logs/cluster_logs.go
@@ -24,6 +24,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
@@ -226,6 +227,8 @@ func (csr *ClusterStreamingRequest) streamInGoroutine(
if err != nil {
log.Printf("error on streaming request, pod %s: %v", podName, err)
return
+ } else if apierrs.IsBadRequest(err) {
+ return
}
defer func() {
err := logStream.Close()
diff --git a/pkg/utils/math.go b/pkg/utils/math.go
index ee5999cf84..7a98f3d02c 100644
--- a/pkg/utils/math.go
+++ b/pkg/utils/math.go
@@ -16,9 +16,17 @@ limitations under the License.
package utils
-import (
- "golang.org/x/exp/constraints"
-)
+// anyNumber is a constraint that permits any number type. This type
+// definition is copied rather than depending on x/exp/constraints since the
+// dependency is otherwise unneeded, the definition is relatively trivial and
+// static, and the Go language maintainers are not sure if/where these will live
+// in the standard library.
+//
+// Reference: https://github.com/golang/go/issues/61914
+type anyNumber interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
// IsPowerOfTwo calculates if a number is power of two or not
// reference: https://github.com/golang/go/blob/master/src/strconv/itoa.go#L204 #wokeignore:rule=master
@@ -28,9 +36,9 @@ func IsPowerOfTwo(n int) bool {
}
// ToBytes converts an input value in MB to bytes
-// Input: value - an integer representing size in MB
+// Input: value - a number representing size in MB
// Output: the size in bytes, calculated by multiplying the input value by 1024 * 1024
-func ToBytes[T constraints.Signed | constraints.Float](mb T) float64 {
+func ToBytes[T anyNumber](mb T) float64 {
multiplier := float64(1024)
return float64(mb) * multiplier * multiplier
}
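
A short usage sketch of the generic MB-to-bytes conversion above, exercising the locally defined numeric constraint with different argument types:

```go
// Sketch: a local numeric constraint replacing x/exp/constraints, used by a
// generic megabytes-to-bytes helper.
package main

import "fmt"

type anyNumber interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
		~float32 | ~float64
}

func toBytes[T anyNumber](mb T) float64 {
	const multiplier float64 = 1024
	return float64(mb) * multiplier * multiplier
}

func main() {
	fmt.Println(toBytes(1))        // 1.048576e+06
	fmt.Println(toBytes(uint8(2))) // 2.097152e+06
	fmt.Println(toBytes(0.5))      // 524288
}
```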
diff --git a/pkg/utils/pod_conditions.go b/pkg/utils/pod_conditions.go
index d3c15ad89a..0d47799e2b 100644
--- a/pkg/utils/pod_conditions.go
+++ b/pkg/utils/pod_conditions.go
@@ -55,6 +55,11 @@ func IsPodReady(pod corev1.Pod) bool {
return false
}
+// PodHasContainerStatuses checks if a Pod has container status elements
+func PodHasContainerStatuses(pod corev1.Pod) bool {
+ return len(pod.Status.ContainerStatuses) > 0
+}
+
// IsPodActive checks if a pod is active, copied from:
// https://github.com/kubernetes/kubernetes/blob/1bd0077/test/e2e/framework/pod/resource.go#L664
func IsPodActive(p corev1.Pod) bool {
diff --git a/pkg/utils/systemid.go b/pkg/utils/systemid.go
deleted file mode 100644
index 72034d7473..0000000000
--- a/pkg/utils/systemid.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
- "context"
-
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-var systemUID string
-
-// DetectKubeSystemUID retrieves the UID of the kube-system namespace of the containing cluster
-func DetectKubeSystemUID(ctx context.Context, kubeClient client.Client) error {
- ns := &corev1.Namespace{}
- if err := kubeClient.Get(ctx, types.NamespacedName{Name: "kube-system"}, ns); err != nil {
- return err
- }
- systemUID = string(ns.UID)
-
- return nil
-}
-
-// GetKubeSystemUID returns the uid of the kube-system namespace
-func GetKubeSystemUID() string {
- return systemUID
-}
diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go
index d55e55db6c..1d07474bdf 100644
--- a/pkg/versions/versions.go
+++ b/pkg/versions/versions.go
@@ -20,13 +20,13 @@ package versions
const (
// Version is the version of the operator
- Version = "1.23.1"
+ Version = "1.23.2"
// DefaultImageName is the default image used by the operator to create pods
DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:16.3"
// DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL
- DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.23.1"
+ DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.23.2"
)
// BuildInfo is a struct containing all the info about the build
@@ -36,7 +36,7 @@ type BuildInfo struct {
var (
// buildVersion injected during the build
- buildVersion = "1.23.1"
+ buildVersion = "1.23.2"
// buildCommit injected during the build
buildCommit = "none"
diff --git a/releases/cnpg-1.23.2.yaml b/releases/cnpg-1.23.2.yaml
new file mode 100644
index 0000000000..b61fb4690d
--- /dev/null
+++ b/releases/cnpg-1.23.2.yaml
@@ -0,0 +1,16082 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-system
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: backups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.method
+ name: Method
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.error
+ name: Error
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Backup is the Schema for the backups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the backup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the backup. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+                    description: Use the Azure AD based authentication without explicitly
+                      providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ backupId:
+ description: The ID of the Barman backup
+ type: string
+ backupLabelFile:
+ description: Backup label file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ backupName:
+ description: The Name of the Barman backup
+ type: string
+ beginLSN:
+ description: The starting xlog
+ type: string
+ beginWal:
+ description: The starting WAL
+ type: string
+ commandError:
+ description: The backup command output in case of error
+ type: string
+ commandOutput:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ destinationPath:
+ description: |-
+                The path where the backup is stored (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data. This may not be populated in case of errors.
+ type: string
+ encryption:
+                description: Encryption method required by the S3 API
+ type: string
+ endLSN:
+ description: The ending xlog
+ type: string
+ endWal:
+ description: The ending WAL
+ type: string
+ endpointCA:
+ description: |-
+                  EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ error:
+ description: The detected error
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google Cloud
+ Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage JSON
+ file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+                      If set to true, it will presume that it's running inside a GKE environment;
+                      defaults to false.
+ type: boolean
+ type: object
+ instanceID:
+ description: Information to identify the instance where the backup
+ has been taken from
+ properties:
+ ContainerID:
+ description: The container ID
+ type: string
+ podName:
+ description: The pod name
+ type: string
+ type: object
+ method:
+ description: The backup method being used
+ type: string
+ online:
+ description: Whether the backup was online/hot (`true`) or offline/cold
+ (`false`)
+ type: boolean
+ phase:
+ description: The last backup status
+ type: string
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+                    description: Use the role based authentication without explicitly
+                      providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the region
+ name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+                    description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+                  The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ snapshotBackupStatus:
+ description: Status of the volumeSnapshot backup
+ properties:
+ elements:
+ description: The elements list, populated with the gathered volume
+ snapshots
+ items:
+ description: BackupSnapshotElementStatus is a volume snapshot
+ that is part of a volume snapshot method backup
+ properties:
+ name:
+ description: Name is the snapshot resource name
+ type: string
+ tablespaceName:
+ description: |-
+ TablespaceName is the name of the snapshotted tablespace. Only set
+ when type is PG_TABLESPACE
+ type: string
+ type:
+                          description: Type is the role of the snapshot in the cluster,
+ such as PG_DATA, PG_WAL and PG_TABLESPACE
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ type: array
+ type: object
+ startedAt:
+ description: When the backup was started
+ format: date-time
+ type: string
+ stoppedAt:
+ description: When the backup was terminated
+ format: date-time
+ type: string
+ tablespaceMapFile:
+ description: Tablespace map file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
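+# A minimal sketch of a Backup request built against the schema above; the
+# resource names and the chosen method are illustrative placeholders only.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: Backup
+# metadata:
+#   name: backup-example
+# spec:
+#   cluster:
+#     name: cluster-example
+#   method: barmanObjectStore   # or volumeSnapshot / plugin
+#   target: prefer-standby      # or primary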
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ClusterImageCatalog
+ listKind: ClusterImageCatalogList
+ plural: clusterimagecatalogs
+ singular: clusterimagecatalog
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ClusterImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
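+# A minimal sketch of a ClusterImageCatalog (cluster-scoped) built against the
+# schema above; the image references below are hypothetical placeholders.
+#
+# apiVersion: postgresql.cnpg.io/v1
+# kind: ClusterImageCatalog
+# metadata:
+#   name: postgresql-catalog
+# spec:
+#   images:
+#     - major: 16
+#       image: registry.example.com/postgresql:16
+#     - major: 15
+#       image: registry.example.com/postgresql:15   # major versions must be unique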
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: clusters.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Cluster
+ listKind: ClusterList
+ plural: clusters
+ singular: cluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Number of instances
+ jsonPath: .status.instances
+ name: Instances
+ type: integer
+ - description: Number of ready instances
+ jsonPath: .status.readyInstances
+ name: Ready
+ type: integer
+ - description: Cluster current status
+ jsonPath: .status.phase
+ name: Status
+ type: string
+ - description: Primary pod
+ jsonPath: .status.currentPrimary
+ name: Primary
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Cluster is the Schema for the PostgreSQL API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the cluster.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ affinity:
+ description: Affinity/Anti-affinity rules for Pods
+ properties:
+ additionalPodAffinity:
+                    description: AdditionalPodAffinity allows specifying pod affinity
+ terms to be passed to all the cluster's pods.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ additionalPodAntiAffinity:
+ description: |-
+                      AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated
+ by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ enablePodAntiAffinity:
+ description: |-
+ Activates anti-affinity for the pods. The operator will define pods
+ anti-affinity unless this field is explicitly set to false
+ type: boolean
+ nodeAffinity:
+ description: |-
+ NodeAffinity describes node affinity scheduling rules for the pod.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+                      NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+                      PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                      "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+                        the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
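+              # A minimal sketch of how the affinity stanza above could be filled in a
+              # Cluster spec; the node selector label is a hypothetical placeholder,
+              # while kubernetes.io/hostname is the usual well-known node label.
+              #
+              # affinity:
+              #   enablePodAntiAffinity: true
+              #   podAntiAffinityType: preferred
+              #   topologyKey: kubernetes.io/hostname
+              #   nodeSelector:
+              #     workload: postgres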
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+                              explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+                          The configuration to be used to backup the data files.
+                          When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+                              Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+                          The path where the backup is stored (e.g. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+                          EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+                              If set to true, it will presume that it's running inside a GKE environment;
+                              defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+                              explicitly providing the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+                            description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+                          The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+                              Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ type: object
+ required:
+ - destinationPath
+ type: object
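+ # A minimal illustrative sketch of a `spec.backup.barmanObjectStore`
+ # stanza using the fields described above; the bucket path, endpoint
+ # and secret names are hypothetical.
+ #
+ #   backup:
+ #     barmanObjectStore:
+ #       destinationPath: "s3://backups/cluster-example"
+ #       endpointURL: "https://s3.internal.example.com"
+ #       s3Credentials:
+ #         accessKeyId:
+ #           name: backup-creds
+ #           key: ACCESS_KEY_ID
+ #         secretAccessKey:
+ #           name: backup-creds
+ #           key: ACCESS_SECRET_KEY
+ #       wal:
+ #         compression: gzip
+ #         maxParallel: 4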
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
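+ # Sketch of how `retentionPolicy` and `target` combine under
+ # `spec.backup`; the 30-day retention value is just one example of
+ # the `XXu` pattern described above.
+ #
+ #   backup:
+ #     retentionPolicy: "30d"
+ #     target: prefer-standby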
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations key-value pairs that will be added
+ to the .metadata.annotations of the snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to the .metadata.labels of the snapshot resources.
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ It defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
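+ # Illustrative `spec.backup.volumeSnapshot` configuration; the
+ # VolumeSnapshotClass name is hypothetical and depends on the CSI
+ # driver installed in the cluster.
+ #
+ #   backup:
+ #     volumeSnapshot:
+ #       className: csi-snapclass
+ #       online: true
+ #       onlineConfiguration:
+ #         immediateCheckpoint: true
+ #         waitForArchive: true
+ #       snapshotOwnerReference: cluster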
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
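+ # Sketch of a microservice-type import, assuming an external cluster
+ # named `cluster-source` is declared under `spec.externalClusters`;
+ # the database name is an example value.
+ #
+ #   bootstrap:
+ #     initdb:
+ #       import:
+ #         type: microservice
+ #         databases:
+ #           - app
+ #         source:
+ #           externalCluster: cluster-source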
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ PostInitApplicationSQLRefs holds references to ConfigMaps or Secrets
+ containing SQL files to be executed. The references are processed with
+ all Secrets first and then all ConfigMaps; within each Secret or ConfigMap,
+ the files are applied in the order of the corresponding array
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser immediately
+ after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitTemplateSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `template1` database
+ after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ walSegmentSize:
+ description: |-
+ The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
+ option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+ maximum: 1024
+ minimum: 1
+ type: integer
+ type: object
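+ # Illustrative `initdb` bootstrap putting the fields above together;
+ # the database, owner and the extension listed in `postInitSQL` are
+ # example values only.
+ #
+ #   bootstrap:
+ #     initdb:
+ #       database: app
+ #       owner: app
+ #       dataChecksums: true
+ #       walSegmentSize: 32
+ #       postInitSQL:
+ #         - CREATE EXTENSION IF NOT EXISTS pg_stat_statements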
+ pg_basebackup:
+ description: |-
+ Bootstrap the cluster taking a physical backup of another compatible
+ PostgreSQL instance
+ properties:
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: The name of the server of which we need to take
+ a physical backup
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
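+ # Sketch of a `pg_basebackup` bootstrap; `cluster-source` is a
+ # hypothetical entry of `spec.externalClusters` providing streaming
+ # replication access.
+ #
+ #   bootstrap:
+ #     pg_basebackup:
+ #       source: cluster-source
+ #       database: app
+ #       owner: app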
+ recovery:
+ description: Bootstrap the cluster from a backup
+ properties:
+ backup:
+ description: |-
+ The backup object containing the physical base backup from which to
+ initiate the recovery procedure.
+ Mutually exclusive with `source` and `volumeSnapshots`.
+ properties:
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ recoveryTarget:
+ description: |-
+ By default, the recovery process applies all the available
+ WAL files in the archive (full recovery). However, you can also
+ end the recovery as soon as a consistent state is reached or
+ recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+ as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+ More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ properties:
+ backupID:
+ description: |-
+ The ID of the backup from which to start the recovery process.
+ If empty (default), the operator will automatically detect the backup
+ based on targetTime or targetLSN, if specified; otherwise, the
+ latest available backup in chronological order is used.
+ type: string
+ exclusive:
+ description: |-
+ Set the target to be exclusive. If omitted, defaults to false, so that
+ in Postgres, `recovery_target_inclusive` will be true
+ type: boolean
+ targetImmediate:
+ description: End recovery as soon as a consistent state
+ is reached
+ type: boolean
+ targetLSN:
+ description: The target LSN (Log Sequence Number)
+ type: string
+ targetName:
+ description: |-
+ The target name (to be previously created
+ with `pg_create_restore_point`)
+ type: string
+ targetTLI:
+ description: The target timeline ("latest" or a positive
+ integer)
+ type: string
+ targetTime:
+ description: The target time as a timestamp in the RFC3339
+ standard
+ type: string
+ targetXID:
+ description: The target transaction ID
+ type: string
+ type: object
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: |-
+ The external cluster whose backup we will restore. This is also
+ used as the name of the folder under which the backup is stored,
+ so it must be set to the name of the source cluster.
+ Mutually exclusive with `backup`.
+ type: string
+ volumeSnapshots:
+ description: |-
+ The static PVC data source(s) from which to initiate the
+ recovery procedure. Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+ a cold backup copy on a fenced Postgres instance (a limitation
+ that will be removed in the future when online backup
+ is implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
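+ # Illustrative point-in-time recovery bootstrap; the source name and
+ # target timestamp are hypothetical, and `cluster-source` must match
+ # an object store defined in `spec.externalClusters`.
+ #
+ #   bootstrap:
+ #     recovery:
+ #       source: cluster-source
+ #       recoveryTarget:
+ #         targetTime: "2024-05-30T10:00:00Z"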
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
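+ # Sketch of user-provided certificates; all secret names are
+ # hypothetical and must exist in the cluster's namespace.
+ #
+ #   certificates:
+ #     serverCASecret: cluster-example-server-ca
+ #     serverTLSSecret: cluster-example-server-tls
+ #     clientCASecret: cluster-example-client-ca
+ #     replicationTLSSecret: cluster-example-replication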
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ EnvFrom follows the EnvFrom format to pass environment variable
+ sources to the pods to be used by Env
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in
+ the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ ephemeralVolumeSource:
+ description: EphemeralVolumeSource allows the user to configure the
+ source of ephemeral volumes.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to
+ consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
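+ # Illustrative size limits for the ephemeral volumes; the quantities
+ # are arbitrary examples.
+ #
+ #   ephemeralVolumesSizeLimit:
+ #     shm: 256Mi
+ #     temporaryData: 2Gi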
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ providing explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+ When not defined, base backups files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, it will presume that it's running inside a GKE
+ environment; defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ type: object
+ required:
+ - destinationPath
+ type: object
+ connectionParameters:
+ additionalProperties:
+ type: string
+ description: The list of connection parameters, such as dbname,
+ host, username, etc.
+ type: object
+ name:
+ description: The server name, required
+ type: string
+ password:
+ description: |-
+ The reference to the password to be used to connect to the server.
+ If a password is provided, CloudNativePG creates a PostgreSQL
+ passfile at `/controller/external/NAME/pass` (where "NAME" is the
+ cluster's name). This passfile is automatically referenced in the
+ connection string when establishing a connection to the remote
+ PostgreSQL server from the current PostgreSQL `Cluster`. This ensures
+ secure and efficient password management for external clusters.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslCert:
+ description: |-
+ The reference to an SSL certificate to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslKey:
+ description: |-
+ The reference to an SSL private key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
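+ # Sketch of an `externalClusters` entry used as a source for
+ # pg_basebackup, recovery or import; host, secret and key names are
+ # hypothetical.
+ #
+ #   externalClusters:
+ #     - name: cluster-source
+ #       connectionParameters:
+ #         host: cluster-source-rw.default.svc
+ #         user: postgres
+ #         dbname: postgres
+ #       password:
+ #         name: cluster-source-superuser
+ #         key: password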
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageCatalogRef:
+ description: Defines the major PostgreSQL version we want to use within
+ an ImageCatalog
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ major:
+ description: The major version of PostgreSQL we want to use from
+ the ImageCatalog
+ type: integer
+ x-kubernetes-validations:
+ - message: Major is immutable
+ rule: self == oldSelf
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - major
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: Only image catalogs are supported
+ rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+ - message: Only image catalogs are supported
+ rule: self.apiGroup == 'postgresql.cnpg.io'
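+ # Illustrative `imageCatalogRef`, assuming an ImageCatalog named
+ # `postgresql` exists in the same namespace; the major version is an
+ # example value.
+ #
+ #   imageCatalogRef:
+ #     apiGroup: postgresql.cnpg.io
+ #     kind: ImageCatalog
+ #     name: postgresql
+ #     major: 16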
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ livenessProbeTimeout:
+ description: |-
+ LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ to successfully respond to the liveness probe (default 30).
+ The Liveness probe failure threshold is derived from this value using the formula:
+ ceiling(livenessProbe / 10).
+ format: int32
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ type: object
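+ # Sketch of a declaratively managed role; the role name, granted
+ # membership and secret name are example values.
+ #
+ #   managed:
+ #     roles:
+ #       - name: app_reader
+ #         ensure: present
+ #         login: true
+ #         inRoles:
+ #           - pg_read_all_data
+ #         passwordSecret:
+ #           name: app-reader-password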
+ maxSyncReplicas:
+ default: 0
+ description: |-
+ The target value for the synchronous replication quorum, which can be
+ decreased if the number of ready standbys is lower than this.
+ Undefined or 0 disables synchronous replication.
+ minimum: 0
+ type: integer
+ minSyncReplicas:
+ default: 0
+ description: |-
+ Minimum number of instances required in synchronous replication with the
+ primary. Undefined or 0 allows writes to complete when no standby is
+ available.
+ minimum: 0
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this cluster
+ properties:
+ customQueriesConfigMap:
+ description: The list of config maps containing the custom queries
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ customQueriesSecret:
+ description: The list of secrets containing the custom queries
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ disableDefaultQueries:
+ default: false
+ description: |-
+ Whether the default queries should be injected.
+ Set it to `true` if you don't want to inject default queries into the cluster.
+ Default: false.
+ type: boolean
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
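+ # Illustrative sketch (placeholder names): enabling the PodMonitor and loading
+ # custom queries from a ConfigMap key; "example-queries" and "queries.yaml"
+ # are assumed to exist.
+ #   spec:
+ #     monitoring:
+ #       enablePodMonitor: true
+ #       customQueriesConfigMap:
+ #         - name: example-queries
+ #           key: queries.yaml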
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+ description: |-
+ Reuse the existing PVC (wait for the node to come
+ up again) or not (recreate it elsewhere when `instances` > 1)
+ type: boolean
+ type: object
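+ # Illustrative sketch: declaring an ongoing node maintenance window while
+ # keeping the existing PVCs for when the node comes back up.
+ #   spec:
+ #     nodeMaintenanceWindow:
+ #       inProgress: true
+ #       reusePVC: true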
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+ description: |-
+ PluginConfiguration specifies a plugin that needs to be loaded for this
+ cluster to be reconciled
+ properties:
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+ description: LDAP scheme to be used; possible options are
+ `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+ description: Set to 'true' to enable LDAP over TLS. The default
+ is 'false'
+ type: boolean
+ type: object
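+ # Illustrative sketch (placeholder server, DNs and secret): bind+search LDAP
+ # authentication using the `ldaps` scheme.
+ #   spec:
+ #     postgresql:
+ #       ldap:
+ #         server: ldap.example.com
+ #         scheme: ldaps
+ #         bindSearchAuth:
+ #           baseDN: ou=people,dc=example,dc=com
+ #           bindDN: cn=operator,dc=example,dc=com
+ #           bindPassword:
+ #             name: ldap-bind-password
+ #             key: password
+ #           searchAttribute: uid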
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+ description: List of shared preload libraries to add to the default
+ ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+ description: A list of node label values to extract and compare
+ to evaluate if the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ type: object
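+ # Illustrative sketch (example values): tuning postgresql.conf parameters and
+ # appending a pg_hba.conf rule; values are placeholders.
+ #   spec:
+ #     postgresql:
+ #       parameters:
+ #         max_connections: "200"
+ #         shared_buffers: "256MB"
+ #       pg_hba:
+ #         - host all all 10.0.0.0/8 scram-sha-256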
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
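+ # Illustrative sketch: requesting a switchover instead of an in-place restart
+ # of the primary, under manual (`supervised`) control.
+ #   spec:
+ #     primaryUpdateMethod: switchover
+ #     primaryUpdateStrategy: supervised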
+ priorityClassName:
+ description: |-
+ Name of the priority class which will be used in every generated Pod. If the specified
+ PriorityClass does not exist, the pod will not be scheduled. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ projectedVolumeTemplate:
+ description: |-
+ Template to be used to define projected volumes; the projected volumes will be mounted
+ under the `/projected` base folder
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description: Projection that may be projected along with other
+ supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root to write
+ the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap data
+ to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI
+ data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name, namespace
+ and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must not
+ be absolute or contain the ''..'' path. Must
+ be utf-8 encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for
+ volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the secret data to
+ project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: optional field specify whether the Secret
+ or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about the
+ serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
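+ # Illustrative sketch (placeholder ConfigMap and paths): projecting a single
+ # ConfigMap key under the `/projected` base folder.
+ #   spec:
+ #     projectedVolumeTemplate:
+ #       sources:
+ #         - configMap:
+ #             name: app-config
+ #             items:
+ #               - key: settings.json
+ #                 path: app/settings.json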
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - enabled
+ - source
+ type: object
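+ # Illustrative sketch (placeholder name): turning the cluster into a replica
+ # of an external cluster declared elsewhere in the spec.
+ #   spec:
+ #     replica:
+ #       enabled: true
+ #       source: cluster-origin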
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$
+ type: string
+ type: object
+ synchronizeReplicas:
+ description: Configures the synchronization of the user defined
+ physical replication slots
+ properties:
+ enabled:
+ default: true
+ description: When set to true, every replication slot that
+ is on the primary is synchronized on each standby
+ type: boolean
+ excludePatterns:
+ description: List of regular expression patterns to match
+ the names of replication slots to be excluded (by default
+ empty)
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ updateInterval:
+ default: 30
+ description: |-
+ Standby will update the status of the local replication slots
+ every `updateInterval` seconds (default 30).
+ minimum: 1
+ type: integer
+ type: object
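+ # Illustrative sketch (example values): HA slot management with the default
+ # prefix and a shorter status update interval.
+ #   spec:
+ #     replicationSlots:
+ #       highAvailability:
+ #         enabled: true
+ #         slotPrefix: _cnpg_
+ #       updateInterval: 10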
+ resources:
+ description: |-
+ Resources requirements of every generated Pod. Please refer to
+ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ for more information.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
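+ # Illustrative sketch (example values): resource requests and limits applied
+ # to every generated Pod.
+ #   spec:
+ #     resources:
+ #       requests:
+ #         cpu: "1"
+ #         memory: 1Gi
+ #       limits:
+ #         cpu: "2"
+ #         memory: 2Gi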
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified Kubernetes
+ scheduler. If not specified, the pod will be dispatched by the default
+ scheduler. More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
+ type: string
+ seccompProfile:
+ description: |-
+ The SeccompProfile applied to every Pod and Container.
+ Defaults to: `RuntimeDefault`
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountTemplate:
+ description: Configure the generation of the service account
+ properties:
+ metadata:
+ description: |-
+ Metadata are the metadata to be used for the generated
+ service account
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ required:
+ - metadata
+ type: object
+ smartShutdownTimeout:
+ default: 180
+ description: |-
+ The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete.
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres
+ (that is: `stopDelay` - `smartShutdownTimeout`).
+ format: int32
+ type: integer
+ startDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ successfully start up (default 3600).
+ The startup probe failure threshold is derived from this value using the formula:
+ ceiling(startDelay / 10).
+ format: int32
+ type: integer
+ stopDelay:
+ default: 1800
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ gracefully shutdown (default 1800)
+ format: int32
+ type: integer
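+ # Illustrative sketch (example values): with stopDelay 1800 and
+ # smartShutdownTimeout 180, about 1620 seconds (stopDelay - smartShutdownTimeout)
+ # remain for the fast shutdown requested afterwards.
+ #   spec:
+ #     startDelay: 3600
+ #     stopDelay: 1800
+ #     smartShutdownTimeout: 180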
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
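+ # Illustrative sketch (placeholder storage class): 10Gi volumes, with the
+ # already-provisioned PVCs resized when the size changes.
+ #   spec:
+ #     storage:
+ #       size: 10Gi
+ #       storageClass: standard
+ #       resizeInUseVolumes: true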
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined, a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes
+ to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
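+ # Illustrative sketch (placeholder names): a dedicated tablespace with its
+ # own storage and owner.
+ #   spec:
+ #     tablespaces:
+ #       - name: analytics
+ #         owner:
+ #           name: app
+ #         storage:
+ #           size: 5Gi
+ #         temporary: false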
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
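+ # Illustrative only: a minimal sketch of how the topologySpreadConstraints
+ # schema above is typically filled in at the Cluster spec level (the name
+ # "cluster-example" and the cnpg.io/cluster selector label are assumptions):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Cluster
+ #   metadata:
+ #     name: cluster-example
+ #   spec:
+ #     instances: 3
+ #     storage:
+ #       size: 1Gi
+ #     topologySpreadConstraints:
+ #       - maxSkew: 1
+ #         topologyKey: topology.kubernetes.io/zone
+ #         whenUnsatisfiable: DoNotSchedule
+ #         labelSelector:
+ #           matchLabels:
+ #             cnpg.io/cluster: cluster-example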
+ walStorage:
+ description: Configuration of the storage for PostgreSQL WAL (Write-Ahead
+ Log)
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
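+ # Illustrative only: a sketch of a Cluster using the walStorage schema above
+ # to place WAL files on a dedicated volume (sizes and the storage class name
+ # are assumptions):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Cluster
+ #   metadata:
+ #     name: cluster-example
+ #   spec:
+ #     instances: 3
+ #     storage:
+ #       size: 10Gi
+ #     walStorage:
+ #       size: 2Gi
+ #       storageClass: standard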
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator build that is running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource.\n---\nThis struct is intended for
+ direct use as an array at the field path .status.conditions. For
+ example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
+ observations of a foo's current state.\n\t // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+ +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
+ \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
+ \ // other fields\n\t}"
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: |-
+ type of condition in CamelCase or in foo.example.com/CamelCase.
+ ---
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict is important.
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs not dangling nor initializing
+ items:
+ type: string
+ type: array
+ image:
+ description: Image contains the image name used by the pods
+ type: string
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
+ type: integer
+ instancesReportedState:
+ additionalProperties:
+ description: InstanceReportedState describes the last reported state
+ of an instance during a reconciliation loop
+ properties:
+ isPrimary:
+ description: indicates if an instance is the primary one
+ type: boolean
+ timeLineID:
+ description: indicates on which TimelineId the instance is
+ type: integer
+ required:
+ - isPrimary
+ type: object
+ description: The reported state of the instances during the last reconciliation
+ loop
+ type: object
+ instancesStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: InstancesStatus indicates in which status the instances
+ are
+ type: object
+ jobCount:
+ description: How many Jobs have been created by this cluster
+ format: int32
+ type: integer
+ lastFailedBackup:
+ description: Stored as a date in RFC3339 format
+ type: string
+ lastSuccessfulBackup:
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format
+ This field is calculated from the content of LastSuccessfulBackupByMethod
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: Last successful backup, stored as a date in RFC3339 format,
+ per backup method type
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
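+ # Note: the scale subresource above maps replicas onto .spec.instances, so a
+ # Cluster should also be resizable with the generic scale verb, e.g.
+ # `kubectl scale cluster/cluster-example --replicas=3` (name is illustrative).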
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10
+ type: integer
+ required:
+ - image
+ - major
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-validations:
+ - message: Images must have unique major versions
+ rule: self.all(e, self.filter(f, f.major==e.major).size() == 1)
+ required:
+ - images
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
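+ # Illustrative only: an ImageCatalog satisfying the schema above (the image
+ # tags shown are assumptions, not a statement of published releases):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql-catalog
+ #   spec:
+ #     images:
+ #       - major: 15
+ #         image: ghcr.io/cloudnative-pg/postgresql:15.7
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16.3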
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: poolers.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Pooler
+ listKind: PoolerList
+ plural: poolers
+ singular: pooler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.type
+ name: Type
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Pooler is the Schema for the poolers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the Pooler.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: |-
+ This is the cluster reference on which the Pooler will work.
+ The Pooler name should never match any cluster name within the same namespace.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ deploymentStrategy:
+ description: The deployment strategy to use for pgbouncer to replace
+ existing pods with new ones
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if DeploymentStrategyType =
+ RollingUpdate.
+ ---
+ TODO: Update this to follow our convention for oneOf, whatever we decide it
+ to be.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be scheduled above the desired number of
+ pods.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up.
+ Defaults to 25%.
+ Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ the rolling update starts, such that the total number of old and new pods do not exceed
+ 130% of desired pods. Once old pods have been killed,
+ new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ at any time during the update is at most 130% of desired pods.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ Absolute number is calculated from percentage by rounding down.
+ This can not be 0 if MaxSurge is 0.
+ Defaults to 25%.
+ Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ that the total number of pods available at all times during the update is at
+ least 70% of desired pods.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of deployment. Can be "Recreate" or "RollingUpdate".
+ Default is RollingUpdate.
+ type: string
+ type: object
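+ # For instance (values are illustrative), the deploymentStrategy schema above
+ # can request a conservative pgbouncer rollout:
+ #
+ #   spec:
+ #     deploymentStrategy:
+ #       type: RollingUpdate
+ #       rollingUpdate:
+ #         maxSurge: 1
+ #         maxUnavailable: 0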
+ instances:
+ default: 1
+ description: 'The number of replicas we want. Default: 1.'
+ format: int32
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this pooler.
+ properties:
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
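+ # A minimal sketch of the monitoring block above: enable the PodMonitor and
+ # drop one metric family before ingestion (the metric name pattern is a
+ # hypothetical example):
+ #
+ #   spec:
+ #     monitoring:
+ #       enablePodMonitor: true
+ #       podMonitorMetricRelabelings:
+ #         - sourceLabels: [__name__]
+ #           regex: pgbouncer_stats_queries_duration.*
+ #           action: drop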
+ pgbouncer:
+ description: The PgBouncer configuration
+ properties:
+ authQuery:
+ description: |-
+ The query that will be used to download the hash of the password
+ of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+ If specified, an AuthQuerySecret must also be specified and
+ no automatic CNPG Cluster integration will be triggered.
+ type: string
+ authQuerySecret:
+ description: |-
+ The credentials of the user that need to be used for the authentication
+ query. If specified, an AuthQuery
+ (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+ must also be specified and no automatic CNPG Cluster integration will be triggered.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Additional parameters to be passed to PgBouncer - please check
+ the CNPG documentation for a list of options you can configure
+ type: object
+ paused:
+ default: false
+ description: |-
+ When set to `true`, PgBouncer will disconnect from the PostgreSQL
+ server, first waiting for all queries to complete, and pause all new
+ client connections until this value is set to `false` (default). Internally,
+ the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
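+ # Putting the pgbouncer schema above together, a minimal Pooler could look
+ # like the following sketch (names and parameter values are assumptions):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Pooler
+ #   metadata:
+ #     name: pooler-example-rw
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     instances: 3
+ #     type: rw
+ #     pgbouncer:
+ #       poolMode: session
+ #       parameters:
+ #         max_client_conn: "1000"
+ #         default_pool_size: "10"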
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, and the requested family is available in the cluster
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. changing type
+ from NodePort to ClusterIP).
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ format: int32
+ type: integer
+ port:
+ description: The port that will be exposed by this service.
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: |-
+ The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Default is TCP.
+ type: string
+ targetPort:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the pods targeted by the service.
+ Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ If this is a string, it will be looked up as a named port in the
+ target Pod's container ports. If this is not specified, the value
+ of the 'port' field is used (an identity map).
+ This field is ignored for services with clusterIP=None, and should be
+ omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - port
+ - protocol
+ x-kubernetes-list-type: map
+ publishNotReadyAddresses:
+ description: |-
+ publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ Service should disregard any indications of ready/not-ready.
+ The primary use case for setting this field is for a StatefulSet's Headless Service to
+ propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ Services interpret this to mean that all endpoints are considered "ready" even if the
+ Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ type: boolean
+ selector:
+ additionalProperties:
+ type: string
+ description: |-
+ Route service traffic to pods with label keys and values matching this
+ selector. If empty or not present, the service is assumed to have an
+ external process managing its endpoints, which Kubernetes will not
+ modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ Ignored if type is ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-map-type: atomic
+ sessionAffinity:
+ description: |-
+ Supports "ClientIP" and "None". Used to maintain session affinity.
+ Enable client IP based session affinity.
+ Must be ClientIP or None.
+ Defaults to None.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ sessionAffinityConfig:
+ description: sessionAffinityConfig contains the configurations
+ of session affinity.
+ properties:
+ clientIP:
+ description: clientIP contains the configurations of Client
+ IP based session affinity.
+ properties:
+ timeoutSeconds:
+ description: |-
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ Default value is 10800(for 3 hours).
+ format: int32
+ type: integer
+ type: object
+ type: object
+ trafficDistribution:
+ description: |-
+ TrafficDistribution offers a way to express preferences for how traffic is
+ distributed to Service endpoints. Implementations can use this field as a
+ hint, but are not required to guarantee strict adherence. If the field is
+ not set, the implementation will apply its default routing strategy. If set
+ to "PreferClose", implementations should prioritize endpoints that are
+ topologically close (e.g., same zone).
+ This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ type: string
+ type:
+ description: |-
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ to endpoints. Endpoints are determined by the selector or if that is not
+ specified, by manual construction of an Endpoints object or
+ EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ allocated and the endpoints are published as a set of endpoints rather
+ than a virtual IP.
+ "NodePort" builds on ClusterIP and allocates a port on every node which
+ routes to the same endpoints as the clusterIP.
+ "LoadBalancer" builds on NodePort and creates an external load-balancer
+ (if supported in the current cloud) which routes to the same endpoints
+ as the clusterIP.
+ "ExternalName" aliases this service to the specified externalName.
+ Several other fields do not apply to ExternalName services.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: string
+ type: object
+ type: object
+ template:
+ description: The template of the Pod to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates whether
+ a service account token should be automatically mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                    Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+                                For more information, see https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                    Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                    Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+                                        description: Capability represents a POSIX
+                                          capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+                                        description: Capability represents a POSIX
+                                          capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                    Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                    Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+                                flag is false, a container process that reads from stdin will never receive an EOF.
+                                Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged to the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+                                        for backward compatibility. There is no validation of this field, and
+                                        lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+                                        Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+                                        for backward compatibility. There is no validation of this field, and
+                                        lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+                                    Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+                                    Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral containers.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
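The security-context fields above are the standard core/v1 SecurityContext embedded by this generated schema. As a non-authoritative sketch, a hardened container-level securityContext might look like the fragment below; the UID and the enclosing template path are placeholders:

```yaml
# Illustrative container-level securityContext (standard core/v1 fields).
securityContext:
  runAsNonRoot: true
  runAsUser: 26                     # placeholder UID
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]                   # drop all capabilities granted by the runtime
  seccompProfile:
    type: RuntimeDefault            # use the container runtime's default seccomp profile
```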
+ startupProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ The message written is intended to be a brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
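As an illustrative fragment for the volumeMounts schema above (the volume name and mount path are placeholders; subPath is omitted because subpath mounts are not allowed for ephemeral containers):

```yaml
# Illustrative volumeMounts entry (standard core/v1 VolumeMount fields).
volumeMounts:
  - name: app-config                # must match the name of a volume declared in the pod spec
    mountPath: /etc/app
    readOnly: true
    recursiveReadOnly: IfPossible   # only meaningful when readOnly is true
```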
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
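A minimal hostAliases sketch matching the schema above; the IP address and hostnames are placeholders:

```yaml
# Entries are injected into the pod's /etc/hosts file.
hostAliases:
  - ip: "10.0.0.10"
    hostnames:
      - "db.internal.example"
      - "db"
```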
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace, useful
+ for when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new user namespace is created for the pod. Setting this to false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
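For reference, an imagePullSecrets entry points at a Secret in the same namespace; the name below is a placeholder:

```yaml
imagePullSecrets:
  - name: private-registry-credentials   # placeholder Secret holding registry credentials
```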
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
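A hedged sketch of the command/args fields described above, illustrating $(VAR_NAME) expansion and the $$ escape; the binary path, flags, and the assumed POD_NAME environment variable are placeholders:

```yaml
# Entrypoint override; not executed within a shell.
command: ["/usr/local/bin/init-db"]
args:
  - "--pod-name=$(POD_NAME)"       # expanded from the container's env (assumes POD_NAME is defined, e.g. via fieldRef)
  - "--literal=$$(NOT_EXPANDED)"   # double $$ escapes to the literal string "$(NOT_EXPANDED)"
```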
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
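An illustrative env list combining a literal value, a fieldRef, and a secretKeyRef as described above; the Secret name and keys are placeholders:

```yaml
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name    # downward API: the pod's own name
  - name: PGPASSWORD
    valueFrom:
      secretKeyRef:
        name: app-credentials       # placeholder Secret
        key: password
  - name: LOG_LEVEL
    value: "info"
```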
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
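A short envFrom sketch matching the schema above; the ConfigMap and Secret names are placeholders:

```yaml
envFrom:
  - prefix: APP_                    # prepended to every key; must be a C_IDENTIFIER
    configMapRef:
      name: app-settings            # placeholder ConfigMap
  - secretRef:
      name: app-credentials         # placeholder Secret
      optional: true                # the container still starts if the Secret is missing
```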
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
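An illustrative lifecycle fragment using the handlers described above; the command and sleep duration are placeholders, and the sleep handler may require the PodLifecycleSleepAction feature gate on older clusters:

```yaml
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/started"]   # placeholder hook
  preStop:
    sleep:
      seconds: 10                   # give endpoints time to drain before termination proceeds
```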
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
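A hedged probe sketch combining the liveness and readiness schemas above; paths, ports, and thresholds are placeholders:

```yaml
livenessProbe:
  httpGet:
    path: /healthz                  # placeholder endpoint
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 10
  failureThreshold: 3               # restart the container after 3 consecutive failures
readinessProbe:
  tcpSocket:
    port: 5432                      # placeholder port; pod is removed from endpoints while failing
  timeoutSeconds: 2
```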
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents POSIX capabilities
+ type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ nodeName:
+ description: |-
+ NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ requirements.
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+
+ If the OS field is set to windows, following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference to
+ a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition in
+ the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ source:
+ description: Source describes where to find the ResourceClaim.
+ properties:
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ Defaults to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified scheduler.
+ If not specified, the pod will be dispatched by default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ items:
+ description: PodSchedulingGate is associated to a Pod to
+ guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in addition
+ to the container's primary GID, the fsGroup (if specified), and group memberships
+ defined in the container image for the uid of the container process. If unspecified,
+ no additional groups are added to any container. Note that group memberships
+ defined in the container image for the uid of the container process are still effective,
+ even if they are not included in this list.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be
+ set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Defaults to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Defaults to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data Disk
+ mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode:
+ None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single
+ blob disk per storage account Managed: azure
+ managed data disk (only in managed availability
+ set). defaults to shared'
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount on the
+ host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+                                  description: optional specifies whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+                                    Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+                                    pod. The name of the PVC will be `<pod name>-<volume name>` where
+                                    `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                            set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
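+                            # Illustrative sketch only, not part of the generated schema: an inline
+                            # ephemeral volume as it could appear in a pod template validated by the
+                            # schema above. The storage class name and size are hypothetical
+                            # placeholders.
+                            #
+                            #   volumes:
+                            #     - name: scratch
+                            #       ephemeral:
+                            #         volumeClaimTemplate:
+                            #           spec:
+                            #             accessModes: ["ReadWriteOnce"]
+                            #             storageClassName: standard
+                            #             resources:
+                            #               requests:
+                            #                 storage: 1Gi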
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+                                    datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+                                    it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                                    <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController
+                                persistent disk attached and mounted on kubelet's host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume
+                                attached and mounted on kubelet's host machine
+ properties:
+ fsType:
+ description: |-
+                                    fsType represents the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description: Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+                                            description: optional specifies whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and uid
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the ''..'' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name:
+ required for volumes, optional
+ for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+                                            description: optional field specifies whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                                              its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+                                    Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+                                  description: sslEnabled is a flag to enable/disable SSL
+                                    communication with the Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+                                  description: optional field specifies whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume
+                                attached and mounted on kubelet's host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - containers
+ type: object
+ type: object
+ type:
+ default: rw
+ description: 'Type of service to forward traffic to. Default: `rw`.'
+ enum:
+ - rw
+ - ro
+ type: string
+ required:
+ - cluster
+ - pgbouncer
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the Pooler. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ instances:
+ description: The number of pods trying to be scheduled
+ format: int32
+ type: integer
+ secrets:
+ description: The resource version of the config object
+ properties:
+ clientCA:
+ description: The client CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ pgBouncerSecrets:
+ description: The version of the secrets used by PgBouncer
+ properties:
+ authQuery:
+ description: The auth query secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ serverCA:
+ description: The server CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ serverTLS:
+ description: The server TLS secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
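The Pooler schema closes here with the `type` selector (`rw` by default, `ro` as the alternative) and a `scale` subresource wired to `.spec.instances` and `.status.instances`. As a quick orientation aid, a minimal Pooler could look like the sketch below; the names are made up, and the `poolMode` key under `pgbouncer` is an assumption taken from the PgBouncer portion of the spec, which is not shown in this excerpt.

```yaml
# Hypothetical example only: names and the poolMode value are assumptions.
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example  # required: the Cluster whose traffic is pooled
  instances: 2             # managed through the `scale` subresource (.spec.instances)
  type: rw                 # forward traffic to the read-write service (the default)
  pgbouncer:
    poolMode: session      # assumed PgBouncer setting, not part of the excerpt above
```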
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.15.0
+ name: scheduledbackups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ScheduledBackup
+ listKind: ScheduledBackupList
+ plural: scheduledbackups
+ singular: scheduledbackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .status.lastScheduleTime
+ name: Last Backup
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ScheduledBackup is the Schema for the scheduledbackups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ScheduledBackup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ backupOwnerReference:
+ default: none
+ description: |-
+ Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: If the first backup has to be started immediately after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`
+ and `volumeSnapshot`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information when was the last time that backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: Next time we will run a backup
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
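The ScheduledBackup spec above covers the cluster reference, the cron-with-seconds `schedule`, `backupOwnerReference`, `method`, `immediate`, `online`/`onlineConfiguration`, `suspend`, and `target`. A minimal sketch combining those fields might look as follows; the cluster name and the schedule itself are assumed values, not taken from this manifest.

```yaml
# Hypothetical example only: the cluster name and the schedule are assumed values.
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: backup-example
spec:
  # Six fields (seconds first), unlike the five-field Kubernetes CronJob format
  schedule: "0 0 0 * * *"
  cluster:
    name: cluster-example
  backupOwnerReference: self     # this ScheduledBackup owns the Backup objects it creates
  method: barmanObjectStore      # default; `volumeSnapshot` is the other option
  immediate: true                # take the first backup right after creation
  target: prefer-standby         # prefer the most up-to-date standby, if available
  onlineConfiguration:
    immediateCheckpoint: true    # ask PostgreSQL to complete the initial checkpoint ASAP
    waitForArchive: true         # default: wait for the WAL to be archived
```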
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - update
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusterimagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - imagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - scheduledbackups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - scheduledbackups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cnpg-manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: v1
+data:
+ queries: |
+ backends:
+ query: |
+ SELECT sa.datname
+ , sa.usename
+ , sa.application_name
+ , states.state
+ , COALESCE(sa.count, 0) AS total
+ , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
+ FROM ( VALUES ('active')
+ , ('idle')
+ , ('idle in transaction')
+ , ('idle in transaction (aborted)')
+ , ('fastpath function call')
+ , ('disabled')
+ ) AS states(state)
+ LEFT JOIN (
+ SELECT datname
+ , state
+ , usename
+ , COALESCE(application_name, '') AS application_name
+ , COUNT(*)
+ , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
+ FROM pg_catalog.pg_stat_activity
+ GROUP BY datname, state, usename, application_name
+ ) sa ON states.state = sa.state
+ WHERE sa.usename IS NOT NULL
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - usename:
+ usage: "LABEL"
+ description: "Name of the user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - state:
+ usage: "LABEL"
+ description: "State of the backend"
+ - total:
+ usage: "GAUGE"
+ description: "Number of backends"
+ - max_tx_duration_seconds:
+ usage: "GAUGE"
+ description: "Maximum duration of a transaction in seconds"
+
+ backends_waiting:
+ query: |
+ SELECT count(*) AS total
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_locks blocking_locks
+ ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted
+ metrics:
+ - total:
+ usage: "GAUGE"
+ description: "Total number of backends that are currently waiting on other queries"
+
+ pg_database:
+ query: |
+ SELECT datname
+ , pg_catalog.pg_database_size(datname) AS size_bytes
+ , pg_catalog.age(datfrozenxid) AS xid_age
+ , pg_catalog.mxid_age(datminmxid) AS mxid_age
+ FROM pg_catalog.pg_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - size_bytes:
+ usage: "GAUGE"
+ description: "Disk space used by the database"
+ - xid_age:
+ usage: "GAUGE"
+ description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
+
+ pg_postmaster:
+ query: |
+ SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
+ FROM pg_catalog.pg_postmaster_start_time()
+ metrics:
+ - start_time:
+ usage: "GAUGE"
+ description: "Time at which postgres started (based on epoch)"
+
+ pg_replication:
+ query: "SELECT CASE WHEN (
+ NOT pg_catalog.pg_is_in_recovery()
+ OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_catalog.pg_is_in_recovery() AS in_recovery,
+ EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
+ (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
+ metrics:
+ - lag:
+ usage: "GAUGE"
+ description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+ - is_wal_receiver_up:
+ usage: "GAUGE"
+ description: "Whether the instance wal_receiver is up"
+ - streaming_replicas:
+ usage: "GAUGE"
+ description: "Number of streaming replicas connected to the instance"
+
+ pg_replication_slots:
+ query: |
+ SELECT slot_name,
+ slot_type,
+ database,
+ active,
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
+ FROM pg_catalog.pg_replication_slots
+ WHERE NOT temporary
+ metrics:
+ - slot_name:
+ usage: "LABEL"
+ description: "Name of the replication slot"
+ - slot_type:
+ usage: "LABEL"
+ description: "Type of the replication slot"
+ - database:
+ usage: "LABEL"
+ description: "Name of the database"
+ - active:
+ usage: "GAUGE"
+ description: "Flag indicating whether the slot is active"
+ - pg_wal_lsn_diff:
+ usage: "GAUGE"
+ description: "Replication lag in bytes"
+
+ pg_stat_archiver:
+ query: |
+ SELECT archived_count
+ , failed_count
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
+ , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
+ , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_archiver
+ metrics:
+ - archived_count:
+ usage: "COUNTER"
+ description: "Number of WAL files that have been successfully archived"
+ - failed_count:
+ usage: "COUNTER"
+ description: "Number of failed attempts for archiving WAL files"
+ - seconds_since_last_archival:
+ usage: "GAUGE"
+ description: "Seconds since the last successful archival operation"
+ - seconds_since_last_failure:
+ usage: "GAUGE"
+ description: "Seconds since the last failed archival operation"
+ - last_archived_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving succeeded"
+ - last_failed_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving failed"
+ - last_archived_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Archived WAL start LSN"
+ - last_failed_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Last failed WAL LSN"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_bgwriter:
+ runonserver: "<17.0.0"
+ query: |
+ SELECT checkpoints_timed
+ , checkpoints_req
+ , checkpoint_write_time
+ , checkpoint_sync_time
+ , buffers_checkpoint
+ , buffers_clean
+ , maxwritten_clean
+ , buffers_backend
+ , buffers_backend_fsync
+ , buffers_alloc
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - checkpoint_write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
+ - checkpoint_sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
+ - buffers_checkpoint:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints"
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_backend:
+ usage: "COUNTER"
+ description: "Number of buffers written directly by a backend"
+ - buffers_backend_fsync:
+ usage: "COUNTER"
+ description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+
+ pg_stat_database:
+ query: |
+ SELECT datname
+ , xact_commit
+ , xact_rollback
+ , blks_read
+ , blks_hit
+ , tup_returned
+ , tup_fetched
+ , tup_inserted
+ , tup_updated
+ , tup_deleted
+ , conflicts
+ , temp_files
+ , temp_bytes
+ , deadlocks
+ , blk_read_time
+ , blk_write_time
+ FROM pg_catalog.pg_stat_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of this database"
+ - xact_commit:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been committed"
+ - xact_rollback:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been rolled back"
+ - blks_read:
+ usage: "COUNTER"
+ description: "Number of disk blocks read in this database"
+ - blks_hit:
+ usage: "COUNTER"
+ description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
+ - tup_returned:
+ usage: "COUNTER"
+ description: "Number of rows returned by queries in this database"
+ - tup_fetched:
+ usage: "COUNTER"
+ description: "Number of rows fetched by queries in this database"
+ - tup_inserted:
+ usage: "COUNTER"
+ description: "Number of rows inserted by queries in this database"
+ - tup_updated:
+ usage: "COUNTER"
+ description: "Number of rows updated by queries in this database"
+ - tup_deleted:
+ usage: "COUNTER"
+ description: "Number of rows deleted by queries in this database"
+ - conflicts:
+ usage: "COUNTER"
+ description: "Number of queries canceled due to conflicts with recovery in this database"
+ - temp_files:
+ usage: "COUNTER"
+ description: "Number of temporary files created by queries in this database"
+ - temp_bytes:
+ usage: "COUNTER"
+ description: "Total amount of data written to temporary files by queries in this database"
+ - deadlocks:
+ usage: "COUNTER"
+ description: "Number of deadlocks detected in this database"
+ - blk_read_time:
+ usage: "COUNTER"
+ description: "Time spent reading data file blocks by backends in this database, in milliseconds"
+ - blk_write_time:
+ usage: "COUNTER"
+ description: "Time spent writing data file blocks by backends in this database, in milliseconds"
+
+ pg_stat_replication:
+ primary: true
+ query: |
+ SELECT usename
+ , COALESCE(application_name, '') AS application_name
+ , COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
+ , EXTRACT(EPOCH FROM backend_start) AS backend_start
+ , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
+ , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
+ , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
+ FROM pg_catalog.pg_stat_replication
+ metrics:
+ - usename:
+ usage: "LABEL"
+ description: "Name of the replication user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - client_addr:
+ usage: "LABEL"
+ description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
+ - backend_start:
+ usage: "COUNTER"
+ description: "Time when this process was started"
+ - backend_xmin_age:
+ usage: "COUNTER"
+ description: "The age of this standby's xmin horizon"
+ - sent_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location sent on this connection"
+ - write_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
+ - flush_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
+ - replay_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
+ - write_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
+ - flush_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
+ - replay_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
+
+ pg_settings:
+ query: |
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
+ FROM pg_catalog.pg_settings
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
+ metrics:
+ - name:
+ usage: "LABEL"
+ description: "Name of the setting"
+ - setting:
+ usage: "GAUGE"
+ description: "Setting value"
+kind: ConfigMap
+metadata:
+ labels:
+ cnpg.io/reload: ""
+ name: cnpg-default-monitoring
+ namespace: cnpg-system
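The ConfigMap above ships the operator's default metric queries (the Deployment below points at it through the MONITORING_QUERIES_CONFIGMAP environment variable). For additional queries, a Cluster references its own ConfigMap through the monitoring stanza; a sketch follows, with names assumed (the same `monitoring-01`/`queries.yaml` pattern appears in the e2e fixtures later in this patch).

```yaml
# Hypothetical Cluster monitoring stanza; ConfigMap name and key are assumptions.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  monitoring:
    customQueriesConfigMap:
      - name: monitoring-01    # a ConfigMap shaped like the queries above
        key: queries.yaml
```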
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ app.kubernetes.io/name: cloudnative-pg
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-controller-manager
+ namespace: cnpg-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: cloudnative-pg
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ spec:
+ containers:
+ - args:
+ - controller
+ - --leader-elect
+ - --config-map-name=cnpg-controller-manager-config
+ - --secret-name=cnpg-controller-manager-config
+ - --webhook-port=9443
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_IMAGE_NAME
+ value: ghcr.io/cloudnative-pg/cloudnative-pg:1.23.2
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MONITORING_QUERIES_CONFIGMAP
+ value: cnpg-default-monitoring
+ image: ghcr.io/cloudnative-pg/cloudnative-pg:1.23.2
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsGroup: 10001
+ runAsUser: 10001
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /controller
+ name: scratch-data
+ - mountPath: /run/secrets/cnpg.io/webhook
+ name: webhook-certificates
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ serviceAccountName: cnpg-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - emptyDir: {}
+ name: scratch-data
+ - name: webhook-certificates
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: cnpg-webhook-cert
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: cnpg-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: mbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: mcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: mscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 9808bb17df..c4201515c6 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -348,19 +348,83 @@ func AssertOperatorIsReady() {
}, testTimeouts[testsUtils.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready")
}
+// AssertDatabaseIsReady checks that the database on the primary is ready to run queries
+//
+// NOTE: even if AssertClusterIsReady has already succeeded, a temporary DB connectivity issue
+// would take failureThreshold x periodSeconds to be detected
+func AssertDatabaseIsReady(namespace, clusterName, dbName string) {
+ By(fmt.Sprintf("checking the database on %s is ready", clusterName), func() {
+ primary, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ stdout, stderr, err := env.ExecCommandInInstancePod(testsUtils.PodLocator{
+ Namespace: namespace,
+ PodName: primary.GetName(),
+ }, nil, "pg_isready")
+ if err != nil {
+ return err
+ }
+ if stderr != "" {
+ return fmt.Errorf("while checking pg_isready: %s", stderr)
+ }
+ if !strings.Contains(stdout, "accepting") {
+ return fmt.Errorf("while checking pg_isready: Not accepting connections")
+ }
+ _, _, err = env.ExecQueryInInstancePod(testsUtils.PodLocator{
+ Namespace: namespace,
+ PodName: primary.GetName(),
+ }, testsUtils.DatabaseName(dbName), "select 1")
+ return err
+ }, RetryTimeout, PollingTime).ShouldNot(HaveOccurred())
+ })
+}
+
// AssertCreateTestData create test data.
func AssertCreateTestData(namespace, clusterName, tableName string, pod *corev1.Pod) {
- By("creating test data", func() {
+ AssertDatabaseIsReady(namespace, clusterName, testsUtils.AppDBName)
+ By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() {
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName)
- _, _, err := env.ExecCommandWithPsqlClient(
- namespace,
- clusterName,
- pod,
- apiv1.ApplicationUserSecretSuffix,
- testsUtils.AppDBName,
- query,
- )
- Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ _, _, err := env.ExecCommandWithPsqlClient(
+ namespace,
+ clusterName,
+ pod,
+ apiv1.ApplicationUserSecretSuffix,
+ testsUtils.AppDBName,
+ query,
+ )
+ if err != nil {
+ return err
+ }
+ return nil
+ }, RetryTimeout, PollingTime).Should(BeNil())
+ })
+}
+
+// AssertCreateTestDataWithDatabaseName creates test data in a given database.
+func AssertCreateTestDataWithDatabaseName(
+ namespace,
+ clusterName,
+ databaseName,
+ tableName string,
+ pod *corev1.Pod,
+) {
+ By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() {
+ query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName)
+ Eventually(func() error {
+ _, _, err := env.ExecCommandWithPsqlClient(
+ namespace,
+ clusterName,
+ pod,
+ apiv1.ApplicationUserSecretSuffix,
+ databaseName,
+ query,
+ )
+ if err != nil {
+ return err
+ }
+ return nil
+ }, RetryTimeout, PollingTime).Should(BeNil())
})
}
@@ -371,20 +435,26 @@ type TableLocator struct {
Tablespace string
}
-// AssertCreateTestData create test data.
+// AssertCreateTestDataInTablespace creates test data in the given tablespace.
func AssertCreateTestDataInTablespace(tl TableLocator, pod *corev1.Pod) {
+ AssertDatabaseIsReady(tl.Namespace, tl.ClusterName, testsUtils.AppDBName)
By(fmt.Sprintf("creating test data in tablespace %q", tl.Tablespace), func() {
query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v TABLESPACE %v AS VALUES (1),(2);",
tl.TableName, tl.Tablespace)
- _, _, err := env.ExecCommandWithPsqlClient(
- tl.Namespace,
- tl.ClusterName,
- pod,
- apiv1.ApplicationUserSecretSuffix,
- testsUtils.AppDBName,
- query,
- )
- Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ _, _, err := env.ExecCommandWithPsqlClient(
+ tl.Namespace,
+ tl.ClusterName,
+ pod,
+ apiv1.ApplicationUserSecretSuffix,
+ testsUtils.AppDBName,
+ query,
+ )
+ if err != nil {
+ return err
+ }
+ return nil
+ }, RetryTimeout, PollingTime).Should(BeNil())
})
}
@@ -448,9 +518,9 @@ func insertRecordIntoTable(namespace, clusterName, tableName string, value int,
Expect(err).NotTo(HaveOccurred())
}
-// AssertDatabaseExists assert if database is existed
+// AssertDatabaseExists asserts whether the given database exists
func AssertDatabaseExists(namespace, podName, databaseName string, expectedValue bool) {
- By(fmt.Sprintf("verifying is database exists %v", databaseName), func() {
+ By(fmt.Sprintf("verifying if database %v exists", databaseName), func() {
pod := &corev1.Pod{}
commandTimeout := time.Second * 10
query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE lower(datname) = lower('%v'));", databaseName)
@@ -467,6 +537,28 @@ func AssertDatabaseExists(namespace, podName, databaseName string, expectedValue
})
}
+// AssertUserExists asserts whether the given user exists
+func AssertUserExists(namespace, podName, userName string, expectedValue bool) {
+ By(fmt.Sprintf("verifying if user %v exists", userName), func() {
+ pod := &corev1.Pod{}
+ commandTimeout := time.Second * 10
+ query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_user WHERE lower(usename) = lower('%v'));", userName)
+ err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod)
+ Expect(err).ToNot(HaveOccurred())
+ stdout, stderr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName,
+ &commandTimeout, "psql", "-U", "postgres", "postgres", "-tAc", query)
+ if err != nil {
+ GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr)
+ }
+ Expect(err).ToNot(HaveOccurred())
+ if expectedValue {
+ Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t"))
+ } else {
+ Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f"))
+ }
+ })
+}
+
// AssertDataExpectedCountWithDatabaseName verifies that an expected amount of rows exists on the table
func AssertDataExpectedCountWithDatabaseName(namespace, podName, databaseName string,
tableName string, expectedValue int,
@@ -824,36 +916,50 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam
return completed, nil
}
+// AssertPgRecoveryMode verifies whether PostgreSQL recovery mode is enabled or disabled on the target pod
+func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) {
+ By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() {
+ stringExpectedValue := "f"
+ if expectedValue {
+ stringExpectedValue = "t"
+ }
+
+ Eventually(func() (string, error) {
+ commandTimeout := time.Second * 10
+ stdOut, stdErr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &commandTimeout,
+ "psql", "-U", "postgres", "postgres", "-tAc", "select pg_is_in_recovery();")
+ if err != nil {
+ GinkgoWriter.Printf("stdout: %v\ntderr: %v\n", stdOut, stdErr)
+ }
+ return strings.Trim(stdOut, "\n"), err
+ }, 300, 10).Should(BeEquivalentTo(stringExpectedValue))
+ })
+}
+
// AssertReplicaModeCluster checks that, after inserting some data in a source cluster,
// a replica cluster can be bootstrapped using pg_basebackup and is properly replicating
// from the source cluster
func AssertReplicaModeCluster(
namespace,
srcClusterName,
+ srcClusterDBName,
replicaClusterSample,
- checkQuery string,
+ testTableName string,
pod *corev1.Pod,
) {
var primaryReplicaCluster *corev1.Pod
commandTimeout := time.Second * 10
+ checkQuery := fmt.Sprintf("SELECT count(*) FROM %v", testTableName)
- By("creating test data in source cluster", func() {
- cmd := "CREATE TABLE IF NOT EXISTS test_replica AS VALUES (1),(2);"
- appUser, appUserPass, err := testsUtils.GetCredentials(srcClusterName, namespace,
- apiv1.ApplicationUserSecretSuffix, env)
- Expect(err).ToNot(HaveOccurred())
- host, err := testsUtils.GetHostName(namespace, srcClusterName, env)
- Expect(err).ToNot(HaveOccurred())
- _, _, err = testsUtils.RunQueryFromPod(
- pod,
- host,
- "appSrc",
- appUser,
- appUserPass,
- cmd,
- env)
- Expect(err).ToNot(HaveOccurred())
- })
+ AssertDatabaseIsReady(namespace, srcClusterName, srcClusterDBName)
+
+ AssertCreateTestDataWithDatabaseName(
+ namespace,
+ srcClusterName,
+ srcClusterDBName,
+ testTableName,
+ pod,
+ )
By("creating replica cluster", func() {
replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample)
@@ -864,44 +970,33 @@ func AssertReplicaModeCluster(
primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName)
return err
}, 30, 3).Should(BeNil())
- })
-
- By("verifying that replica cluster primary is in recovery mode", func() {
- query := "select pg_is_in_recovery();"
- Eventually(func() (string, error) {
- stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
- &commandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", query)
- return strings.Trim(stdOut, "\n"), err
- }, 300, 15).Should(BeEquivalentTo("t"))
+ AssertPgRecoveryMode(primaryReplicaCluster, true)
})
By("checking data have been copied correctly in replica cluster", func() {
Eventually(func() (string, error) {
stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
- &commandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", checkQuery)
+ &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery)
return strings.Trim(stdOut, "\n"), err
}, 180, 10).Should(BeEquivalentTo("2"))
})
By("writing some new data to the source cluster", func() {
- insertRecordIntoTableWithDatabaseName(namespace, srcClusterName, "appSrc", "test_replica", 3, pod)
+ insertRecordIntoTableWithDatabaseName(namespace, srcClusterName, srcClusterDBName, testTableName, 3, pod)
})
By("checking new data have been copied correctly in replica cluster", func() {
Eventually(func() (string, error) {
stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
- &commandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", checkQuery)
+ &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery)
return strings.Trim(stdOut, "\n"), err
}, 180, 15).Should(BeEquivalentTo("3"))
})
- // verify that if replica mode is enabled, no application user is created
- By("checking in replica cluster, there is no database app and user app", func() {
- checkDB := "select exists( SELECT datname FROM pg_catalog.pg_database WHERE lower(datname) = lower('app'));"
- stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
- &commandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", checkDB)
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.Trim(stdOut, "\n")).To(BeEquivalentTo("f"))
+ // verify that if replica mode is enabled, no default "app" user and "app" database are created
+ By("checking that in replica cluster there is no database app and user app", func() {
+ AssertDatabaseExists(namespace, primaryReplicaCluster.Name, "app", false)
+ AssertUserExists(namespace, primaryReplicaCluster.Name, "app", false)
})
}
@@ -913,10 +1008,11 @@ func AssertReplicaModeCluster(
func AssertDetachReplicaModeCluster(
namespace,
srcClusterName,
- replicaClusterName,
srcDatabaseName,
+ replicaClusterName,
replicaDatabaseName,
- srcTableName string,
+ replicaUserName,
+ testTableName string,
) {
var primaryReplicaCluster *corev1.Pod
replicaCommandTimeout := time.Second * 10
@@ -941,21 +1037,30 @@ func AssertDetachReplicaModeCluster(
primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName)
g.Expect(err).ToNot(HaveOccurred())
_, _, err = env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
- &replicaCommandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", query)
+ &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", query)
g.Expect(err).ToNot(HaveOccurred())
}, 300, 15).Should(Succeed())
})
By("verifying the replica database doesn't exist in the replica cluster", func() {
+ // Application database configuration is skipped for replica clusters,
+ // so we expect these to not be present
AssertDatabaseExists(namespace, primaryReplicaCluster.Name, replicaDatabaseName, false)
+ AssertUserExists(namespace, primaryReplicaCluster.Name, replicaUserName, false)
})
By("writing some new data to the source cluster", func() {
- insertRecordIntoTableWithDatabaseName(namespace, srcClusterName, srcDatabaseName, srcTableName, 4, psqlClientPod)
+ AssertCreateTestDataWithDatabaseName(namespace, srcClusterName, srcDatabaseName, testTableName, psqlClientPod)
})
By("verifying that replica cluster was not modified", func() {
- AssertDataExpectedCountWithDatabaseName(namespace, primaryReplicaCluster.Name, srcDatabaseName, srcTableName, 3)
+ outTables, stdErr, err := env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName,
+ &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", "\\dt")
+ if err != nil {
+ GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", outTables, stdErr)
+ }
+ Expect(err).ToNot(HaveOccurred())
+ Expect(strings.Contains(outTables, testTableName), err).Should(BeFalse())
})
}
@@ -2891,7 +2996,8 @@ func assertPredicateClusterHasPhase(namespace, clusterName string, phase []strin
}
}
-// assertMetrics is a utility function used for asserting that specific metrics, defined by regular expressions in
+// assertIncludesMetrics is a utility function used for asserting that specific metrics,
+// defined by regular expressions in
// the 'expectedMetrics' map, are present in the 'rawMetricsOutput' string.
// It also checks whether the metrics match the expected format defined by their regular expressions.
// If any assertion fails, it prints an error message to GinkgoWriter.
@@ -2906,13 +3012,13 @@ func assertPredicateClusterHasPhase(namespace, clusterName string, phase []strin
// "cpu_usage": regexp.MustCompile(`^\d+\.\d+$`), // Example: "cpu_usage 0.25"
// "memory_usage": regexp.MustCompile(`^\d+\s\w+$`), // Example: "memory_usage 512 MiB"
// }
-// assertMetrics(rawMetricsOutput, expectedMetrics)
+// assertIncludesMetrics(rawMetricsOutput, expectedMetrics)
//
// The function will assert that the specified metrics exist in 'rawMetricsOutput' and match their expected formats.
// If any assertion fails, it will print an error message with details about the failed metric collection.
//
// Note: This function is typically used in testing scenarios to validate metric collection behavior.
-func assertMetrics(rawMetricsOutput string, expectedMetrics map[string]*regexp.Regexp) {
+func assertIncludesMetrics(rawMetricsOutput string, expectedMetrics map[string]*regexp.Regexp) {
debugDetails := fmt.Sprintf("Priting rawMetricsOutput:\n%s", rawMetricsOutput)
withDebugDetails := func(baseErrMessage string) string {
return fmt.Sprintf("%s\n%s\n", baseErrMessage, debugDetails)
@@ -2935,3 +3041,10 @@ func assertMetrics(rawMetricsOutput string, expectedMetrics map[string]*regexp.R
withDebugDetails(fmt.Sprintf("Expected %s to have value %v but got %s", key, valueRe, value)))
}
}
+
+func assertExcludesMetrics(rawMetricsOutput string, nonCollected []string) {
+ for _, nonCollectable := range nonCollected {
+ // ensure the non-collectable metric does not appear in the raw metrics output
+ Expect(rawMetricsOutput).NotTo(ContainSubstring(nonCollectable))
+ }
+}
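For context on the replica-mode assertions above, which bootstrap a replica cluster with pg_basebackup and expect no default `app` database or user to be created, a replica cluster is declared roughly along the lines of the sketch below. This is only a sketch under assumptions: the cluster names, the external cluster connection parameters, and the omission of authentication details are all made up, not taken from this patch.

```yaml
# Hypothetical replica cluster; names and connection details are assumptions,
# and authentication (certificates or a password secret) is omitted for brevity.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-replica
spec:
  instances: 1
  replica:
    enabled: true              # keep following the source instead of promoting
    source: cluster-source
  bootstrap:
    pg_basebackup:
      source: cluster-source   # clone the data directory from the source cluster
  externalClusters:
    - name: cluster-source
      connectionParameters:
        host: cluster-source-rw   # assumed read-write service of the source
        user: streaming_replica
  storage:
    size: 1Gi
```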
diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go
new file mode 100644
index 0000000000..f5eb068ccc
--- /dev/null
+++ b/tests/e2e/disk_space_test.go
@@ -0,0 +1,217 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/tests"
+ testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() {
+ const (
+ level = tests.Low
+ namespacePrefix = "diskspace-e2e"
+ )
+
+ diskSpaceDetectionTest := func(namespace, clusterName string) {
+ const walDir = "/var/lib/postgresql/data/pgdata/pg_wal"
+ var cluster *apiv1.Cluster
+ var primaryPod *corev1.Pod
+ By("finding cluster resources", func() {
+ var err error
+ cluster, err = env.GetCluster(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cluster).ToNot(BeNil())
+
+ primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(primaryPod).ToNot(BeNil())
+ })
+ By("filling the WAL volume", func() {
+ timeout := time.Minute * 5
+
+ _, _, err := env.ExecCommandInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: namespace,
+ PodName: primaryPod.Name,
+ },
+ &timeout,
+ "dd", "if=/dev/zero", "of="+walDir+"/fill", "bs=1M",
+ )
+ Expect(err).To(HaveOccurred())
+ // FIXME: check if the error is due to the disk being full
+ })
+ By("writing something when no space is available", func() {
+ // Create the table used by the scenario
+ query := "CREATE TABLE diskspace AS SELECT generate_series(1, 1000000);"
+ _, _, err := env.ExecCommandWithPsqlClient(
+ namespace,
+ clusterName,
+ primaryPod,
+ apiv1.ApplicationUserSecretSuffix,
+ testsUtils.AppDBName,
+ query,
+ )
+ Expect(err).To(HaveOccurred())
+ query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT"
+ _, _, err = env.ExecQueryInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: primaryPod.Namespace,
+ PodName: primaryPod.Name,
+ },
+ testsUtils.DatabaseName("postgres"),
+ query)
+ Expect(err).To(HaveOccurred())
+ })
+ By("waiting for the primary to become not ready", func() {
+ Eventually(func(g Gomega) bool {
+ primaryPod, err := env.GetPod(namespace, primaryPod.Name)
+ g.Expect(err).ToNot(HaveOccurred())
+ return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse)
+ }).WithTimeout(time.Minute).Should(BeTrue())
+ })
+ By("checking if the operator detects the issue", func() {
+ Eventually(func(g Gomega) string {
+ cluster, err := env.GetCluster(namespace, clusterName)
+ g.Expect(err).ToNot(HaveOccurred())
+ return cluster.Status.Phase
+ }).WithTimeout(time.Minute).Should(Equal("Not enough disk space"))
+ })
+ }
+
+ recoveryTest := func(namespace, clusterName string) {
+ var cluster *apiv1.Cluster
+ var primaryPod *corev1.Pod
+ primaryWALPVC := &corev1.PersistentVolumeClaim{}
+ By("finding cluster resources", func() {
+ var err error
+ cluster, err = env.GetCluster(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cluster).ToNot(BeNil())
+
+ primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(primaryPod).ToNot(BeNil())
+
+ primaryWALPVCName := primaryPod.Name
+ if cluster.Spec.WalStorage != nil {
+ primaryWALPVCName = fmt.Sprintf("%v-wal", primaryWALPVCName)
+ }
+ err = env.Client.Get(env.Ctx,
+ types.NamespacedName{Namespace: primaryPod.Namespace, Name: primaryWALPVCName}, primaryWALPVC)
+ Expect(err).ToNot(HaveOccurred())
+ })
+ By("resizing the WAL volume", func() {
+ originPVC := primaryWALPVC.DeepCopy()
+ newSize := *resource.NewScaledQuantity(2, resource.Giga)
+ primaryWALPVC.Spec.Resources.Requests[corev1.ResourceStorage] = newSize
+ Expect(env.Client.Patch(env.Ctx, primaryWALPVC, ctrlclient.MergeFrom(originPVC))).To(Succeed())
+ Eventually(func(g Gomega) int64 {
+ err := env.Client.Get(env.Ctx,
+ types.NamespacedName{Namespace: primaryPod.Namespace, Name: primaryWALPVC.Name},
+ primaryWALPVC)
+ g.Expect(err).ToNot(HaveOccurred())
+ size := ptr.To(primaryWALPVC.Status.Capacity[corev1.ResourceStorage]).Value()
+ return size
+ }).WithTimeout(time.Minute * 5).Should(BeNumerically(">=",
+ newSize.Value()))
+ })
+ By("waiting for the primary to become ready", func() {
+ // The primary Pod will be in crash loop backoff. We need
+ // to wait for the Pod to restart. The maximum backoff time
+ // is set in the kubelet to 5 minutes, and this parameter
+ // is not configurable without recompiling the kubelet
+ // itself. See:
+ //
+ // https://github.com/kubernetes/kubernetes/blob/
+ // 1d5589e4910ed859a69b3e57c25cbbd3439cd65f/pkg/kubelet/kubelet.go#L145
+ //
+ // This is why we wait for 10 minutes here.
+ // We can't delete the Pod, as this will trigger
+ // a failover.
+ Eventually(func(g Gomega) bool {
+ primaryPod, err := env.GetPod(namespace, primaryPod.Name)
+ g.Expect(err).ToNot(HaveOccurred())
+ return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue)
+ }).WithTimeout(10 * time.Minute).Should(BeTrue())
+ })
+ By("writing some WAL", func() {
+ query := "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT"
+ _, _, err := env.ExecQueryInInstancePod(
+ testsUtils.PodLocator{
+ Namespace: primaryPod.Namespace,
+ PodName: primaryPod.Name,
+ },
+ testsUtils.DatabaseName("postgres"),
+ query)
+ Expect(err).NotTo(HaveOccurred())
+ })
+ }
+
+ BeforeEach(func() {
+ if testLevelEnv.Depth < int(level) {
+ Skip("Test depth is lower than the amount requested for this test")
+ }
+ if IsLocal() {
+ // Local environments use the node's disk space; running out of that space could cause multiple failures
+ Skip("This test is not executed on local environments")
+ }
+ })
+
+ DescribeTable("WAL volume space unavailable",
+ func(sampleFile string) {
+ var namespace string
+ var err error
+ // Create a cluster in a namespace we'll delete after the test
+ namespace, err = env.CreateUniqueNamespace(namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+ DeferCleanup(func() error {
+ if CurrentSpecReport().Failed() {
+ env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
+ }
+ return env.DeleteNamespace(namespace)
+ })
+
+ clusterName, err := env.GetResourceNameFromYAML(sampleFile)
+ Expect(err).ToNot(HaveOccurred())
+
+ AssertCreateCluster(namespace, clusterName, sampleFile, env)
+
+ By("leaving a full disk pod fenced", func() {
+ diskSpaceDetectionTest(namespace, clusterName)
+ })
+ By("being able to recover with manual intervention", func() {
+ recoveryTest(namespace, clusterName)
+ })
+ },
+ Entry("Data and WAL same volume", fixturesDir+"/disk_space/cluster-disk-space-single-volume.yaml.template"),
+ Entry("Data and WAL different volume", fixturesDir+"/disk_space/cluster-disk-space-wal-volume.yaml.template"),
+ )
+})
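The recovery phase of the test above is essentially a manual intervention: the WAL PVC is expanded to 2Gi and the primary is then left to restart on its own (kubelet crash-loop backoff can delay that by several minutes). Done by hand, the equivalent step is a storage-request patch such as the sketch below; the PVC name is an assumption following the `<instance>-wal` naming the test uses when a separate WAL volume is configured.

```yaml
# Hypothetical merge-patch body, e.g. applied with:
#   kubectl patch pvc diskspace-1-wal -n <namespace> --type merge --patch-file expand.yaml
# The PVC name is assumed; only the storage request changes.
spec:
  resources:
    requests:
      storage: 2Gi
```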
diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go
index a6ff75655c..bf3406a456 100644
--- a/tests/e2e/drain_node_test.go
+++ b/tests/e2e/drain_node_test.go
@@ -326,6 +326,14 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
const clusterName = "cluster-drain-node"
var namespace string
+ BeforeEach(func() {
+ // All GKE and AKS persistent disks are network-attached storage, provisioned independently of the
+ // underlying nodes, so they don't get deleted after a drain. Hence, even when using "reusePVC off",
+ // all the pods will be recreated with the same name and will reuse the existing volume.
+ if IsAKS() || IsGKE() {
+ Skip("This test case is only applicable on clusters with local storage")
+ }
+ })
JustAfterEach(func() {
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
diff --git a/tests/e2e/fixtures/disk_space/cluster-disk-space-single-volume.yaml.template b/tests/e2e/fixtures/disk_space/cluster-disk-space-single-volume.yaml.template
new file mode 100644
index 0000000000..d89e6ce326
--- /dev/null
+++ b/tests/e2e/fixtures/disk_space/cluster-disk-space-single-volume.yaml.template
@@ -0,0 +1,26 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: diskspace
+spec:
+ instances: 3
+
+ postgresql:
+ parameters:
+ log_checkpoints: "on"
+ log_lock_waits: "on"
+ log_min_duration_statement: '1000'
+ log_statement: 'ddl'
+ log_temp_files: '1024'
+ log_autovacuum_min_duration: '1s'
+ log_replication_commands: 'on'
+
+ bootstrap:
+ initdb:
+ database: app
+ owner: app
+
+ # Persistent storage configuration
+ storage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
diff --git a/tests/e2e/fixtures/disk_space/cluster-disk-space-wal-volume.yaml.template b/tests/e2e/fixtures/disk_space/cluster-disk-space-wal-volume.yaml.template
new file mode 100644
index 0000000000..f7881aa72e
--- /dev/null
+++ b/tests/e2e/fixtures/disk_space/cluster-disk-space-wal-volume.yaml.template
@@ -0,0 +1,29 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: diskspace
+spec:
+ instances: 3
+
+ postgresql:
+ parameters:
+ log_checkpoints: "on"
+ log_lock_waits: "on"
+ log_min_duration_statement: '1000'
+ log_statement: 'ddl'
+ log_temp_files: '1024'
+ log_autovacuum_min_duration: '1s'
+ log_replication_commands: 'on'
+
+ bootstrap:
+ initdb:
+ database: app
+ owner: app
+
+ # Persistent storage configuration
+ storage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
+ walStorage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
diff --git a/tests/e2e/fixtures/metrics/cluster-metrics-with-predicate-query.yaml.template b/tests/e2e/fixtures/metrics/cluster-metrics-with-predicate-query.yaml.template
new file mode 100644
index 0000000000..e0b3178318
--- /dev/null
+++ b/tests/e2e/fixtures/metrics/cluster-metrics-with-predicate-query.yaml.template
@@ -0,0 +1,38 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: postgresql-metrics
+spec:
+ instances: 3
+
+ postgresql:
+ parameters:
+ log_checkpoints: "on"
+ log_lock_waits: "on"
+ log_min_duration_statement: '1000'
+ log_statement: 'ddl'
+ log_temp_files: '1024'
+ log_autovacuum_min_duration: '1s'
+ log_replication_commands: 'on'
+
+ # Example of rolling update strategy:
+ # - unsupervised: automated update of the primary once all
+ # replicas have been upgraded (default)
+ # - supervised: requires manual supervision to perform
+ # the switchover of the primary
+ primaryUpdateStrategy: unsupervised
+
+ bootstrap:
+ initdb:
+ database: app
+ owner: app
+
+ monitoring:
+ customQueriesConfigMap:
+ - name: monitoring-01
+ key: queries.yaml
+
+ # Persistent storage configuration
+ storage:
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
+ size: 1Gi
diff --git a/tests/e2e/fixtures/metrics/custom-queries-for-replica-cluster.yaml b/tests/e2e/fixtures/metrics/custom-queries-for-replica-cluster.yaml
index 8bce58d08d..0bca6626d8 100644
--- a/tests/e2e/fixtures/metrics/custom-queries-for-replica-cluster.yaml
+++ b/tests/e2e/fixtures/metrics/custom-queries-for-replica-cluster.yaml
@@ -6,11 +6,11 @@ metadata:
e2e: metrics
data:
queries.yaml: |
- replica_test:
+ metrics_replica_mode:
query: |
- SELECT count(*) as row_count FROM test_replica
+ SELECT count(*) as row_count FROM metrics_replica_mode
primary: false
metrics:
- row_count:
usage: "GAUGE"
- description: "Number of rows present in test_replica table"
+ description: "Number of rows present in metrics_replica_mode table"
diff --git a/tests/e2e/fixtures/metrics/custom-queries-with-predicate-query.yaml b/tests/e2e/fixtures/metrics/custom-queries-with-predicate-query.yaml
new file mode 100644
index 0000000000..52a372083d
--- /dev/null
+++ b/tests/e2e/fixtures/metrics/custom-queries-with-predicate-query.yaml
@@ -0,0 +1,64 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: monitoring-01
+ labels:
+ e2e: metrics
+data:
+ queries.yaml: |
+ pg_predicate_query_return_true:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT true as result"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_empty:
+ query: "SELECT 42 as fixed"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_return_false:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT false as result"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_return_null_as_false:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT null as result"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_return_no_rows:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT true as result WHERE 1 <> 1"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_multiple_rows:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT true as result UNION SELECT false as result UNION SELECT true as result"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
+ pg_predicate_query_multiple_columns:
+ query: "SELECT 42 as fixed"
+ predicate_query: "SELECT true as result, 1 as foo"
+ primary: false
+ metrics:
+ - fixed:
+ usage: "GAUGE"
+ description: "Always 42, used to test predicate_query"
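
The fixture above exercises every edge case of the new `predicate_query` field: a metric is collected only when its predicate returns a single row with a single non-NULL boolean column that is true; false, NULL, no rows, multiple rows, or multiple columns all suppress collection, as asserted in metrics_test.go below. The following is a minimal Go sketch of that gating logic, assuming a plain database/sql connection; it illustrates the expected semantics and is not the operator's actual implementation.

```go
package main

import "database/sql"

// shouldCollect mirrors the predicate_query semantics the fixture expects:
// collect only when the predicate returns exactly one row whose single
// boolean column is a non-NULL true; anything else suppresses the metric.
func shouldCollect(db *sql.DB, predicateQuery string) (bool, error) {
	if predicateQuery == "" {
		// No predicate configured (pg_predicate_query_empty case): always collect.
		return true, nil
	}
	rows, err := db.Query(predicateQuery)
	if err != nil {
		return false, err
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		return false, err
	}
	if len(cols) != 1 || !rows.Next() {
		// Multiple columns or no rows: treat as false.
		return false, rows.Err()
	}
	var result sql.NullBool
	if err := rows.Scan(&result); err != nil {
		return false, err
	}
	if rows.Next() {
		// More than one row: treat as false.
		return false, rows.Err()
	}
	// NULL counts as false; only a non-NULL true enables collection.
	return result.Valid && result.Bool, nil
}
```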
diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go
index 111cbea198..06f651029b 100644
--- a/tests/e2e/managed_roles_test.go
+++ b/tests/e2e/managed_roles_test.go
@@ -18,11 +18,11 @@ package e2e
import (
"fmt"
+ "slices"
"strings"
"time"
"github.com/lib/pq"
- "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go
index e65c9581aa..21e53eb1fd 100644
--- a/tests/e2e/metrics_test.go
+++ b/tests/e2e/metrics_test.go
@@ -36,15 +36,16 @@ import (
var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
const (
- targetDBOne = "test"
- targetDBTwo = "test1"
- targetDBSecret = "secret_test"
- testTableName = "test_table"
- clusterMetricsFile = fixturesDir + "/metrics/cluster-metrics.yaml.template"
- clusterMetricsDBFile = fixturesDir + "/metrics/cluster-metrics-with-target-databases.yaml.template"
- customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml"
- defaultMonitoringConfigMapName = "cnpg-default-monitoring"
- level = tests.Low
+ targetDBOne = "test"
+ targetDBTwo = "test1"
+ targetDBSecret = "secret_test"
+ testTableName = "test_table"
+ clusterMetricsFile = fixturesDir + "/metrics/cluster-metrics.yaml.template"
+ clusterMetricsDBFile = fixturesDir + "/metrics/cluster-metrics-with-target-databases.yaml.template"
+ clusterMetricsPredicateQueryFile = fixturesDir + "/metrics/cluster-metrics-with-predicate-query.yaml.template"
+ customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml"
+ defaultMonitoringConfigMapName = "cnpg-default-monitoring"
+ level = tests.Low
)
buildExpectedMetrics := func(cluster *apiv1.Cluster, isReplicaPod bool) map[string]*regexp.Regexp {
@@ -131,7 +132,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
out, err := utils.CurlGetMetrics(namespace, curlPodName, pod.Status.PodIP, 9187)
Expect(err).ToNot(HaveOccurred(), "while getting pod metrics")
expectedMetrics := buildExpectedMetrics(metricsCluster, !specs.IsPodPrimary(pod))
- assertMetrics(out, expectedMetrics)
+ assertIncludesMetrics(out, expectedMetrics)
})
}
})
@@ -209,6 +210,59 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, curlPodName, true)
})
+ It("can gather metrics depending on the predicate query", func() {
+ // Create the cluster namespace
+ const namespacePrefix = "predicate-query-metrics-e2e"
+ metricsClusterName, err = env.GetResourceNameFromYAML(clusterMetricsPredicateQueryFile)
+ Expect(err).ToNot(HaveOccurred())
+ namespace, err = env.CreateUniqueNamespace(namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+ DeferCleanup(func() error {
+ return env.DeleteNamespace(namespace)
+ })
+
+ AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, 0)
+
+ // Create the curl client pod and wait for it to be ready.
+ By("setting up curl client pod", func() {
+ curlClient := utils.CurlClient(namespace)
+ err := utils.PodCreateAndWaitForReady(env, &curlClient, 240)
+ Expect(err).ToNot(HaveOccurred())
+ curlPodName = curlClient.GetName()
+ })
+
+ // Create the cluster
+ AssertCreateCluster(namespace, metricsClusterName, clusterMetricsPredicateQueryFile, env)
+
+ By("ensuring only metrics with a positive predicate are collected", func() {
+ podList, err := env.GetClusterPodList(namespace, metricsClusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ // We expect only the metrics whose predicate_query evaluates to true to be collected.
+ expectedMetrics := map[string]*regexp.Regexp{
+ "cnpg_pg_predicate_query_return_true_fixed": regexp.MustCompile(`42`),
+ "cnpg_pg_predicate_query_empty": regexp.MustCompile(`42`),
+ }
+ nonCollectableMetrics := []string{
+ "cnpg_pg_predicate_query_return_false",
+ "cnpg_pg_predicate_query_return_null_as_false",
+ "cnpg_pg_predicate_query_return_no_rows",
+ "cnpg_pg_predicate_query_multiple_rows",
+ "cnpg_pg_predicate_query_multiple_columns",
+ }
+
+ // Gather metrics in each pod
+ for _, pod := range podList.Items {
+ By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() {
+ out, err := utils.CurlGetMetrics(namespace, curlPodName, pod.Status.PodIP, 9187)
+ Expect(err).ToNot(HaveOccurred(), "while getting pod metrics")
+ assertIncludesMetrics(out, expectedMetrics)
+ assertExcludesMetrics(out, nonCollectableMetrics)
+ })
+ }
+ })
+ })
+
It("default set of metrics queries should not be injected into the cluster "+
"when disableDefaultQueries field set to be true", func() {
const defaultMonitoringQueriesDisableSampleFile = fixturesDir +
@@ -238,15 +292,15 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
It("execute custom queries against the application database on replica clusters", func() {
const (
+ namespacePrefix = "metrics-with-replica-mode"
replicaModeClusterDir = "/replica_mode_cluster/"
replicaClusterSampleFile = fixturesDir + "/metrics/cluster-replica-tls-with-metrics.yaml.template"
srcClusterSampleFile = fixturesDir + replicaModeClusterDir + "cluster-replica-src.yaml.template"
+ srcClusterDatabaseName = "appSrc"
configMapFIle = fixturesDir + "/metrics/custom-queries-for-replica-cluster.yaml"
- checkQuery = "SELECT count(*) FROM test_replica"
+ testTableName = "metrics_replica_mode"
)
- const namespacePrefix = "metrics-with-replica-mode"
-
// Fetching the source cluster name
srcClusterName, err := env.GetResourceNameFromYAML(srcClusterSampleFile)
Expect(err).ToNot(HaveOccurred())
@@ -280,12 +334,13 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
AssertReplicaModeCluster(
namespace,
srcClusterName,
+ srcClusterDatabaseName,
replicaClusterSampleFile,
- checkQuery,
+ testTableName,
psqlClientPod)
- By("grant select permission for test_replica table to pg_monitor", func() {
- cmd := "GRANT SELECT ON test_replica TO pg_monitor"
+ By(fmt.Sprintf("grant select permission for %v table to pg_monitor", testTableName), func() {
+ cmd := fmt.Sprintf("GRANT SELECT ON %v TO pg_monitor", testTableName)
appUser, appUserPass, err := utils.GetCredentials(srcClusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
Expect(err).ToNot(HaveOccurred())
host, err := utils.GetHostName(namespace, srcClusterName, env)
@@ -293,7 +348,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
_, _, err = utils.RunQueryFromPod(
psqlClientPod,
host,
- "appSrc",
+ srcClusterDatabaseName,
appUser,
appUserPass,
cmd,
@@ -305,11 +360,12 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
podList, err := env.GetClusterPodList(namespace, replicaClusterName)
Expect(err).ToNot(HaveOccurred())
// Gather metrics in each pod
+ expectedMetric := fmt.Sprintf("cnpg_%v_row_count 3", testTableName)
for _, pod := range podList.Items {
podIP := pod.Status.PodIP
out, err := utils.CurlGetMetrics(namespace, curlPodName, podIP, 9187)
Expect(err).Should(Not(HaveOccurred()))
- Expect(strings.Split(out, "\n")).Should(ContainElement("cnpg_replica_test_row_count 3"))
+ Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric))
}
})
collectAndAssertDefaultMetricsPresentOnEachPod(namespace, replicaClusterName, curlPodName, true)
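
The test above relies on the assertIncludesMetrics and assertExcludesMetrics helpers, whose implementation is not part of this diff. Below is a hedged sketch of the kind of check they perform on the Prometheus exposition text returned by CurlGetMetrics; the function names and matching strategy are assumptions made for illustration only.

```go
package main

import (
	"regexp"
	"strings"
)

// includesMetrics reports whether every expected metric name appears in the
// exposition output on a line whose value matches the given regexp (sketch only).
func includesMetrics(out string, expected map[string]*regexp.Regexp) bool {
	lines := strings.Split(out, "\n")
	for name, valueRe := range expected {
		found := false
		for _, line := range lines {
			if strings.HasPrefix(line, name) && valueRe.MatchString(line) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

// excludesMetrics reports whether none of the given metric names appear at all.
func excludesMetrics(out string, names []string) bool {
	for _, name := range names {
		if strings.Contains(out, name) {
			return false
		}
	}
	return true
}
```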
diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go
index 750a75dfd5..800aa8e77f 100644
--- a/tests/e2e/replica_mode_cluster_test.go
+++ b/tests/e2e/replica_mode_cluster_test.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -44,14 +44,17 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
replicaModeClusterDir = "/replica_mode_cluster/"
srcClusterName = "cluster-replica-src"
srcClusterSample = fixturesDir + replicaModeClusterDir + srcClusterName + ".yaml.template"
- checkQuery = "SELECT count(*) FROM test_replica"
level = tests.Medium
)
// those values are present in the cluster manifests
const (
- sourceDBName = "appSrc"
+ // sourceDBName is the name of the database in the source cluster
+ sourceDBName = "appSrc"
+ // Application database configuration is skipped for replica clusters,
+ // so we expect the following database and user not to be present
replicaDBName = "appTgt"
+ replicaUser = "userTgt"
)
BeforeEach(func() {
@@ -62,8 +65,12 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
Context("can bootstrap a replica cluster using TLS auth", func() {
It("should work", func() {
- const replicaClusterSampleTLS = fixturesDir + replicaModeClusterDir + "cluster-replica-tls.yaml.template"
- replicaNamespacePrefix := "replica-mode-tls-auth"
+ const (
+ replicaClusterSampleTLS = fixturesDir + replicaModeClusterDir + "cluster-replica-tls.yaml.template"
+ replicaNamespacePrefix = "replica-mode-tls-auth"
+ testTableName = "replica_mode_tls_auth"
+ )
+
replicaNamespace, err := env.CreateUniqueNamespace(replicaNamespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
@@ -73,11 +80,13 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
return env.DeleteNamespace(replicaNamespace)
})
AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env)
+
AssertReplicaModeCluster(
replicaNamespace,
srcClusterName,
+ sourceDBName,
replicaClusterSampleTLS,
- checkQuery,
+ testTableName,
psqlClientPod)
})
})
@@ -87,6 +96,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
const (
replicaClusterSampleBasicAuth = fixturesDir + replicaModeClusterDir + "cluster-replica-basicauth.yaml.template"
replicaNamespacePrefix = "replica-mode-basic-auth"
+ testTableName = "replica_mode_basic_auth"
)
replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleBasicAuth)
@@ -100,20 +110,23 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
return env.DeleteNamespace(replicaNamespace)
})
AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env)
+
AssertReplicaModeCluster(
replicaNamespace,
srcClusterName,
+ sourceDBName,
replicaClusterSampleBasicAuth,
- checkQuery,
+ testTableName,
psqlClientPod)
AssertDetachReplicaModeCluster(
replicaNamespace,
srcClusterName,
- replicaClusterName,
sourceDBName,
+ replicaClusterName,
replicaDBName,
- "test_replica")
+ replicaUser,
+ "replica_mode_basic_auth_detach")
})
It("should be able to switch to replica cluster and sync data", func(ctx SpecContext) {
@@ -124,7 +137,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
"cluster-demotion-one.yaml.template"
clusterTwoFile = fixturesDir + replicaModeClusterDir +
"cluster-demotion-two.yaml.template"
+ testTableName = "replica_promotion_demotion"
)
+ var clusterOnePrimary, clusterTwoPrimary *corev1.Pod
getReplicaClusterSwitchCondition := func(conditions []metav1.Condition) *metav1.Condition {
for _, condition := range conditions {
@@ -144,11 +159,13 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
return env.DeleteNamespace(namespace)
})
AssertCreateCluster(namespace, clusterOneName, clusterOneFile, env)
+
AssertReplicaModeCluster(
namespace,
clusterOneName,
+ sourceDBName,
clusterTwoFile,
- checkQuery,
+ testTableName,
psqlClientPod)
// turn the src cluster into a replica
@@ -169,42 +186,42 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
}).Should(Succeed())
})
- By("disabling the replica mode on the src cluster", func() {
+ By("checking that src cluster is now a replica cluster", func() {
+ Eventually(func() error {
+ clusterOnePrimary, err = env.GetClusterPrimary(namespace, clusterOneName)
+ return err
+ }, 30, 3).Should(BeNil())
+ AssertPgRecoveryMode(clusterOnePrimary, true)
+ })
+
+ // turn the dst cluster into a primary
+ By("disabling the replica mode on the dst cluster", func() {
cluster, err := env.GetCluster(namespace, clusterTwoName)
Expect(err).ToNot(HaveOccurred())
cluster.Spec.ReplicaCluster.Enabled = false
err = env.Client.Update(ctx, cluster)
Expect(err).ToNot(HaveOccurred())
- AssertClusterIsReady(namespace, clusterOneName, testTimeouts[testUtils.ClusterIsReady], env)
+ AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[testUtils.ClusterIsReady], env)
})
- var newPrimaryPod *corev1.Pod
- Eventually(func() error {
- newPrimaryPod, err = env.GetClusterPrimary(namespace, clusterTwoName)
- return err
- }, 30, 3).Should(BeNil())
-
- var newPrimaryReplicaPod *corev1.Pod
- Eventually(func() error {
- newPrimaryReplicaPod, err = env.GetClusterPrimary(namespace, clusterOneName)
- return err
- }, 30, 3).Should(BeNil())
+ By("checking that dst cluster has been promoted", func() {
+ Eventually(func() error {
+ clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName)
+ return err
+ }, 30, 3).Should(BeNil())
+ AssertPgRecoveryMode(clusterTwoPrimary, false)
+ })
By("creating a new data in the new source cluster", func() {
- query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", "new_test_table")
- commandTimeout := time.Second * 10
- Eventually(func(g Gomega) {
- _, _, err := env.ExecCommand(env.Ctx, *newPrimaryPod, specs.PostgresContainerName,
- &commandTimeout, "psql", "-U", "postgres", "appSrc", "-tAc", query)
- g.Expect(err).ToNot(HaveOccurred())
- }, 300).Should(Succeed())
+ AssertCreateTestDataWithDatabaseName(namespace, clusterTwoName, sourceDBName,
+ "new_test_table", clusterTwoPrimary)
})
By("checking that the data is present in the old src cluster", func() {
AssertDataExpectedCountWithDatabaseName(
namespace,
- newPrimaryReplicaPod.Name,
- "appSrc",
+ clusterOnePrimary.Name,
+ sourceDBName,
"new_test_table",
2,
)
@@ -217,6 +234,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
const (
replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-archive-mode-always.yaml.template"
replicaNamespacePrefix = "replica-mode-archive"
+ testTableName = "replica_mode_archive"
)
replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample)
@@ -239,11 +257,13 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
})
AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env)
+
AssertReplicaModeCluster(
replicaNamespace,
srcClusterName,
+ sourceDBName,
replicaClusterSample,
- checkQuery,
+ testTableName,
psqlClientPod)
// Get primary from replica cluster
@@ -268,26 +288,24 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
})
})
- Context("can bootstrap a replica cluster from a backup", func() {
+ Context("can bootstrap a replica cluster from a backup", Ordered, func() {
const (
clusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-src-with-backup.yaml.template"
namespacePrefix = "replica-cluster-from-backup"
)
var namespace, clusterName string
- BeforeEach(func() {
+ JustAfterEach(func() {
+ if CurrentSpecReport().Failed() {
+ env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
+ }
+ })
+
+ BeforeAll(func() {
var err error
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
- clusterName, err = env.GetResourceNameFromYAML(clusterSample)
- Expect(err).ToNot(HaveOccurred())
-
- DeferCleanup(func() error {
- if CurrentSpecReport().Failed() {
- env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
- }
- return env.DeleteNamespace(namespace)
- })
+ DeferCleanup(func() error { return env.DeleteNamespace(namespace) })
By("creating the credentials for minio", func() {
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
@@ -299,11 +317,16 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
})
// Create the cluster
+ clusterName, err = env.GetResourceNameFromYAML(clusterSample)
+ Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterSample, env)
})
It("using a Backup from the object store", func() {
- const replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-from-backup.yaml.template"
+ const (
+ replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-from-backup.yaml.template"
+ testTableName = "replica_mode_backup"
+ )
By("creating a backup and waiting until it's completed", func() {
backupName := fmt.Sprintf("%v-backup", clusterName)
@@ -329,8 +352,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
AssertReplicaModeCluster(
namespace,
clusterName,
+ sourceDBName,
replicaClusterSample,
- checkQuery,
+ testTableName,
psqlClientPod)
})
})
@@ -340,6 +364,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-from-snapshot.yaml.template"
snapshotDataEnv = "REPLICA_CLUSTER_SNAPSHOT_NAME_PGDATA"
snapshotWalEnv = "REPLICA_CLUSTER_SNAPSHOT_NAME_PGWAL"
+ testTableName = "replica_mode_snapshot"
)
DeferCleanup(func() error {
@@ -398,8 +423,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
AssertReplicaModeCluster(
namespace,
clusterName,
+ sourceDBName,
replicaClusterSample,
- checkQuery,
+ testTableName,
psqlClientPod)
})
})
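
AssertPgRecoveryMode, used above to verify that the source cluster was demoted and the target promoted, is implemented elsewhere in the suite. A minimal sketch of the underlying check, assuming a database/sql connection: PostgreSQL's pg_is_in_recovery() returns true on a streaming replica and false on a (promoted) primary.

```go
package main

import "database/sql"

// isInRecovery sketches the check that AssertPgRecoveryMode presumably wraps:
// pg_is_in_recovery() is true while the instance is acting as a replica.
func isInRecovery(db *sql.DB) (bool, error) {
	var inRecovery bool
	err := db.QueryRow("SELECT pg_is_in_recovery()").Scan(&inRecovery)
	return inRecovery, err
}
```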
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index 601ae14a9a..aaa9746d3f 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -160,13 +160,20 @@ func saveLogs(buf *bytes.Buffer, logsType, specName string, output io.Writer, ca
return
}
defer func() {
+ var err error
syncErr := f.Sync()
if syncErr != nil {
- fmt.Fprintln(output, "ERROR while flushing file:", syncErr)
+ _, err = fmt.Fprintln(output, "ERROR while flushing file:", syncErr)
+ }
+ if err != nil {
+ fmt.Println(err)
}
closeErr := f.Close()
if closeErr != nil {
- fmt.Fprintln(output, "ERROR while closing file:", err)
+ _, err = fmt.Fprintln(output, "ERROR while closing file:", err)
+ }
+ if err != nil {
+ fmt.Println(err)
}
}()
@@ -183,7 +190,11 @@ func saveLogs(buf *bytes.Buffer, logsType, specName string, output io.Writer, ca
var js map[string]interface{}
err = json.Unmarshal([]byte(lg), &js)
if err != nil {
- fmt.Fprintln(output, "ERROR parsing log:", err, lg)
+ _, err = fmt.Fprintln(output, "ERROR parsing log:", err, lg)
+ if err != nil {
+ fmt.Println(err)
+ continue
+ }
}
timestamp, ok := js["ts"].(float64)
if ok {
@@ -200,25 +211,37 @@ func saveLogs(buf *bytes.Buffer, logsType, specName string, output io.Writer, ca
bufferIdx = linesToShow % capLines
}
// write every line to the file stream
- fmt.Fprintln(f, lg)
+ _, err := fmt.Fprintln(f, lg)
+ if err != nil {
+ fmt.Println(err)
+ continue
+ }
}
// print the last `capLines` lines of logs to the `output`
+ var switchErr error
switch {
case linesToShow == 0:
- fmt.Fprintln(output, "-- no error / warning logs --")
+ _, switchErr = fmt.Fprintln(output, "-- no error / warning logs --")
case linesToShow <= capLines:
- fmt.Fprintln(output, strings.Join(lineBuffer[:linesToShow], "\n"))
+ _, switchErr = fmt.Fprintln(output, strings.Join(lineBuffer[:linesToShow], "\n"))
case bufferIdx == 0:
// if bufferIdx == 0, the buffer just finished filling and is in order
- fmt.Fprintln(output, strings.Join(lineBuffer, "\n"))
+ _, switchErr = fmt.Fprintln(output, strings.Join(lineBuffer, "\n"))
default:
// the line buffer cycled back and the items 0 to bufferIdx - 1 are newer than the rest
- fmt.Fprintln(output, strings.Join(append(lineBuffer[bufferIdx:], lineBuffer[:bufferIdx]...), "\n"))
+ _, switchErr = fmt.Fprintln(output, strings.Join(append(lineBuffer[bufferIdx:], lineBuffer[:bufferIdx]...), "\n"))
+ }
+
+ if switchErr != nil {
+ fmt.Println(switchErr)
}
if err := scanner.Err(); err != nil {
- fmt.Fprintln(output, "ERROR while scanning:", err)
+ _, err := fmt.Fprintln(output, "ERROR while scanning:", err)
+ if err != nil {
+ fmt.Println(err)
+ }
}
}
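
The saveLogs rework above captures every write error from fmt.Fprintln and falls back to printing it on stdout. One way to avoid the repeated `_, err = ...` checks would be a small helper like the sketch below; this only illustrates the pattern and is not something the patch introduces.

```go
package main

import (
	"fmt"
	"io"
)

// fprintlnOrLog writes a line to w and, if the write itself fails, reports
// the failure on stdout instead of dropping it silently (sketch only).
func fprintlnOrLog(w io.Writer, args ...interface{}) {
	if _, err := fmt.Fprintln(w, args...); err != nil {
		fmt.Println(err)
	}
}
```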
diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go
index 6a97edfe49..b6d4c577f8 100644
--- a/tests/e2e/tablespaces_test.go
+++ b/tests/e2e/tablespaces_test.go
@@ -28,7 +28,7 @@ import (
"strings"
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go
index 67aca8fbba..79c3770edc 100644
--- a/tests/e2e/upgrade_test.go
+++ b/tests/e2e/upgrade_test.go
@@ -535,6 +535,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1)
Expect(err).ToNot(HaveOccurred())
for _, pod := range podList.Items {
+ Expect(pod.Status.ContainerStatuses).NotTo(BeEmpty())
Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeEquivalentTo(0))
}
})
diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go
index a299969657..ca32853819 100644
--- a/tests/e2e/volume_snapshot_test.go
+++ b/tests/e2e/volume_snapshot_test.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8client "sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/tests/utils/backup.go b/tests/utils/backup.go
index 404d32e227..d88c0b5504 100644
--- a/tests/utils/backup.go
+++ b/tests/utils/backup.go
@@ -21,7 +21,7 @@ import (
"fmt"
"os"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
diff --git a/tests/utils/environment.go b/tests/utils/environment.go
index c5d35feab9..c4dc444a9a 100644
--- a/tests/utils/environment.go
+++ b/tests/utils/environment.go
@@ -29,7 +29,7 @@ import (
"time"
"github.com/go-logr/logr"
- storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/thoas/go-funk"
appsv1 "k8s.io/api/apps/v1"
diff --git a/tests/utils/logs.go b/tests/utils/logs.go
index 86bfb6fcb1..04c7e2318c 100644
--- a/tests/utils/logs.go
+++ b/tests/utils/logs.go
@@ -19,10 +19,9 @@ package utils
import (
"encoding/json"
"fmt"
+ "slices"
"strings"
"time"
-
- "golang.org/x/exp/slices"
)
// ParseJSONLogs returns the pod's logs of a given pod name,
diff --git a/tests/utils/pod.go b/tests/utils/pod.go
index 5e18a1f1a8..f62b4fcba1 100644
--- a/tests/utils/pod.go
+++ b/tests/utils/pod.go
@@ -92,6 +92,16 @@ func PodHasAnnotations(pod corev1.Pod, annotations map[string]string) bool {
return true
}
+// PodHasCondition verifies that a pod has a specified condition
+func PodHasCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, status corev1.ConditionStatus) bool {
+ for _, cond := range pod.Status.Conditions {
+ if cond.Type == conditionType && cond.Status == status {
+ return true
+ }
+ }
+ return false
+}
+
// DeletePod deletes a pod if existent
func (env TestingEnvironment) DeletePod(namespace string, name string, opts ...client.DeleteOption) error {
u := &unstructured.Unstructured{}
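
A quick illustration of the new PodHasCondition helper: the pod literal is a hypothetical fixture, and the import path is assumed from the repository layout rather than taken from this diff.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
)

func main() {
	// Hypothetical pod reporting Ready=True, used only to exercise the helper.
	pod := corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}
	fmt.Println(utils.PodHasCondition(&pod, corev1.PodReady, corev1.ConditionTrue)) // true
}
```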
diff --git a/tests/utils/storage.go b/tests/utils/storage.go
index 6dd4ea3652..16883a5f05 100644
--- a/tests/utils/storage.go
+++ b/tests/utils/storage.go
@@ -20,7 +20,7 @@ import (
"fmt"
"os"
- volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/tests/utils/timeouts.go b/tests/utils/timeouts.go
index 81d0f01e94..13280fa4f1 100644
--- a/tests/utils/timeouts.go
+++ b/tests/utils/timeouts.go
@@ -62,7 +62,7 @@ var DefaultTestTimeouts = map[Timeout]int{
NewPrimaryAfterSwitchover: 45,
NewPrimaryAfterFailover: 30,
NewTargetOnFailover: 120,
- PodRollout: 120,
+ PodRollout: 180,
OperatorIsReady: 120,
LargeObject: 300,
WalsInMinio: 60,
@@ -73,7 +73,7 @@ var DefaultTestTimeouts = map[Timeout]int{
Short: 5,
}
-// Timeouts returns the map of timeouts, where each event gets the timeout specificed
+// Timeouts returns the map of timeouts, where each event gets the timeout specified
// in the `TEST_TIMEOUTS` environment variable, or if not specified, takes the default
// value
func Timeouts() (map[Timeout]int, error) {